1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
6
7#include "src/base/bits.h"
8#include "src/code-factory.h"
9#include "src/code-stubs.h"
10#include "src/crankshaft/hydrogen-osr.h"
11#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
12#include "src/ic/ic.h"
13#include "src/ic/stub-cache.h"
14#include "src/profiler/cpu-profiler.h"
15
16namespace v8 {
17namespace internal {
18
19
20class SafepointGenerator final : public CallWrapper {
21 public:
22 SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
23 Safepoint::DeoptMode mode)
24 : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
25 virtual ~SafepointGenerator() {}
26
27 void BeforeCall(int call_size) const override {}
28
29 void AfterCall() const override {
30 codegen_->RecordSafepoint(pointers_, deopt_mode_);
31 }
32
33 private:
34 LCodeGen* codegen_;
35 LPointerMap* pointers_;
36 Safepoint::DeoptMode deopt_mode_;
37};
38
39
40#define __ masm()->
41
42bool LCodeGen::GenerateCode() {
43 LPhase phase("Z_Code generation", chunk());
44 DCHECK(is_unused());
45 status_ = GENERATING;
46
47 // Open a frame scope to indicate that there is a frame on the stack. The
48 // NONE indicates that the scope shouldn't actually generate code to set up
49 // the frame (that is done in GeneratePrologue).
50 FrameScope frame_scope(masm_, StackFrame::NONE);
51
52 bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
53 GenerateJumpTable() && GenerateSafepointTable();
54 if (FLAG_enable_embedded_constant_pool && !rc) {
55 masm()->AbortConstantPoolBuilding();
56 }
57 return rc;
58}
59
60
61void LCodeGen::FinishCode(Handle<Code> code) {
62 DCHECK(is_done());
63 code->set_stack_slots(GetTotalFrameSlotCount());
64 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
65 PopulateDeoptimizationData(code);
66}
67
68
69void LCodeGen::SaveCallerDoubles() {
70 DCHECK(info()->saves_caller_doubles());
71 DCHECK(NeedsEagerFrame());
72 Comment(";;; Save clobbered callee double registers");
73 int count = 0;
74 BitVector* doubles = chunk()->allocated_double_registers();
75 BitVector::Iterator save_iterator(doubles);
76 while (!save_iterator.Done()) {
77 __ stfd(DoubleRegister::from_code(save_iterator.Current()),
78 MemOperand(sp, count * kDoubleSize));
79 save_iterator.Advance();
80 count++;
81 }
82}
83
84
85void LCodeGen::RestoreCallerDoubles() {
86 DCHECK(info()->saves_caller_doubles());
87 DCHECK(NeedsEagerFrame());
88 Comment(";;; Restore clobbered callee double registers");
89 BitVector* doubles = chunk()->allocated_double_registers();
90 BitVector::Iterator save_iterator(doubles);
91 int count = 0;
92 while (!save_iterator.Done()) {
93 __ lfd(DoubleRegister::from_code(save_iterator.Current()),
94 MemOperand(sp, count * kDoubleSize));
95 save_iterator.Advance();
96 count++;
97 }
98}
99
100
101bool LCodeGen::GeneratePrologue() {
102 DCHECK(is_generating());
103
104 if (info()->IsOptimizing()) {
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
106
107 // r4: Callee's JS function.
108 // cp: Callee's context.
109 // pp: Callee's constant pool pointer (if enabled)
110 // fp: Caller's frame pointer.
111 // lr: Caller's pc.
112 // ip: Our own function entry (required by the prologue)
113 }
114
115 int prologue_offset = masm_->pc_offset();
116
117 if (prologue_offset) {
118 // Prologue logic requires its starting address in ip and the
119 // corresponding offset from the function entry.
120 prologue_offset += Instruction::kInstrSize;
121 __ addi(ip, ip, Operand(prologue_offset));
122 }
123 info()->set_prologue_offset(prologue_offset);
124 if (NeedsEagerFrame()) {
125 if (info()->IsStub()) {
126 __ StubPrologue(ip, prologue_offset);
127 } else {
128 __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
129 }
130 frame_is_built_ = true;
131 }
132
133 // Reserve space for the stack slots needed by the code.
134 int slots = GetStackSlotCount();
135 if (slots > 0) {
136 __ subi(sp, sp, Operand(slots * kPointerSize));
137 if (FLAG_debug_code) {
138 __ Push(r3, r4);
139 __ li(r0, Operand(slots));
140 __ mtctr(r0);
141 __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
142 __ mov(r4, Operand(kSlotsZapValue));
143 Label loop;
144 __ bind(&loop);
145 __ StorePU(r4, MemOperand(r3, -kPointerSize));
146 __ bdnz(&loop);
147 __ Pop(r3, r4);
148 }
149 }
150
151 if (info()->saves_caller_doubles()) {
152 SaveCallerDoubles();
153 }
154 return !is_aborted();
155}
156
157
158void LCodeGen::DoPrologue(LPrologue* instr) {
159 Comment(";;; Prologue begin");
160
161 // Possibly allocate a local context.
162 if (info()->scope()->num_heap_slots() > 0) {
163 Comment(";;; Allocate local context");
164 bool need_write_barrier = true;
165 // Argument to NewContext is the function, which is in r4.
166 int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
167 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
168 if (info()->scope()->is_script_scope()) {
169 __ push(r4);
170 __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
171 __ CallRuntime(Runtime::kNewScriptContext);
172 deopt_mode = Safepoint::kLazyDeopt;
173 } else if (slots <= FastNewContextStub::kMaximumSlots) {
174 FastNewContextStub stub(isolate(), slots);
175 __ CallStub(&stub);
176 // Result of FastNewContextStub is always in new space.
177 need_write_barrier = false;
178 } else {
179 __ push(r4);
180 __ CallRuntime(Runtime::kNewFunctionContext);
181 }
182 RecordSafepoint(deopt_mode);
183
184 // Context is returned in both r3 and cp. It replaces the context
185 // passed to us. It's saved in the stack and kept live in cp.
186 __ mr(cp, r3);
187 __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
188 // Copy any necessary parameters into the context.
189 int num_parameters = scope()->num_parameters();
190 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
191 for (int i = first_parameter; i < num_parameters; i++) {
192 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
193 if (var->IsContextSlot()) {
194 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
195 (num_parameters - 1 - i) * kPointerSize;
196 // Load parameter from stack.
197 __ LoadP(r3, MemOperand(fp, parameter_offset));
198 // Store it in the context.
199 MemOperand target = ContextMemOperand(cp, var->index());
200 __ StoreP(r3, target, r0);
201 // Update the write barrier. This clobbers r6 and r3.
202 if (need_write_barrier) {
203 __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
204 GetLinkRegisterState(), kSaveFPRegs);
205 } else if (FLAG_debug_code) {
206 Label done;
207 __ JumpIfInNewSpace(cp, r3, &done);
208 __ Abort(kExpectedNewSpaceObject);
209 __ bind(&done);
210 }
211 }
212 }
213 Comment(";;; End allocate local context");
214 }
215
216 Comment(";;; Prologue end");
217}
218
219
220void LCodeGen::GenerateOsrPrologue() {
221 // Generate the OSR entry prologue at the first unknown OSR value or, if
222 // there are none, at the OSR entrypoint instruction.
223 if (osr_pc_offset_ >= 0) return;
224
225 osr_pc_offset_ = masm()->pc_offset();
226
227 // Adjust the frame size, subsuming the unoptimized frame into the
228 // optimized frame.
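  // For example, if the optimized code needs 10 stack slots and the
  // unoptimized frame already provides 6, only the remaining 4 slots are
  // reserved here (illustrative numbers only).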
229 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
230 DCHECK(slots >= 0);
231 __ subi(sp, sp, Operand(slots * kPointerSize));
232}
233
234
235void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
236 if (instr->IsCall()) {
237 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
238 }
239 if (!instr->IsLazyBailout() && !instr->IsGap()) {
240 safepoints_.BumpLastLazySafepointIndex();
241 }
242}
243
244
245bool LCodeGen::GenerateDeferredCode() {
246 DCHECK(is_generating());
247 if (deferred_.length() > 0) {
248 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
249 LDeferredCode* code = deferred_[i];
250
251 HValue* value =
252 instructions_->at(code->instruction_index())->hydrogen_value();
253 RecordAndWritePosition(
254 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
255
256 Comment(
257 ";;; <@%d,#%d> "
258 "-------------------- Deferred %s --------------------",
259 code->instruction_index(), code->instr()->hydrogen_value()->id(),
260 code->instr()->Mnemonic());
261 __ bind(code->entry());
262 if (NeedsDeferredFrame()) {
263 Comment(";;; Build frame");
264 DCHECK(!frame_is_built_);
265 DCHECK(info()->IsStub());
266 frame_is_built_ = true;
267 __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
268 __ PushFixedFrame(scratch0());
269 __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
270 Comment(";;; Deferred code");
271 }
272 code->Generate();
273 if (NeedsDeferredFrame()) {
274 Comment(";;; Destroy frame");
275 DCHECK(frame_is_built_);
276 __ PopFixedFrame(ip);
277 frame_is_built_ = false;
278 }
279 __ b(code->exit());
280 }
281 }
282
283 return !is_aborted();
284}
285
286
287bool LCodeGen::GenerateJumpTable() {
288 // Check that the jump table is accessible from everywhere in the function
289 // code, i.e. that offsets to the table can be encoded in the 24-bit signed
290 // immediate of a branch instruction.
291 // To simplify, we consider the code size from the first instruction to the
292 // end of the jump table. We also don't consider the pc load delta.
293 // Each entry in the jump table generates one instruction and inlines one
294 // word of 32-bit data after it.
295 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
296 jump_table_.length() * 7)) {
297 Abort(kGeneratedCodeIsTooLarge);
298 }
299
300 if (jump_table_.length() > 0) {
301 Label needs_frame, call_deopt_entry;
302
303 Comment(";;; -------------------- Jump table --------------------");
304 Address base = jump_table_[0].address;
305
306 Register entry_offset = scratch0();
307
308 int length = jump_table_.length();
309 for (int i = 0; i < length; i++) {
310 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
311 __ bind(&table_entry->label);
312
313 DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
314 Address entry = table_entry->address;
315 DeoptComment(table_entry->deopt_info);
316
317 // Second-level deopt table entries are contiguous and small, so instead
318 // of loading the full, absolute address of each one, load an immediate
319 // offset which will be added to the base address later.
320 __ mov(entry_offset, Operand(entry - base));
321
322 if (table_entry->needs_frame) {
323 DCHECK(!info()->saves_caller_doubles());
324 Comment(";;; call deopt with frame");
325 __ PushFixedFrame();
326 __ b(&needs_frame, SetLK);
327 } else {
328 __ b(&call_deopt_entry, SetLK);
329 }
330 info()->LogDeoptCallPosition(masm()->pc_offset(),
331 table_entry->deopt_info.inlining_id);
332 }
333
334 if (needs_frame.is_linked()) {
335 __ bind(&needs_frame);
336 // This variant of deopt can only be used with stubs. Since we don't
337 // have a function pointer to install in the stack frame that we're
338 // building, install a special marker there instead.
339 DCHECK(info()->IsStub());
340 __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
341 __ push(ip);
342 __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
343 }
344
345 Comment(";;; call deopt");
346 __ bind(&call_deopt_entry);
347
348 if (info()->saves_caller_doubles()) {
349 DCHECK(info()->IsStub());
350 RestoreCallerDoubles();
351 }
352
353 // Add the base address to the offset previously loaded in entry_offset.
354 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
355 __ add(ip, entry_offset, ip);
356 __ Jump(ip);
357 }
358
359 // The deoptimization jump table is the last part of the instruction
360 // sequence. Mark the generated code as done unless we bailed out.
361 if (!is_aborted()) status_ = DONE;
362 return !is_aborted();
363}
364
365
366bool LCodeGen::GenerateSafepointTable() {
367 DCHECK(is_done());
368 safepoints_.Emit(masm(), GetTotalFrameSlotCount());
369 return !is_aborted();
370}
371
372
373Register LCodeGen::ToRegister(int code) const {
374 return Register::from_code(code);
375}
376
377
378DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
379 return DoubleRegister::from_code(code);
380}
381
382
383Register LCodeGen::ToRegister(LOperand* op) const {
384 DCHECK(op->IsRegister());
385 return ToRegister(op->index());
386}
387
388
389Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
390 if (op->IsRegister()) {
391 return ToRegister(op->index());
392 } else if (op->IsConstantOperand()) {
393 LConstantOperand* const_op = LConstantOperand::cast(op);
394 HConstant* constant = chunk_->LookupConstant(const_op);
395 Handle<Object> literal = constant->handle(isolate());
396 Representation r = chunk_->LookupLiteralRepresentation(const_op);
397 if (r.IsInteger32()) {
398 AllowDeferredHandleDereference get_number;
399 DCHECK(literal->IsNumber());
400 __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
401 } else if (r.IsDouble()) {
402 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
403 } else {
404 DCHECK(r.IsSmiOrTagged());
405 __ Move(scratch, literal);
406 }
407 return scratch;
408 } else if (op->IsStackSlot()) {
409 __ LoadP(scratch, ToMemOperand(op));
410 return scratch;
411 }
412 UNREACHABLE();
413 return scratch;
414}
415
416
417void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
418 Register dst) {
419 DCHECK(IsInteger32(const_op));
420 HConstant* constant = chunk_->LookupConstant(const_op);
421 int32_t value = constant->Integer32Value();
422 if (IsSmi(const_op)) {
423 __ LoadSmiLiteral(dst, Smi::FromInt(value));
424 } else {
425 __ LoadIntLiteral(dst, value);
426 }
427}
428
429
430DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
431 DCHECK(op->IsDoubleRegister());
432 return ToDoubleRegister(op->index());
433}
434
435
436Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
437 HConstant* constant = chunk_->LookupConstant(op);
438 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
439 return constant->handle(isolate());
440}
441
442
443bool LCodeGen::IsInteger32(LConstantOperand* op) const {
444 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
445}
446
447
448bool LCodeGen::IsSmi(LConstantOperand* op) const {
449 return chunk_->LookupLiteralRepresentation(op).IsSmi();
450}
451
452
453int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
454 return ToRepresentation(op, Representation::Integer32());
455}
456
457
458intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
459 const Representation& r) const {
460 HConstant* constant = chunk_->LookupConstant(op);
461 int32_t value = constant->Integer32Value();
462 if (r.IsInteger32()) return value;
463 DCHECK(r.IsSmiOrTagged());
464 return reinterpret_cast<intptr_t>(Smi::FromInt(value));
465}
466
467
468Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
469 HConstant* constant = chunk_->LookupConstant(op);
470 return Smi::FromInt(constant->Integer32Value());
471}
472
473
474double LCodeGen::ToDouble(LConstantOperand* op) const {
475 HConstant* constant = chunk_->LookupConstant(op);
476 DCHECK(constant->HasDoubleValue());
477 return constant->DoubleValue();
478}
479
480
481Operand LCodeGen::ToOperand(LOperand* op) {
482 if (op->IsConstantOperand()) {
483 LConstantOperand* const_op = LConstantOperand::cast(op);
484 HConstant* constant = chunk()->LookupConstant(const_op);
485 Representation r = chunk_->LookupLiteralRepresentation(const_op);
486 if (r.IsSmi()) {
487 DCHECK(constant->HasSmiValue());
488 return Operand(Smi::FromInt(constant->Integer32Value()));
489 } else if (r.IsInteger32()) {
490 DCHECK(constant->HasInteger32Value());
491 return Operand(constant->Integer32Value());
492 } else if (r.IsDouble()) {
493 Abort(kToOperandUnsupportedDoubleImmediate);
494 }
495 DCHECK(r.IsTagged());
496 return Operand(constant->handle(isolate()));
497 } else if (op->IsRegister()) {
498 return Operand(ToRegister(op));
499 } else if (op->IsDoubleRegister()) {
500 Abort(kToOperandIsDoubleRegisterUnimplemented);
501 return Operand::Zero();
502 }
503 // Stack slots not implemented, use ToMemOperand instead.
504 UNREACHABLE();
505 return Operand::Zero();
506}
507
508
509static int ArgumentsOffsetWithoutFrame(int index) {
510 DCHECK(index < 0);
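  // Parameter indices are negative; e.g. index -1 maps to offset 0 from sp and
  // index -2 to kPointerSize.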
511 return -(index + 1) * kPointerSize;
512}
513
514
515MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
516 DCHECK(!op->IsRegister());
517 DCHECK(!op->IsDoubleRegister());
518 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
519 if (NeedsEagerFrame()) {
520 return MemOperand(fp, FrameSlotToFPOffset(op->index()));
521 } else {
522 // Without an eager frame, retrieve the parameter relative to the
523 // stack pointer.
524 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
525 }
526}
527
528
529MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
530 DCHECK(op->IsDoubleStackSlot());
531 if (NeedsEagerFrame()) {
532 return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
533 } else {
534 // Without an eager frame, retrieve the parameter relative to the
535 // stack pointer.
536 return MemOperand(sp,
537 ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
538 }
539}
540
541
542void LCodeGen::WriteTranslation(LEnvironment* environment,
543 Translation* translation) {
544 if (environment == NULL) return;
545
546 // The translation includes one command per value in the environment.
547 int translation_size = environment->translation_size();
548
549 WriteTranslation(environment->outer(), translation);
550 WriteTranslationFrame(environment, translation);
551
552 int object_index = 0;
553 int dematerialized_index = 0;
554 for (int i = 0; i < translation_size; ++i) {
555 LOperand* value = environment->values()->at(i);
556 AddToTranslation(
557 environment, translation, value, environment->HasTaggedValueAt(i),
558 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
559 }
560}
561
562
563void LCodeGen::AddToTranslation(LEnvironment* environment,
564 Translation* translation, LOperand* op,
565 bool is_tagged, bool is_uint32,
566 int* object_index_pointer,
567 int* dematerialized_index_pointer) {
568 if (op == LEnvironment::materialization_marker()) {
569 int object_index = (*object_index_pointer)++;
570 if (environment->ObjectIsDuplicateAt(object_index)) {
571 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
572 translation->DuplicateObject(dupe_of);
573 return;
574 }
575 int object_length = environment->ObjectLengthAt(object_index);
576 if (environment->ObjectIsArgumentsAt(object_index)) {
577 translation->BeginArgumentsObject(object_length);
578 } else {
579 translation->BeginCapturedObject(object_length);
580 }
581 int dematerialized_index = *dematerialized_index_pointer;
582 int env_offset = environment->translation_size() + dematerialized_index;
583 *dematerialized_index_pointer += object_length;
584 for (int i = 0; i < object_length; ++i) {
585 LOperand* value = environment->values()->at(env_offset + i);
586 AddToTranslation(environment, translation, value,
587 environment->HasTaggedValueAt(env_offset + i),
588 environment->HasUint32ValueAt(env_offset + i),
589 object_index_pointer, dematerialized_index_pointer);
590 }
591 return;
592 }
593
594 if (op->IsStackSlot()) {
595 int index = op->index();
596 if (is_tagged) {
597 translation->StoreStackSlot(index);
598 } else if (is_uint32) {
599 translation->StoreUint32StackSlot(index);
600 } else {
601 translation->StoreInt32StackSlot(index);
602 }
603 } else if (op->IsDoubleStackSlot()) {
604 int index = op->index();
605 translation->StoreDoubleStackSlot(index);
606 } else if (op->IsRegister()) {
607 Register reg = ToRegister(op);
608 if (is_tagged) {
609 translation->StoreRegister(reg);
610 } else if (is_uint32) {
611 translation->StoreUint32Register(reg);
612 } else {
613 translation->StoreInt32Register(reg);
614 }
615 } else if (op->IsDoubleRegister()) {
616 DoubleRegister reg = ToDoubleRegister(op);
617 translation->StoreDoubleRegister(reg);
618 } else if (op->IsConstantOperand()) {
619 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
620 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
621 translation->StoreLiteral(src_index);
622 } else {
623 UNREACHABLE();
624 }
625}
626
627
628void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
629 LInstruction* instr) {
630 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
631}
632
633
634void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
635 LInstruction* instr,
636 SafepointMode safepoint_mode) {
637 DCHECK(instr != NULL);
638 __ Call(code, mode);
639 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
640
641 // Signal that we don't inline smi code before these stubs in the
642 // optimizing code generator.
643 if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
644 __ nop();
645 }
646}
647
648
649void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
650 LInstruction* instr, SaveFPRegsMode save_doubles) {
651 DCHECK(instr != NULL);
652
653 __ CallRuntime(function, num_arguments, save_doubles);
654
655 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
656}
657
658
659void LCodeGen::LoadContextFromDeferred(LOperand* context) {
660 if (context->IsRegister()) {
661 __ Move(cp, ToRegister(context));
662 } else if (context->IsStackSlot()) {
663 __ LoadP(cp, ToMemOperand(context));
664 } else if (context->IsConstantOperand()) {
665 HConstant* constant =
666 chunk_->LookupConstant(LConstantOperand::cast(context));
667 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
668 } else {
669 UNREACHABLE();
670 }
671}
672
673
674void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
675 LInstruction* instr, LOperand* context) {
676 LoadContextFromDeferred(context);
677 __ CallRuntimeSaveDoubles(id);
678 RecordSafepointWithRegisters(instr->pointer_map(), argc,
679 Safepoint::kNoLazyDeopt);
680}
681
682
683void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
684 Safepoint::DeoptMode mode) {
685 environment->set_has_been_used();
686 if (!environment->HasBeenRegistered()) {
687 // Physical stack frame layout:
688 // -x ............. -4 0 ..................................... y
689 // [incoming arguments] [spill slots] [pushed outgoing arguments]
690
691 // Layout of the environment:
692 // 0 ..................................................... size-1
693 // [parameters] [locals] [expression stack including arguments]
694
695 // Layout of the translation:
696 // 0 ........................................................ size - 1 + 4
697 // [expression stack including arguments] [locals] [4 words] [parameters]
698 // |>------------ translation_size ------------<|
699
700 int frame_count = 0;
701 int jsframe_count = 0;
702 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
703 ++frame_count;
704 if (e->frame_type() == JS_FUNCTION) {
705 ++jsframe_count;
706 }
707 }
708 Translation translation(&translations_, frame_count, jsframe_count, zone());
709 WriteTranslation(environment, &translation);
710 int deoptimization_index = deoptimizations_.length();
711 int pc_offset = masm()->pc_offset();
712 environment->Register(deoptimization_index, translation.index(),
713 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
714 deoptimizations_.Add(environment, zone());
715 }
716}
717
718
719void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
720 Deoptimizer::DeoptReason deopt_reason,
721 Deoptimizer::BailoutType bailout_type,
722 CRegister cr) {
723 LEnvironment* environment = instr->environment();
724 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
725 DCHECK(environment->HasBeenRegistered());
726 int id = environment->deoptimization_index();
727 Address entry =
728 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
729 if (entry == NULL) {
730 Abort(kBailoutWasNotPrepared);
731 return;
732 }
733
734 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
735 CRegister alt_cr = cr6;
736 Register scratch = scratch0();
737 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
738 Label no_deopt;
739 DCHECK(!alt_cr.is(cr));
740 __ Push(r4, scratch);
741 __ mov(scratch, Operand(count));
742 __ lwz(r4, MemOperand(scratch));
743 __ subi(r4, r4, Operand(1));
744 __ cmpi(r4, Operand::Zero(), alt_cr);
745 __ bne(&no_deopt, alt_cr);
746 __ li(r4, Operand(FLAG_deopt_every_n_times));
747 __ stw(r4, MemOperand(scratch));
748 __ Pop(r4, scratch);
749
750 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
751 __ bind(&no_deopt);
752 __ stw(r4, MemOperand(scratch));
753 __ Pop(r4, scratch);
754 }
755
756 if (info()->ShouldTrapOnDeopt()) {
757 __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
758 }
759
760 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
761
762 DCHECK(info()->IsStub() || frame_is_built_);
763 // Go through the jump table if we need to handle a condition, build a
764 // frame, or restore caller doubles.
765 if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
766 DeoptComment(deopt_info);
767 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
768 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
769 } else {
770 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
771 !frame_is_built_);
772 // We often have several deopts to the same entry, reuse the last
773 // jump entry if this is the case.
774 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
775 jump_table_.is_empty() ||
776 !table_entry.IsEquivalentTo(jump_table_.last())) {
777 jump_table_.Add(table_entry, zone());
778 }
779 __ b(cond, &jump_table_.last().label, cr);
780 }
781}
782
783
784void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
785 Deoptimizer::DeoptReason deopt_reason,
786 CRegister cr) {
787 Deoptimizer::BailoutType bailout_type =
788 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
789 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
790}
791
792
793void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
794 SafepointMode safepoint_mode) {
795 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
796 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
797 } else {
798 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
799 RecordSafepointWithRegisters(instr->pointer_map(), 0,
800 Safepoint::kLazyDeopt);
801 }
802}
803
804
805void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
806 int arguments, Safepoint::DeoptMode deopt_mode) {
807 DCHECK(expected_safepoint_kind_ == kind);
808
809 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
810 Safepoint safepoint =
811 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
812 for (int i = 0; i < operands->length(); i++) {
813 LOperand* pointer = operands->at(i);
814 if (pointer->IsStackSlot()) {
815 safepoint.DefinePointerSlot(pointer->index(), zone());
816 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
817 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
818 }
819 }
820}
821
822
823void LCodeGen::RecordSafepoint(LPointerMap* pointers,
824 Safepoint::DeoptMode deopt_mode) {
825 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
826}
827
828
829void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
830 LPointerMap empty_pointers(zone());
831 RecordSafepoint(&empty_pointers, deopt_mode);
832}
833
834
835void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
836 int arguments,
837 Safepoint::DeoptMode deopt_mode) {
838 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
839}
840
841
842void LCodeGen::RecordAndWritePosition(int position) {
843 if (position == RelocInfo::kNoPosition) return;
844 masm()->positions_recorder()->RecordPosition(position);
845 masm()->positions_recorder()->WriteRecordedPositions();
846}
847
848
849static const char* LabelType(LLabel* label) {
850 if (label->is_loop_header()) return " (loop header)";
851 if (label->is_osr_entry()) return " (OSR entry)";
852 return "";
853}
854
855
856void LCodeGen::DoLabel(LLabel* label) {
857 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
858 current_instruction_, label->hydrogen_value()->id(),
859 label->block_id(), LabelType(label));
860 __ bind(label->label());
861 current_block_ = label->block_id();
862 DoGap(label);
863}
864
865
866void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
867
868
869void LCodeGen::DoGap(LGap* gap) {
870 for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
871 i++) {
872 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
873 LParallelMove* move = gap->GetParallelMove(inner_pos);
874 if (move != NULL) DoParallelMove(move);
875 }
876}
877
878
879void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
880
881
882void LCodeGen::DoParameter(LParameter* instr) {
883 // Nothing to do.
884}
885
886
887void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
888 GenerateOsrPrologue();
889}
890
891
892void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
893 Register dividend = ToRegister(instr->dividend());
894 int32_t divisor = instr->divisor();
895 DCHECK(dividend.is(ToRegister(instr->result())));
896
897 // Theoretically, a variation of the branch-free code for integer division by
898 // a power of 2 (calculating the remainder via an additional multiplication
899 // (which gets simplified to an 'and') and subtraction) should be faster, and
900 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
901 // indicate that positive dividends are heavily favored, so the branching
902 // version performs better.
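  // A worked example of the branching path below: for dividend -13 and
  // divisor 8 (shift == 3), the negative case computes -(13 & 7) == -5,
  // which matches -13 % 8 in JS.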
903 HMod* hmod = instr->hydrogen();
904 int32_t shift = WhichPowerOf2Abs(divisor);
905 Label dividend_is_not_negative, done;
906 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
907 __ cmpwi(dividend, Operand::Zero());
908 __ bge(&dividend_is_not_negative);
909 if (shift) {
910 // Note that this is correct even for kMinInt operands.
911 __ neg(dividend, dividend);
912 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
913 __ neg(dividend, dividend, LeaveOE, SetRC);
914 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
915 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
916 }
917 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
918 __ li(dividend, Operand::Zero());
919 } else {
920 DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
921 }
922 __ b(&done);
923 }
924
925 __ bind(&dividend_is_not_negative);
926 if (shift) {
927 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
928 } else {
929 __ li(dividend, Operand::Zero());
930 }
931 __ bind(&done);
932}
933
934
935void LCodeGen::DoModByConstI(LModByConstI* instr) {
936 Register dividend = ToRegister(instr->dividend());
937 int32_t divisor = instr->divisor();
938 Register result = ToRegister(instr->result());
939 DCHECK(!dividend.is(result));
940
941 if (divisor == 0) {
942 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
943 return;
944 }
945
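  // The remainder is computed as dividend - trunc(dividend / |divisor|) * |divisor|,
  // with TruncatingDiv producing the truncated quotient; e.g. dividend -7,
  // divisor 3: -7 - (-2 * 3) == -1.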
946 __ TruncatingDiv(result, dividend, Abs(divisor));
947 __ mov(ip, Operand(Abs(divisor)));
948 __ mullw(result, result, ip);
949 __ sub(result, dividend, result, LeaveOE, SetRC);
950
951 // Check for negative zero.
952 HMod* hmod = instr->hydrogen();
953 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
954 Label remainder_not_zero;
955 __ bne(&remainder_not_zero, cr0);
956 __ cmpwi(dividend, Operand::Zero());
957 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
958 __ bind(&remainder_not_zero);
959 }
960}
961
962
963void LCodeGen::DoModI(LModI* instr) {
964 HMod* hmod = instr->hydrogen();
965 Register left_reg = ToRegister(instr->left());
966 Register right_reg = ToRegister(instr->right());
967 Register result_reg = ToRegister(instr->result());
968 Register scratch = scratch0();
969 bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
970 Label done;
971
972 if (can_overflow) {
973 __ li(r0, Operand::Zero()); // clear xer
974 __ mtxer(r0);
975 }
976
977 __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
978
979 // Check for x % 0.
980 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
981 __ cmpwi(right_reg, Operand::Zero());
982 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
983 }
984
985 // Check for kMinInt % -1; divw returns an undefined result, which is not what
986 // we want. We have to deopt if we care about -0, because we can't return that.
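  // (In JS, kMinInt % -1 is -0; when -0 is not observable, the code below just
  // produces 0 for that case.)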
987 if (can_overflow) {
988 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
989 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
990 } else {
991 if (CpuFeatures::IsSupported(ISELECT)) {
992 __ isel(overflow, result_reg, r0, result_reg, cr0);
993 __ boverflow(&done, cr0);
994 } else {
995 Label no_overflow_possible;
996 __ bnooverflow(&no_overflow_possible, cr0);
997 __ li(result_reg, Operand::Zero());
998 __ b(&done);
999 __ bind(&no_overflow_possible);
1000 }
1001 }
1002 }
1003
1004 __ mullw(scratch, right_reg, scratch);
1005 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
1006
1007 // If we care about -0, test if the dividend is <0 and the result is 0.
1008 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1009 __ bne(&done, cr0);
1010 __ cmpwi(left_reg, Operand::Zero());
1011 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1012 }
1013
1014 __ bind(&done);
1015}
1016
1017
1018void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1019 Register dividend = ToRegister(instr->dividend());
1020 int32_t divisor = instr->divisor();
1021 Register result = ToRegister(instr->result());
1022 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1023 DCHECK(!result.is(dividend));
1024
1025 // Check for (0 / -x) that will produce negative zero.
1026 HDiv* hdiv = instr->hydrogen();
1027 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1028 __ cmpwi(dividend, Operand::Zero());
1029 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1030 }
1031 // Check for (kMinInt / -1).
1032 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1033 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1034 __ cmpw(dividend, r0);
1035 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1036 }
1037
1038 int32_t shift = WhichPowerOf2Abs(divisor);
1039
1040 // Deoptimize if remainder will not be 0.
1041 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
1042 __ TestBitRange(dividend, shift - 1, 0, r0);
1043 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
1044 }
1045
1046 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1047 __ neg(result, dividend);
1048 return;
1049 }
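  // For a negative dividend the shift sequence below first adds (1 << shift) - 1
  // so that the arithmetic shift rounds toward zero; e.g. -7 / 4:
  // (-7 + 3) >> 2 == -1.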
1050 if (shift == 0) {
1051 __ mr(result, dividend);
1052 } else {
1053 if (shift == 1) {
1054 __ srwi(result, dividend, Operand(31));
1055 } else {
1056 __ srawi(result, dividend, 31);
1057 __ srwi(result, result, Operand(32 - shift));
1058 }
1059 __ add(result, dividend, result);
1060 __ srawi(result, result, shift);
1061 }
1062 if (divisor < 0) __ neg(result, result);
1063}
1064
1065
1066void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1067 Register dividend = ToRegister(instr->dividend());
1068 int32_t divisor = instr->divisor();
1069 Register result = ToRegister(instr->result());
1070 DCHECK(!dividend.is(result));
1071
1072 if (divisor == 0) {
1073 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1074 return;
1075 }
1076
1077 // Check for (0 / -x) that will produce negative zero.
1078 HDiv* hdiv = instr->hydrogen();
1079 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1080 __ cmpwi(dividend, Operand::Zero());
1081 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1082 }
1083
1084 __ TruncatingDiv(result, dividend, Abs(divisor));
1085 if (divisor < 0) __ neg(result, result);
1086
1087 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
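  // When not all uses truncate, the division must be exact, so the check below
  // multiplies back; e.g. dividend 7, divisor 3 gives quotient 2, and
  // 2 * 3 != 7 triggers the kLostPrecision deopt.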
1088 Register scratch = scratch0();
1089 __ mov(ip, Operand(divisor));
1090 __ mullw(scratch, result, ip);
1091 __ cmpw(scratch, dividend);
1092 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1093 }
1094}
1095
1096
1097// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1098void LCodeGen::DoDivI(LDivI* instr) {
1099 HBinaryOperation* hdiv = instr->hydrogen();
1100 const Register dividend = ToRegister(instr->dividend());
1101 const Register divisor = ToRegister(instr->divisor());
1102 Register result = ToRegister(instr->result());
1103 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1104
1105 DCHECK(!dividend.is(result));
1106 DCHECK(!divisor.is(result));
1107
1108 if (can_overflow) {
1109 __ li(r0, Operand::Zero()); // clear xer
1110 __ mtxer(r0);
1111 }
1112
1113 __ divw(result, dividend, divisor, SetOE, SetRC);
1114
1115 // Check for x / 0.
1116 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1117 __ cmpwi(divisor, Operand::Zero());
1118 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1119 }
1120
1121 // Check for (0 / -x) that will produce negative zero.
1122 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1123 Label dividend_not_zero;
1124 __ cmpwi(dividend, Operand::Zero());
1125 __ bne(&dividend_not_zero);
1126 __ cmpwi(divisor, Operand::Zero());
1127 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1128 __ bind(&dividend_not_zero);
1129 }
1130
1131 // Check for (kMinInt / -1).
1132 if (can_overflow) {
1133 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1134 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1135 } else {
1136 // When truncating, we want kMinInt / -1 = kMinInt.
1137 if (CpuFeatures::IsSupported(ISELECT)) {
1138 __ isel(overflow, result, dividend, result, cr0);
1139 } else {
1140 Label no_overflow_possible;
1141 __ bnooverflow(&no_overflow_possible, cr0);
1142 __ mr(result, dividend);
1143 __ bind(&no_overflow_possible);
1144 }
1145 }
1146 }
1147
1148 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1149 // Deoptimize if remainder is not 0.
1150 Register scratch = scratch0();
1151 __ mullw(scratch, divisor, result);
1152 __ cmpw(dividend, scratch);
1153 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1154 }
1155}
1156
1157
1158void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1159 HBinaryOperation* hdiv = instr->hydrogen();
1160 Register dividend = ToRegister(instr->dividend());
1161 Register result = ToRegister(instr->result());
1162 int32_t divisor = instr->divisor();
1163 bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
1164
1165 // If the divisor is positive, things are easy: There can be no deopts and we
1166 // can simply do an arithmetic right shift.
1167 int32_t shift = WhichPowerOf2Abs(divisor);
1168 if (divisor > 0) {
1169 if (shift || !result.is(dividend)) {
1170 __ srawi(result, dividend, shift);
1171 }
1172 return;
1173 }
1174
1175 // If the divisor is negative, we have to negate and handle edge cases.
1176 OEBit oe = LeaveOE;
1177#if V8_TARGET_ARCH_PPC64
1178 if (divisor == -1 && can_overflow) {
1179 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1180 __ cmpw(dividend, r0);
1181 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1182 }
1183#else
1184 if (can_overflow) {
1185 __ li(r0, Operand::Zero()); // clear xer
1186 __ mtxer(r0);
1187 oe = SetOE;
1188 }
1189#endif
1190
1191 __ neg(result, dividend, oe, SetRC);
1192 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1193 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
1194 }
1195
1196// If the negation could not overflow, simply shifting is OK.
1197#if !V8_TARGET_ARCH_PPC64
1198 if (!can_overflow) {
1199#endif
1200 if (shift) {
1201 __ ShiftRightArithImm(result, result, shift);
1202 }
1203 return;
1204#if !V8_TARGET_ARCH_PPC64
1205 }
1206
1207 // Dividing by -1 is basically negation, unless we overflow.
1208 if (divisor == -1) {
1209 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1210 return;
1211 }
1212
1213 Label overflow, done;
1214 __ boverflow(&overflow, cr0);
1215 __ srawi(result, result, shift);
1216 __ b(&done);
1217 __ bind(&overflow);
1218 __ mov(result, Operand(kMinInt / divisor));
1219 __ bind(&done);
1220#endif
1221}
1222
1223
1224void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1225 Register dividend = ToRegister(instr->dividend());
1226 int32_t divisor = instr->divisor();
1227 Register result = ToRegister(instr->result());
1228 DCHECK(!dividend.is(result));
1229
1230 if (divisor == 0) {
1231 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1232 return;
1233 }
1234
1235 // Check for (0 / -x) that will produce negative zero.
1236 HMathFloorOfDiv* hdiv = instr->hydrogen();
1237 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1238 __ cmpwi(dividend, Operand::Zero());
1239 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1240 }
1241
1242 // Easy case: We need no dynamic check for the dividend and the flooring
1243 // division is the same as the truncating division.
1244 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1245 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1246 __ TruncatingDiv(result, dividend, Abs(divisor));
1247 if (divisor < 0) __ neg(result, result);
1248 return;
1249 }
1250
1251 // In the general case we may need to adjust before and after the truncating
1252 // division to get a flooring division.
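  // e.g. dividend -7, divisor 2: adjust to -6, truncate to -3, then subtract 1,
  // giving floor(-3.5) == -4.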
1253 Register temp = ToRegister(instr->temp());
1254 DCHECK(!temp.is(dividend) && !temp.is(result));
1255 Label needs_adjustment, done;
1256 __ cmpwi(dividend, Operand::Zero());
1257 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1258 __ TruncatingDiv(result, dividend, Abs(divisor));
1259 if (divisor < 0) __ neg(result, result);
1260 __ b(&done);
1261 __ bind(&needs_adjustment);
1262 __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1263 __ TruncatingDiv(result, temp, Abs(divisor));
1264 if (divisor < 0) __ neg(result, result);
1265 __ subi(result, result, Operand(1));
1266 __ bind(&done);
1267}
1268
1269
1270// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1271void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1272 HBinaryOperation* hdiv = instr->hydrogen();
1273 const Register dividend = ToRegister(instr->dividend());
1274 const Register divisor = ToRegister(instr->divisor());
1275 Register result = ToRegister(instr->result());
1276 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1277
1278 DCHECK(!dividend.is(result));
1279 DCHECK(!divisor.is(result));
1280
1281 if (can_overflow) {
1282 __ li(r0, Operand::Zero()); // clear xer
1283 __ mtxer(r0);
1284 }
1285
1286 __ divw(result, dividend, divisor, SetOE, SetRC);
1287
1288 // Check for x / 0.
1289 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1290 __ cmpwi(divisor, Operand::Zero());
1291 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1292 }
1293
1294 // Check for (0 / -x) that will produce negative zero.
1295 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1296 Label dividend_not_zero;
1297 __ cmpwi(dividend, Operand::Zero());
1298 __ bne(&dividend_not_zero);
1299 __ cmpwi(divisor, Operand::Zero());
1300 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1301 __ bind(&dividend_not_zero);
1302 }
1303
1304 // Check for (kMinInt / -1).
1305 if (can_overflow) {
1306 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1307 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1308 } else {
1309 // When truncating, we want kMinInt / -1 = kMinInt.
1310 if (CpuFeatures::IsSupported(ISELECT)) {
1311 __ isel(overflow, result, dividend, result, cr0);
1312 } else {
1313 Label no_overflow_possible;
1314 __ bnooverflow(&no_overflow_possible, cr0);
1315 __ mr(result, dividend);
1316 __ bind(&no_overflow_possible);
1317 }
1318 }
1319 }
1320
1321 Label done;
1322 Register scratch = scratch0();
1323// If both operands have the same sign then we are done.
1324#if V8_TARGET_ARCH_PPC64
1325 __ xor_(scratch, dividend, divisor);
1326 __ cmpwi(scratch, Operand::Zero());
1327 __ bge(&done);
1328#else
1329 __ xor_(scratch, dividend, divisor, SetRC);
1330 __ bge(&done, cr0);
1331#endif
1332
1333 // If there is no remainder then we are done.
1334 __ mullw(scratch, divisor, result);
1335 __ cmpw(dividend, scratch);
1336 __ beq(&done);
1337
1338 // We performed a truncating division. Correct the result.
1339 __ subi(result, result, Operand(1));
1340 __ bind(&done);
1341}
1342
1343
1344void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1345 DoubleRegister addend = ToDoubleRegister(instr->addend());
1346 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1347 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1348 DoubleRegister result = ToDoubleRegister(instr->result());
1349
1350 __ fmadd(result, multiplier, multiplicand, addend);
1351}
1352
1353
1354void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1355 DoubleRegister minuend = ToDoubleRegister(instr->minuend());
1356 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1357 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1358 DoubleRegister result = ToDoubleRegister(instr->result());
1359
1360 __ fmsub(result, multiplier, multiplicand, minuend);
1361}
1362
1363
1364void LCodeGen::DoMulI(LMulI* instr) {
1365 Register scratch = scratch0();
1366 Register result = ToRegister(instr->result());
1367 // Note that result may alias left.
1368 Register left = ToRegister(instr->left());
1369 LOperand* right_op = instr->right();
1370
1371 bool bailout_on_minus_zero =
1372 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1373 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1374
1375 if (right_op->IsConstantOperand()) {
1376 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1377
1378 if (bailout_on_minus_zero && (constant < 0)) {
1379 // The case of a zero constant is handled separately below.
1380 // If the constant is negative and left is zero, the result should be -0.
1381 __ cmpi(left, Operand::Zero());
1382 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1383 }
1384
1385 switch (constant) {
1386 case -1:
1387 if (can_overflow) {
1388#if V8_TARGET_ARCH_PPC64
1389 if (instr->hydrogen()->representation().IsSmi()) {
1390#endif
1391 __ li(r0, Operand::Zero()); // clear xer
1392 __ mtxer(r0);
1393 __ neg(result, left, SetOE, SetRC);
1394 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1395#if V8_TARGET_ARCH_PPC64
1396 } else {
1397 __ neg(result, left);
1398 __ TestIfInt32(result, r0);
1399 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1400 }
1401#endif
1402 } else {
1403 __ neg(result, left);
1404 }
1405 break;
1406 case 0:
1407 if (bailout_on_minus_zero) {
1408// If left is strictly negative and the constant is zero, the
1409// result is -0. Deoptimize if required, otherwise return 0.
1410#if V8_TARGET_ARCH_PPC64
1411 if (instr->hydrogen()->representation().IsSmi()) {
1412#endif
1413 __ cmpi(left, Operand::Zero());
1414#if V8_TARGET_ARCH_PPC64
1415 } else {
1416 __ cmpwi(left, Operand::Zero());
1417 }
1418#endif
1419 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1420 }
1421 __ li(result, Operand::Zero());
1422 break;
1423 case 1:
1424 __ Move(result, left);
1425 break;
1426 default:
1427 // Multiplying by powers of two and powers of two plus or minus
1428 // one can be done faster with shifted operands.
1429 // For other constants we emit standard code.
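  // e.g. x * 8 == x << 3, x * 5 == (x << 2) + x, and x * 7 == (x << 3) - x.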
1430 int32_t mask = constant >> 31;
1431 uint32_t constant_abs = (constant + mask) ^ mask;
1432
1433 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1434 int32_t shift = WhichPowerOf2(constant_abs);
1435 __ ShiftLeftImm(result, left, Operand(shift));
1436 // Correct the sign of the result if the constant is negative.
1437 if (constant < 0) __ neg(result, result);
1438 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1439 int32_t shift = WhichPowerOf2(constant_abs - 1);
1440 __ ShiftLeftImm(scratch, left, Operand(shift));
1441 __ add(result, scratch, left);
1442 // Correct the sign of the result if the constant is negative.
1443 if (constant < 0) __ neg(result, result);
1444 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1445 int32_t shift = WhichPowerOf2(constant_abs + 1);
1446 __ ShiftLeftImm(scratch, left, Operand(shift));
1447 __ sub(result, scratch, left);
1448 // Correct the sign of the result if the constant is negative.
1449 if (constant < 0) __ neg(result, result);
1450 } else {
1451 // Generate standard code.
1452 __ mov(ip, Operand(constant));
1453 __ Mul(result, left, ip);
1454 }
1455 }
1456
1457 } else {
1458 DCHECK(right_op->IsRegister());
1459 Register right = ToRegister(right_op);
1460
1461 if (can_overflow) {
1462#if V8_TARGET_ARCH_PPC64
1463 // result = left * right.
1464 if (instr->hydrogen()->representation().IsSmi()) {
1465 __ SmiUntag(result, left);
1466 __ SmiUntag(scratch, right);
1467 __ Mul(result, result, scratch);
1468 } else {
1469 __ Mul(result, left, right);
1470 }
1471 __ TestIfInt32(result, r0);
1472 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1473 if (instr->hydrogen()->representation().IsSmi()) {
1474 __ SmiTag(result);
1475 }
1476#else
1477 // scratch:result = left * right.
1478 if (instr->hydrogen()->representation().IsSmi()) {
1479 __ SmiUntag(result, left);
1480 __ mulhw(scratch, result, right);
1481 __ mullw(result, result, right);
1482 } else {
1483 __ mulhw(scratch, left, right);
1484 __ mullw(result, left, right);
1485 }
1486 __ TestIfInt32(scratch, result, r0);
1487 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1488#endif
1489 } else {
1490 if (instr->hydrogen()->representation().IsSmi()) {
1491 __ SmiUntag(result, left);
1492 __ Mul(result, result, right);
1493 } else {
1494 __ Mul(result, left, right);
1495 }
1496 }
1497
1498 if (bailout_on_minus_zero) {
1499 Label done;
1500#if V8_TARGET_ARCH_PPC64
1501 if (instr->hydrogen()->representation().IsSmi()) {
1502#endif
1503 __ xor_(r0, left, right, SetRC);
1504 __ bge(&done, cr0);
1505#if V8_TARGET_ARCH_PPC64
1506 } else {
1507 __ xor_(r0, left, right);
1508 __ cmpwi(r0, Operand::Zero());
1509 __ bge(&done);
1510 }
1511#endif
1512 // Bail out if the result is minus zero.
1513 __ cmpi(result, Operand::Zero());
1514 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1515 __ bind(&done);
1516 }
1517 }
1518}
1519
1520
1521void LCodeGen::DoBitI(LBitI* instr) {
1522 LOperand* left_op = instr->left();
1523 LOperand* right_op = instr->right();
1524 DCHECK(left_op->IsRegister());
1525 Register left = ToRegister(left_op);
1526 Register result = ToRegister(instr->result());
1527 Operand right(no_reg);
1528
1529 if (right_op->IsStackSlot()) {
1530 right = Operand(EmitLoadRegister(right_op, ip));
1531 } else {
1532 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1533 right = ToOperand(right_op);
1534
1535 if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
1536 switch (instr->op()) {
1537 case Token::BIT_AND:
1538 __ andi(result, left, right);
1539 break;
1540 case Token::BIT_OR:
1541 __ ori(result, left, right);
1542 break;
1543 case Token::BIT_XOR:
1544 __ xori(result, left, right);
1545 break;
1546 default:
1547 UNREACHABLE();
1548 break;
1549 }
1550 return;
1551 }
1552 }
1553
1554 switch (instr->op()) {
1555 case Token::BIT_AND:
1556 __ And(result, left, right);
1557 break;
1558 case Token::BIT_OR:
1559 __ Or(result, left, right);
1560 break;
1561 case Token::BIT_XOR:
1562 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1563 __ notx(result, left);
1564 } else {
1565 __ Xor(result, left, right);
1566 }
1567 break;
1568 default:
1569 UNREACHABLE();
1570 break;
1571 }
1572}
1573
1574
1575void LCodeGen::DoShiftI(LShiftI* instr) {
1576 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1577 // result may alias either of them.
1578 LOperand* right_op = instr->right();
1579 Register left = ToRegister(instr->left());
1580 Register result = ToRegister(instr->result());
1581 Register scratch = scratch0();
1582 if (right_op->IsRegister()) {
1583 // Mask the right_op operand.
1584 __ andi(scratch, ToRegister(right_op), Operand(0x1F));
1585 switch (instr->op()) {
1586 case Token::ROR:
1587 // rotate_right(a, b) == rotate_left(a, 32 - b)
1588 __ subfic(scratch, scratch, Operand(32));
1589 __ rotlw(result, left, scratch);
1590 break;
1591 case Token::SAR:
1592 __ sraw(result, left, scratch);
1593 break;
1594 case Token::SHR:
1595 if (instr->can_deopt()) {
1596 __ srw(result, left, scratch, SetRC);
1597#if V8_TARGET_ARCH_PPC64
1598 __ extsw(result, result, SetRC);
1599#endif
1600 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
1601 } else {
1602 __ srw(result, left, scratch);
1603 }
1604 break;
1605 case Token::SHL:
1606 __ slw(result, left, scratch);
1607#if V8_TARGET_ARCH_PPC64
1608 __ extsw(result, result);
1609#endif
1610 break;
1611 default:
1612 UNREACHABLE();
1613 break;
1614 }
1615 } else {
1616 // Mask the right_op operand.
1617 int value = ToInteger32(LConstantOperand::cast(right_op));
1618 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1619 switch (instr->op()) {
1620 case Token::ROR:
1621 if (shift_count != 0) {
1622 __ rotrwi(result, left, shift_count);
1623 } else {
1624 __ Move(result, left);
1625 }
1626 break;
1627 case Token::SAR:
1628 if (shift_count != 0) {
1629 __ srawi(result, left, shift_count);
1630 } else {
1631 __ Move(result, left);
1632 }
1633 break;
1634 case Token::SHR:
1635 if (shift_count != 0) {
1636 __ srwi(result, left, Operand(shift_count));
1637 } else {
1638 if (instr->can_deopt()) {
1639 __ cmpwi(left, Operand::Zero());
1640 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
1641 }
1642 __ Move(result, left);
1643 }
1644 break;
1645 case Token::SHL:
1646 if (shift_count != 0) {
1647#if V8_TARGET_ARCH_PPC64
1648 if (instr->hydrogen_value()->representation().IsSmi()) {
1649 __ sldi(result, left, Operand(shift_count));
1650#else
1651 if (instr->hydrogen_value()->representation().IsSmi() &&
1652 instr->can_deopt()) {
1653 if (shift_count != 1) {
1654 __ slwi(result, left, Operand(shift_count - 1));
1655 __ SmiTagCheckOverflow(result, result, scratch);
1656 } else {
1657 __ SmiTagCheckOverflow(result, left, scratch);
1658 }
1659 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
1660#endif
1661 } else {
1662 __ slwi(result, left, Operand(shift_count));
1663#if V8_TARGET_ARCH_PPC64
1664 __ extsw(result, result);
1665#endif
1666 }
1667 } else {
1668 __ Move(result, left);
1669 }
1670 break;
1671 default:
1672 UNREACHABLE();
1673 break;
1674 }
1675 }
1676}
1677
1678
1679void LCodeGen::DoSubI(LSubI* instr) {
1680 LOperand* right = instr->right();
1681 Register left = ToRegister(instr->left());
1682 Register result = ToRegister(instr->result());
1683 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1684#if V8_TARGET_ARCH_PPC64
1685 const bool isInteger = !instr->hydrogen()->representation().IsSmi();
1686#else
1687 const bool isInteger = false;
1688#endif
1689 if (!can_overflow || isInteger) {
1690 if (right->IsConstantOperand()) {
1691 __ Add(result, left, -(ToOperand(right).immediate()), r0);
1692 } else {
1693 __ sub(result, left, EmitLoadRegister(right, ip));
1694 }
1695#if V8_TARGET_ARCH_PPC64
1696 if (can_overflow) {
1697 __ TestIfInt32(result, r0);
1698 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1699 }
1700#endif
1701 } else {
1702 if (right->IsConstantOperand()) {
1703 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
1704 scratch0(), r0);
1705 } else {
1706 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1707 scratch0(), r0);
1708 }
1709 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
1710 }
1711}
1712
1713
1714void LCodeGen::DoRSubI(LRSubI* instr) {
1715 LOperand* left = instr->left();
1716 LOperand* right = instr->right();
1717 LOperand* result = instr->result();
1718
1719 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1720 right->IsConstantOperand());
1721
1722 Operand right_operand = ToOperand(right);
1723 if (is_int16(right_operand.immediate())) {
1724 __ subfic(ToRegister(result), ToRegister(left), right_operand);
1725 } else {
1726 __ mov(r0, right_operand);
1727 __ sub(ToRegister(result), r0, ToRegister(left));
1728 }
1729}
1730
1731
1732void LCodeGen::DoConstantI(LConstantI* instr) {
1733 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1734}
1735
1736
1737void LCodeGen::DoConstantS(LConstantS* instr) {
1738 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1739}
1740
1741
1742void LCodeGen::DoConstantD(LConstantD* instr) {
1743 DCHECK(instr->result()->IsDoubleRegister());
1744 DoubleRegister result = ToDoubleRegister(instr->result());
1745#if V8_HOST_ARCH_IA32
1746 // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
1747 // builds.
1748 uint64_t bits = instr->bits();
1749 if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
1750 V8_UINT64_C(0x7FF0000000000000)) {
1751 uint32_t lo = static_cast<uint32_t>(bits);
1752 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1753 __ mov(ip, Operand(lo));
1754 __ mov(scratch0(), Operand(hi));
1755 __ MovInt64ToDouble(result, scratch0(), ip);
1756 return;
1757 }
1758#endif
1759 double v = instr->value();
1760 __ LoadDoubleLiteral(result, v, scratch0());
1761}
1762
1763
1764void LCodeGen::DoConstantE(LConstantE* instr) {
1765 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1766}
1767
1768
1769void LCodeGen::DoConstantT(LConstantT* instr) {
1770 Handle<Object> object = instr->value(isolate());
1771 AllowDeferredHandleDereference smi_check;
1772 __ Move(ToRegister(instr->result()), object);
1773}
1774
1775
1776MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
1777 String::Encoding encoding) {
1778 if (index->IsConstantOperand()) {
1779 int offset = ToInteger32(LConstantOperand::cast(index));
1780 if (encoding == String::TWO_BYTE_ENCODING) {
1781 offset *= kUC16Size;
1782 }
1783 STATIC_ASSERT(kCharSize == 1);
1784 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1785 }
1786 Register scratch = scratch0();
1787 DCHECK(!scratch.is(string));
1788 DCHECK(!scratch.is(ToRegister(index)));
1789 if (encoding == String::ONE_BYTE_ENCODING) {
1790 __ add(scratch, string, ToRegister(index));
1791 } else {
1792 STATIC_ASSERT(kUC16Size == 2);
1793 __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
1794 __ add(scratch, string, scratch);
1795 }
1796 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1797}
1798
1799
1800void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1801 String::Encoding encoding = instr->hydrogen()->encoding();
1802 Register string = ToRegister(instr->string());
1803 Register result = ToRegister(instr->result());
1804
1805 if (FLAG_debug_code) {
1806 Register scratch = scratch0();
1807 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1808 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1809
1810 __ andi(scratch, scratch,
1811 Operand(kStringRepresentationMask | kStringEncodingMask));
1812 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1813 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1814 __ cmpi(scratch,
1815 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
1816 : two_byte_seq_type));
1817 __ Check(eq, kUnexpectedStringType);
1818 }
1819
1820 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1821 if (encoding == String::ONE_BYTE_ENCODING) {
1822 __ lbz(result, operand);
1823 } else {
1824 __ lhz(result, operand);
1825 }
1826}
1827
1828
1829void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1830 String::Encoding encoding = instr->hydrogen()->encoding();
1831 Register string = ToRegister(instr->string());
1832 Register value = ToRegister(instr->value());
1833
1834 if (FLAG_debug_code) {
1835 Register index = ToRegister(instr->index());
1836 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1837 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1838 int encoding_mask =
1839 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1840 ? one_byte_seq_type
1841 : two_byte_seq_type;
1842 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1843 }
1844
1845 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1846 if (encoding == String::ONE_BYTE_ENCODING) {
1847 __ stb(value, operand);
1848 } else {
1849 __ sth(value, operand);
1850 }
1851}
1852
1853
1854void LCodeGen::DoAddI(LAddI* instr) {
1855 LOperand* right = instr->right();
1856 Register left = ToRegister(instr->left());
1857 Register result = ToRegister(instr->result());
1858 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1859#if V8_TARGET_ARCH_PPC64
1860 const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
1861 instr->hydrogen()->representation().IsExternal());
1862#else
1863 const bool isInteger = false;
1864#endif
1865
1866 if (!can_overflow || isInteger) {
1867 if (right->IsConstantOperand()) {
1868 __ Add(result, left, ToOperand(right).immediate(), r0);
1869 } else {
1870 __ add(result, left, EmitLoadRegister(right, ip));
1871 }
1872#if V8_TARGET_ARCH_PPC64
1873 if (can_overflow) {
1874 __ TestIfInt32(result, r0);
1875 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1876 }
1877#endif
1878 } else {
1879 if (right->IsConstantOperand()) {
1880 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
1881 scratch0(), r0);
1882 } else {
1883 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1884 scratch0(), r0);
1885 }
1886 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
1887 }
1888}
1889
1890
1891void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1892 LOperand* left = instr->left();
1893 LOperand* right = instr->right();
1894 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1895 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
1896 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1897 Register left_reg = ToRegister(left);
1898 Register right_reg = EmitLoadRegister(right, ip);
1899 Register result_reg = ToRegister(instr->result());
1900 Label return_left, done;
1901#if V8_TARGET_ARCH_PPC64
1902 if (instr->hydrogen_value()->representation().IsSmi()) {
1903#endif
1904 __ cmp(left_reg, right_reg);
1905#if V8_TARGET_ARCH_PPC64
1906 } else {
1907 __ cmpw(left_reg, right_reg);
1908 }
1909#endif
1910 if (CpuFeatures::IsSupported(ISELECT)) {
1911 __ isel(cond, result_reg, left_reg, right_reg);
1912 } else {
1913 __ b(cond, &return_left);
1914 __ Move(result_reg, right_reg);
1915 __ b(&done);
1916 __ bind(&return_left);
1917 __ Move(result_reg, left_reg);
1918 __ bind(&done);
1919 }
1920 } else {
1921 DCHECK(instr->hydrogen()->representation().IsDouble());
1922 DoubleRegister left_reg = ToDoubleRegister(left);
1923 DoubleRegister right_reg = ToDoubleRegister(right);
1924 DoubleRegister result_reg = ToDoubleRegister(instr->result());
1925 Label check_nan_left, check_zero, return_left, return_right, done;
1926 __ fcmpu(left_reg, right_reg);
1927 __ bunordered(&check_nan_left);
1928 __ beq(&check_zero);
1929 __ b(cond, &return_left);
1930 __ b(&return_right);
1931
1932 __ bind(&check_zero);
1933 __ fcmpu(left_reg, kDoubleRegZero);
1934 __ bne(&return_left); // left == right != 0.
1935
1936 // At this point, both left and right are either 0 or -0.
1937    if (operation == HMathMinMax::kMathMin) {
1938      // Min: The algorithm is: -((-L) + (-R)), which in case of L and R being
1939 // different registers is most efficiently expressed as -((-L) - R).
1940      __ fneg(left_reg, left_reg);
1941      if (left_reg.is(right_reg)) {
1942 __ fadd(result_reg, left_reg, right_reg);
1943 } else {
1944 __ fsub(result_reg, left_reg, right_reg);
1945 }
1946      __ fneg(result_reg, result_reg);
1947 } else {
1948      // Max: The following works because +0 + -0 == +0
1949      __ fadd(result_reg, left_reg, right_reg);
1950 }
1951 __ b(&done);
1952
1953 __ bind(&check_nan_left);
1954 __ fcmpu(left_reg, left_reg);
1955 __ bunordered(&return_left); // left == NaN.
1956
1957 __ bind(&return_right);
1958 if (!right_reg.is(result_reg)) {
1959 __ fmr(result_reg, right_reg);
1960 }
1961 __ b(&done);
1962
1963 __ bind(&return_left);
1964 if (!left_reg.is(result_reg)) {
1965 __ fmr(result_reg, left_reg);
1966 }
1967 __ bind(&done);
1968 }
1969}
1970
1971
1972void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1973 DoubleRegister left = ToDoubleRegister(instr->left());
1974 DoubleRegister right = ToDoubleRegister(instr->right());
1975 DoubleRegister result = ToDoubleRegister(instr->result());
1976 switch (instr->op()) {
1977 case Token::ADD:
1978 __ fadd(result, left, right);
1979 break;
1980 case Token::SUB:
1981 __ fsub(result, left, right);
1982 break;
1983 case Token::MUL:
1984 __ fmul(result, left, right);
1985 break;
1986 case Token::DIV:
1987 __ fdiv(result, left, right);
1988 break;
1989 case Token::MOD: {
1990 __ PrepareCallCFunction(0, 2, scratch0());
1991 __ MovToFloatParameters(left, right);
1992 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
1993 0, 2);
1994      // Move the result into the double result register.
1995 __ MovFromFloatResult(result);
1996 break;
1997 }
1998 default:
1999 UNREACHABLE();
2000 break;
2001 }
2002}
2003
2004
2005void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2006 DCHECK(ToRegister(instr->context()).is(cp));
2007 DCHECK(ToRegister(instr->left()).is(r4));
2008 DCHECK(ToRegister(instr->right()).is(r3));
2009 DCHECK(ToRegister(instr->result()).is(r3));
2010
2011  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2012  CallCode(code, RelocInfo::CODE_TARGET, instr);
2013}
2014
2015
2016template <class InstrType>
2017void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
2018 int left_block = instr->TrueDestination(chunk_);
2019 int right_block = instr->FalseDestination(chunk_);
2020
2021 int next_block = GetNextEmittedBlock();
2022
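  // Emit the cheapest branch shape: when one of the targets is the block
  // emitted next, fall through to it and branch only for the other target.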
2023 if (right_block == left_block || cond == al) {
2024 EmitGoto(left_block);
2025 } else if (left_block == next_block) {
2026 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
2027 } else if (right_block == next_block) {
2028 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2029 } else {
2030 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2031 __ b(chunk_->GetAssemblyLabel(right_block));
2032 }
2033}
2034
2035
2036template <class InstrType>
2037void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
2038 int true_block = instr->TrueDestination(chunk_);
2039 __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
2040}
2041
2042
2043template <class InstrType>
2044void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
2045 int false_block = instr->FalseDestination(chunk_);
2046 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2047}
2048
2049
2050void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2051
2052
2053void LCodeGen::DoBranch(LBranch* instr) {
2054 Representation r = instr->hydrogen()->value()->representation();
2055 DoubleRegister dbl_scratch = double_scratch0();
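  // CR7 bits set by fcmpu when the result is "equal" (value is zero) or
  // "unordered" (value is NaN); both cases are treated as false below.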
2056 const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2057 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
2058
2059 if (r.IsInteger32()) {
2060 DCHECK(!info()->IsStub());
2061 Register reg = ToRegister(instr->value());
2062 __ cmpwi(reg, Operand::Zero());
2063 EmitBranch(instr, ne);
2064 } else if (r.IsSmi()) {
2065 DCHECK(!info()->IsStub());
2066 Register reg = ToRegister(instr->value());
2067 __ cmpi(reg, Operand::Zero());
2068 EmitBranch(instr, ne);
2069 } else if (r.IsDouble()) {
2070 DCHECK(!info()->IsStub());
2071 DoubleRegister reg = ToDoubleRegister(instr->value());
2072 // Test the double value. Zero and NaN are false.
2073 __ fcmpu(reg, kDoubleRegZero, cr7);
2074 __ mfcr(r0);
2075 __ andi(r0, r0, Operand(crZOrNaNBits));
2076 EmitBranch(instr, eq, cr0);
2077 } else {
2078 DCHECK(r.IsTagged());
2079 Register reg = ToRegister(instr->value());
2080 HType type = instr->hydrogen()->value()->type();
2081 if (type.IsBoolean()) {
2082 DCHECK(!info()->IsStub());
2083 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2084 EmitBranch(instr, eq);
2085 } else if (type.IsSmi()) {
2086 DCHECK(!info()->IsStub());
2087 __ cmpi(reg, Operand::Zero());
2088 EmitBranch(instr, ne);
2089 } else if (type.IsJSArray()) {
2090 DCHECK(!info()->IsStub());
2091 EmitBranch(instr, al);
2092 } else if (type.IsHeapNumber()) {
2093 DCHECK(!info()->IsStub());
2094 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2095 // Test the double value. Zero and NaN are false.
2096 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2097 __ mfcr(r0);
2098 __ andi(r0, r0, Operand(crZOrNaNBits));
2099 EmitBranch(instr, eq, cr0);
2100 } else if (type.IsString()) {
2101 DCHECK(!info()->IsStub());
2102 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2103 __ cmpi(ip, Operand::Zero());
2104 EmitBranch(instr, ne);
2105 } else {
2106 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2107 // Avoid deopts in the case where we've never executed this path before.
2108 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2109
2110 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2111 // undefined -> false.
2112 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2113 __ beq(instr->FalseLabel(chunk_));
2114 }
2115 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2116 // Boolean -> its value.
2117 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2118 __ beq(instr->TrueLabel(chunk_));
2119 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2120 __ beq(instr->FalseLabel(chunk_));
2121 }
2122 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2123 // 'null' -> false.
2124 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2125 __ beq(instr->FalseLabel(chunk_));
2126 }
2127
2128 if (expected.Contains(ToBooleanStub::SMI)) {
2129 // Smis: 0 -> false, all other -> true.
2130 __ cmpi(reg, Operand::Zero());
2131 __ beq(instr->FalseLabel(chunk_));
2132 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2133 } else if (expected.NeedsMap()) {
2134 // If we need a map later and have a Smi -> deopt.
2135 __ TestIfSmi(reg, r0);
2136 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
2137 }
2138
2139 const Register map = scratch0();
2140 if (expected.NeedsMap()) {
2141 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2142
2143 if (expected.CanBeUndetectable()) {
2144 // Undetectable -> false.
2145 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2146 __ TestBit(ip, Map::kIsUndetectable, r0);
2147 __ bne(instr->FalseLabel(chunk_), cr0);
2148 }
2149 }
2150
2151 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2152 // spec object -> true.
2153 __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
2154 __ bge(instr->TrueLabel(chunk_));
2155 }
2156
2157 if (expected.Contains(ToBooleanStub::STRING)) {
2158 // String value -> false iff empty.
2159 Label not_string;
2160 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2161 __ bge(&not_string);
2162 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2163 __ cmpi(ip, Operand::Zero());
2164 __ bne(instr->TrueLabel(chunk_));
2165 __ b(instr->FalseLabel(chunk_));
2166 __ bind(&not_string);
2167 }
2168
2169 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2170 // Symbol value -> true.
2171 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2172 __ beq(instr->TrueLabel(chunk_));
2173 }
2174
2175 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2176 // SIMD value -> true.
2177 Label not_simd;
2178 __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
2179 __ beq(instr->TrueLabel(chunk_));
2180 }
2181
2182 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2183 // heap number -> false iff +0, -0, or NaN.
2184 Label not_heap_number;
2185 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2186 __ bne(&not_heap_number);
2187 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2188 // Test the double value. Zero and NaN are false.
2189 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2190 __ mfcr(r0);
2191 __ andi(r0, r0, Operand(crZOrNaNBits));
2192 __ bne(instr->FalseLabel(chunk_), cr0);
2193 __ b(instr->TrueLabel(chunk_));
2194 __ bind(&not_heap_number);
2195 }
2196
2197 if (!expected.IsGeneric()) {
2198 // We've seen something for the first time -> deopt.
2199 // This can only happen if we are not generic already.
2200 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
2201 }
2202 }
2203 }
2204}
2205
2206
2207void LCodeGen::EmitGoto(int block) {
2208 if (!IsNextEmittedBlock(block)) {
2209 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2210 }
2211}
2212
2213
2214void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2215
2216
2217Condition LCodeGen::TokenToCondition(Token::Value op) {
2218 Condition cond = kNoCondition;
2219 switch (op) {
2220 case Token::EQ:
2221 case Token::EQ_STRICT:
2222 cond = eq;
2223 break;
2224 case Token::NE:
2225 case Token::NE_STRICT:
2226 cond = ne;
2227 break;
2228 case Token::LT:
2229 cond = lt;
2230 break;
2231 case Token::GT:
2232 cond = gt;
2233 break;
2234 case Token::LTE:
2235 cond = le;
2236 break;
2237 case Token::GTE:
2238 cond = ge;
2239 break;
2240 case Token::IN:
2241 case Token::INSTANCEOF:
2242 default:
2243 UNREACHABLE();
2244 }
2245 return cond;
2246}
2247
2248
2249void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2250 LOperand* left = instr->left();
2251 LOperand* right = instr->right();
2252 bool is_unsigned =
2253 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2254 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2255 Condition cond = TokenToCondition(instr->op());
2256
2257 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2258 // We can statically evaluate the comparison.
2259 double left_val = ToDouble(LConstantOperand::cast(left));
2260 double right_val = ToDouble(LConstantOperand::cast(right));
2261    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2262                         ? instr->TrueDestination(chunk_)
2263 : instr->FalseDestination(chunk_);
2264 EmitGoto(next_block);
2265 } else {
2266 if (instr->is_double()) {
2267 // Compare left and right operands as doubles and load the
2268 // resulting flags into the normal status register.
2269 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
2270 // If a NaN is involved, i.e. the result is unordered,
2271 // jump to false block label.
2272 __ bunordered(instr->FalseLabel(chunk_));
2273 } else {
2274 if (right->IsConstantOperand()) {
2275 int32_t value = ToInteger32(LConstantOperand::cast(right));
2276 if (instr->hydrogen_value()->representation().IsSmi()) {
2277 if (is_unsigned) {
2278 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2279 } else {
2280 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2281 }
2282 } else {
2283 if (is_unsigned) {
2284 __ Cmplwi(ToRegister(left), Operand(value), r0);
2285 } else {
2286 __ Cmpwi(ToRegister(left), Operand(value), r0);
2287 }
2288 }
2289 } else if (left->IsConstantOperand()) {
2290 int32_t value = ToInteger32(LConstantOperand::cast(left));
2291 if (instr->hydrogen_value()->representation().IsSmi()) {
2292 if (is_unsigned) {
2293 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2294 } else {
2295 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2296 }
2297 } else {
2298 if (is_unsigned) {
2299 __ Cmplwi(ToRegister(right), Operand(value), r0);
2300 } else {
2301 __ Cmpwi(ToRegister(right), Operand(value), r0);
2302 }
2303 }
2304 // We commuted the operands, so commute the condition.
2305 cond = CommuteCondition(cond);
2306 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2307 if (is_unsigned) {
2308 __ cmpl(ToRegister(left), ToRegister(right));
2309 } else {
2310 __ cmp(ToRegister(left), ToRegister(right));
2311 }
2312 } else {
2313 if (is_unsigned) {
2314 __ cmplw(ToRegister(left), ToRegister(right));
2315 } else {
2316 __ cmpw(ToRegister(left), ToRegister(right));
2317 }
2318 }
2319 }
2320 EmitBranch(instr, cond);
2321 }
2322}
2323
2324
2325void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2326 Register left = ToRegister(instr->left());
2327 Register right = ToRegister(instr->right());
2328
2329 __ cmp(left, right);
2330 EmitBranch(instr, eq);
2331}
2332
2333
2334void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2335 if (instr->hydrogen()->representation().IsTagged()) {
2336 Register input_reg = ToRegister(instr->object());
2337 __ mov(ip, Operand(factory()->the_hole_value()));
2338 __ cmp(input_reg, ip);
2339 EmitBranch(instr, eq);
2340 return;
2341 }
2342
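  // In unboxed double representation the hole is a specific NaN pattern: any
  // ordered (non-NaN) value cannot be the hole, and a NaN is the hole exactly
  // when its upper 32 bits match kHoleNanUpper32.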
2343 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2344 __ fcmpu(input_reg, input_reg);
2345 EmitFalseBranch(instr, ordered);
2346
2347 Register scratch = scratch0();
2348 __ MovDoubleHighToInt(scratch, input_reg);
2349 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2350 EmitBranch(instr, eq);
2351}
2352
2353
2354Condition LCodeGen::EmitIsString(Register input, Register temp1,
2355 Label* is_not_string,
2356 SmiCheck check_needed = INLINE_SMI_CHECK) {
2357 if (check_needed == INLINE_SMI_CHECK) {
2358 __ JumpIfSmi(input, is_not_string);
2359 }
2360 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2361
2362 return lt;
2363}
2364
2365
2366void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2367 Register reg = ToRegister(instr->value());
2368 Register temp1 = ToRegister(instr->temp());
2369
2370 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2371 ? OMIT_SMI_CHECK
2372 : INLINE_SMI_CHECK;
2373 Condition true_cond =
2374 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2375
2376 EmitBranch(instr, true_cond);
2377}
2378
2379
2380void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2381 Register input_reg = EmitLoadRegister(instr->value(), ip);
2382 __ TestIfSmi(input_reg, r0);
2383 EmitBranch(instr, eq, cr0);
2384}
2385
2386
2387void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2388 Register input = ToRegister(instr->value());
2389 Register temp = ToRegister(instr->temp());
2390
2391 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2392 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2393 }
2394 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2395 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2396 __ TestBit(temp, Map::kIsUndetectable, r0);
2397 EmitBranch(instr, ne, cr0);
2398}
2399
2400
2401static Condition ComputeCompareCondition(Token::Value op) {
2402 switch (op) {
2403 case Token::EQ_STRICT:
2404 case Token::EQ:
2405 return eq;
2406 case Token::LT:
2407 return lt;
2408 case Token::GT:
2409 return gt;
2410 case Token::LTE:
2411 return le;
2412 case Token::GTE:
2413 return ge;
2414 default:
2415 UNREACHABLE();
2416 return kNoCondition;
2417 }
2418}
2419
2420
2421void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2422 DCHECK(ToRegister(instr->context()).is(cp));
2423 DCHECK(ToRegister(instr->left()).is(r4));
2424 DCHECK(ToRegister(instr->right()).is(r3));
2425
2426 Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
2427 CallCode(code, RelocInfo::CODE_TARGET, instr);
2428 __ cmpi(r3, Operand::Zero());
2429
2430 EmitBranch(instr, ComputeCompareCondition(instr->op()));
2431}
2432
2433
2434static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2435 InstanceType from = instr->from();
2436 InstanceType to = instr->to();
2437 if (from == FIRST_TYPE) return to;
2438 DCHECK(from == to || to == LAST_TYPE);
2439 return from;
2440}
2441
2442
2443static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2444 InstanceType from = instr->from();
2445 InstanceType to = instr->to();
2446 if (from == to) return eq;
2447 if (to == LAST_TYPE) return ge;
2448 if (from == FIRST_TYPE) return le;
2449 UNREACHABLE();
2450 return eq;
2451}
2452
2453
2454void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2455 Register scratch = scratch0();
2456 Register input = ToRegister(instr->value());
2457
2458 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2459 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2460 }
2461
2462 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2463 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2464}
2465
2466
2467void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2468 Register input = ToRegister(instr->value());
2469 Register result = ToRegister(instr->result());
2470
2471 __ AssertString(input);
2472
2473 __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
2474 __ IndexFromHash(result, result);
2475}
2476
2477
2478void LCodeGen::DoHasCachedArrayIndexAndBranch(
2479 LHasCachedArrayIndexAndBranch* instr) {
2480 Register input = ToRegister(instr->value());
2481 Register scratch = scratch0();
2482
2483 __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
2484 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
2485 __ and_(r0, scratch, r0, SetRC);
2486 EmitBranch(instr, eq, cr0);
2487}
2488
2489
2490// Branches to a label or falls through with the answer in flags. Trashes
2491// the temp registers, but not the input.
2492void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2493 Handle<String> class_name, Register input,
2494 Register temp, Register temp2) {
2495 DCHECK(!input.is(temp));
2496 DCHECK(!input.is(temp2));
2497 DCHECK(!temp.is(temp2));
2498
2499 __ JumpIfSmi(input, is_false);
2500
2501 __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
2502 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2503 __ beq(is_true);
2504 } else {
2505 __ beq(is_false);
2506 }
2507
2508 // Check if the constructor in the map is a function.
2509 Register instance_type = ip;
2510 __ GetMapConstructor(temp, temp, temp2, instance_type);
2511
2512 // Objects with a non-function constructor have class 'Object'.
2513 __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
2514 if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
2515 __ bne(is_true);
2516 } else {
2517 __ bne(is_false);
2518 }
2519
2520 // temp now contains the constructor function. Grab the
2521 // instance class name from there.
2522 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2523 __ LoadP(temp,
2524 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2525 // The class name we are testing against is internalized since it's a literal.
2526 // The name in the constructor is internalized because of the way the context
2527 // is booted. This routine isn't expected to work for random API-created
2528 // classes and it doesn't have to because you can't access it with natives
2529 // syntax. Since both sides are internalized it is sufficient to use an
2530 // identity comparison.
2531 __ Cmpi(temp, Operand(class_name), r0);
2532 // End with the answer in flags.
2533}
2534
2535
2536void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2537 Register input = ToRegister(instr->value());
2538 Register temp = scratch0();
2539 Register temp2 = ToRegister(instr->temp());
2540 Handle<String> class_name = instr->hydrogen()->class_name();
2541
2542 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2543 class_name, input, temp, temp2);
2544
2545 EmitBranch(instr, eq);
2546}
2547
2548
2549void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2550 Register reg = ToRegister(instr->value());
2551 Register temp = ToRegister(instr->temp());
2552
2553 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2554 __ Cmpi(temp, Operand(instr->map()), r0);
2555 EmitBranch(instr, eq);
2556}
2557
2558
2559void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2560 DCHECK(ToRegister(instr->context()).is(cp));
2561 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2562 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2563 DCHECK(ToRegister(instr->result()).is(r3));
2564 InstanceOfStub stub(isolate());
2565 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2566}
2567
2568
2569void LCodeGen::DoHasInPrototypeChainAndBranch(
2570 LHasInPrototypeChainAndBranch* instr) {
2571 Register const object = ToRegister(instr->object());
2572 Register const object_map = scratch0();
2573 Register const object_instance_type = ip;
2574 Register const object_prototype = object_map;
2575 Register const prototype = ToRegister(instr->prototype());
2576
2577 // The {object} must be a spec object. It's sufficient to know that {object}
2578 // is not a smi, since all other non-spec objects have {null} prototypes and
2579 // will be ruled out below.
2580 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2581 __ TestIfSmi(object, r0);
2582 EmitFalseBranch(instr, eq, cr0);
2583 }
2584
2585 // Loop through the {object}s prototype chain looking for the {prototype}.
2586 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2587 Label loop;
2588 __ bind(&loop);
2589
2590 // Deoptimize if the object needs to be access checked.
2591 __ lbz(object_instance_type,
2592 FieldMemOperand(object_map, Map::kBitFieldOffset));
2593 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
2594 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
2595 // Deoptimize for proxies.
2596 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2597 DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
2598 __ LoadP(object_prototype,
2599 FieldMemOperand(object_map, Map::kPrototypeOffset));
2600 __ cmp(object_prototype, prototype);
2601 EmitTrueBranch(instr, eq);
2602 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2603 EmitFalseBranch(instr, eq);
2604 __ LoadP(object_map,
2605 FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2606 __ b(&loop);
2607}
2608
2609
2610void LCodeGen::DoCmpT(LCmpT* instr) {
2611 DCHECK(ToRegister(instr->context()).is(cp));
2612 Token::Value op = instr->op();
2613
2614  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2615  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2616  // This instruction also signals that no smi code was inlined.
2617 __ cmpi(r3, Operand::Zero());
2618
2619 Condition condition = ComputeCompareCondition(op);
2620 if (CpuFeatures::IsSupported(ISELECT)) {
2621 __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2622 __ LoadRoot(r5, Heap::kFalseValueRootIndex);
2623 __ isel(condition, ToRegister(instr->result()), r4, r5);
2624 } else {
2625 Label true_value, done;
2626
2627 __ b(condition, &true_value);
2628
2629 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2630 __ b(&done);
2631
2632 __ bind(&true_value);
2633 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2634
2635 __ bind(&done);
2636 }
2637}
2638
2639
2640void LCodeGen::DoReturn(LReturn* instr) {
2641 if (FLAG_trace && info()->IsOptimizing()) {
2642 // Push the return value on the stack as the parameter.
2643 // Runtime::TraceExit returns its parameter in r3. We're leaving the code
2644    // managed by the register allocator and tearing down the frame, so it's
2645 // safe to write to the context register.
2646 __ push(r3);
2647 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2648 __ CallRuntime(Runtime::kTraceExit);
2649 }
2650 if (info()->saves_caller_doubles()) {
2651 RestoreCallerDoubles();
2652 }
2653 if (instr->has_constant_parameter_count()) {
2654 int parameter_count = ToInteger32(instr->constant_parameter_count());
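    // The "+ 1" accounts for the receiver slot pushed along with the
    // parameters.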
2655 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2656 if (NeedsEagerFrame()) {
2657 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
2658 } else if (sp_delta != 0) {
2659 __ addi(sp, sp, Operand(sp_delta));
2660 }
2661 } else {
2662 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2663 Register reg = ToRegister(instr->parameter_count());
2664 // The argument count parameter is a smi
2665 if (NeedsEagerFrame()) {
2666 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2667 }
2668 __ SmiToPtrArrayOffset(r0, reg);
2669 __ add(sp, sp, r0);
2670 }
2671
2672 __ blr();
2673}
2674
2675
2676template <class T>
2677void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2678 Register vector_register = ToRegister(instr->temp_vector());
2679 Register slot_register = LoadDescriptor::SlotRegister();
2680 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2681 DCHECK(slot_register.is(r3));
2682
2683 AllowDeferredHandleDereference vector_structure_check;
2684 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2685 __ Move(vector_register, vector);
2686 // No need to allocate this register.
2687 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2688 int index = vector->GetIndex(slot);
2689 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
2690}
2691
2692
2693template <class T>
2694void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2695 Register vector_register = ToRegister(instr->temp_vector());
2696 Register slot_register = ToRegister(instr->temp_slot());
2697
2698 AllowDeferredHandleDereference vector_structure_check;
2699 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2700 __ Move(vector_register, vector);
2701 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2702 int index = vector->GetIndex(slot);
2703 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
2704}
2705
2706
2707void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2708 DCHECK(ToRegister(instr->context()).is(cp));
2709 DCHECK(ToRegister(instr->global_object())
2710 .is(LoadDescriptor::ReceiverRegister()));
2711 DCHECK(ToRegister(instr->result()).is(r3));
2712
2713 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2714 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2715  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2716 isolate(), instr->typeof_mode(), PREMONOMORPHIC)
2717 .code();
2718  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2719}
2720
2721
2722void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2723 Register context = ToRegister(instr->context());
2724 Register result = ToRegister(instr->result());
2725 __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
2726 if (instr->hydrogen()->RequiresHoleCheck()) {
2727 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2728 if (instr->hydrogen()->DeoptimizesOnHole()) {
2729 __ cmp(result, ip);
2730 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2731 } else {
2732 if (CpuFeatures::IsSupported(ISELECT)) {
2733 Register scratch = scratch0();
2734 __ mov(scratch, Operand(factory()->undefined_value()));
2735 __ cmp(result, ip);
2736 __ isel(eq, result, scratch, result);
2737 } else {
2738 Label skip;
2739 __ cmp(result, ip);
2740 __ bne(&skip);
2741 __ mov(result, Operand(factory()->undefined_value()));
2742 __ bind(&skip);
2743 }
2744 }
2745 }
2746}
2747
2748
2749void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2750 Register context = ToRegister(instr->context());
2751 Register value = ToRegister(instr->value());
2752 Register scratch = scratch0();
2753 MemOperand target = ContextMemOperand(context, instr->slot_index());
2754
2755 Label skip_assignment;
2756
2757 if (instr->hydrogen()->RequiresHoleCheck()) {
2758 __ LoadP(scratch, target);
2759 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2760 __ cmp(scratch, ip);
2761 if (instr->hydrogen()->DeoptimizesOnHole()) {
2762 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2763 } else {
2764 __ bne(&skip_assignment);
2765 }
2766 }
2767
2768 __ StoreP(value, target, r0);
2769 if (instr->hydrogen()->NeedsWriteBarrier()) {
2770 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2771 ? OMIT_SMI_CHECK
2772 : INLINE_SMI_CHECK;
2773 __ RecordWriteContextSlot(context, target.offset(), value, scratch,
2774 GetLinkRegisterState(), kSaveFPRegs,
2775 EMIT_REMEMBERED_SET, check_needed);
2776 }
2777
2778 __ bind(&skip_assignment);
2779}
2780
2781
2782void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2783 HObjectAccess access = instr->hydrogen()->access();
2784 int offset = access.offset();
2785 Register object = ToRegister(instr->object());
2786
2787 if (access.IsExternalMemory()) {
2788 Register result = ToRegister(instr->result());
2789 MemOperand operand = MemOperand(object, offset);
2790 __ LoadRepresentation(result, operand, access.representation(), r0);
2791 return;
2792 }
2793
2794 if (instr->hydrogen()->representation().IsDouble()) {
2795 DCHECK(access.IsInobject());
2796 DoubleRegister result = ToDoubleRegister(instr->result());
2797 __ lfd(result, FieldMemOperand(object, offset));
2798 return;
2799 }
2800
2801 Register result = ToRegister(instr->result());
2802 if (!access.IsInobject()) {
2803 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2804 object = result;
2805 }
2806
2807 Representation representation = access.representation();
2808
2809#if V8_TARGET_ARCH_PPC64
2810 // 64-bit Smi optimization
2811 if (representation.IsSmi() &&
2812 instr->hydrogen()->representation().IsInteger32()) {
2813 // Read int value directly from upper half of the smi.
2814 offset = SmiWordOffset(offset);
2815 representation = Representation::Integer32();
2816 }
2817#endif
2818
2819 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
2820 r0);
2821}
2822
2823
2824void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2825 DCHECK(ToRegister(instr->context()).is(cp));
2826 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2827 DCHECK(ToRegister(instr->result()).is(r3));
2828
2829 // Name is always in r5.
2830 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2831 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2832  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2833 isolate(), NOT_INSIDE_TYPEOF,
2834 instr->hydrogen()->initialization_state())
2835 .code();
2836  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2837}
2838
2839
2840void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2841 Register scratch = scratch0();
2842 Register function = ToRegister(instr->function());
2843 Register result = ToRegister(instr->result());
2844
2845 // Get the prototype or initial map from the function.
2846 __ LoadP(result,
2847 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2848
2849 // Check that the function has a prototype or an initial map.
2850 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2851 __ cmp(result, ip);
2852 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2853
2854 // If the function does not have an initial map, we're done.
2855 if (CpuFeatures::IsSupported(ISELECT)) {
2856 // Get the prototype from the initial map (optimistic).
2857 __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
2858 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2859 __ isel(eq, result, ip, result);
2860 } else {
2861 Label done;
2862 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2863 __ bne(&done);
2864
2865 // Get the prototype from the initial map.
2866 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2867
2868 // All done.
2869 __ bind(&done);
2870 }
2871}
2872
2873
2874void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2875 Register result = ToRegister(instr->result());
2876 __ LoadRoot(result, instr->index());
2877}
2878
2879
2880void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2881 Register arguments = ToRegister(instr->arguments());
2882 Register result = ToRegister(instr->result());
2883 // There are two words between the frame pointer and the last argument.
2884  // Subtracting from length accounts for one of them; add one more.
2885 if (instr->length()->IsConstantOperand()) {
2886 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2887 if (instr->index()->IsConstantOperand()) {
2888 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2889 int index = (const_length - const_index) + 1;
2890 __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
2891 } else {
2892 Register index = ToRegister(instr->index());
2893 __ subfic(result, index, Operand(const_length + 1));
2894 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2895 __ LoadPX(result, MemOperand(arguments, result));
2896 }
2897 } else if (instr->index()->IsConstantOperand()) {
2898 Register length = ToRegister(instr->length());
2899 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2900 int loc = const_index - 1;
2901 if (loc != 0) {
2902 __ subi(result, length, Operand(loc));
2903 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2904 __ LoadPX(result, MemOperand(arguments, result));
2905 } else {
2906 __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
2907 __ LoadPX(result, MemOperand(arguments, result));
2908 }
2909 } else {
2910 Register length = ToRegister(instr->length());
2911 Register index = ToRegister(instr->index());
2912 __ sub(result, length, index);
2913 __ addi(result, result, Operand(1));
2914 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2915 __ LoadPX(result, MemOperand(arguments, result));
2916 }
2917}
2918
2919
2920void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2921 Register external_pointer = ToRegister(instr->elements());
2922 Register key = no_reg;
2923 ElementsKind elements_kind = instr->elements_kind();
2924 bool key_is_constant = instr->key()->IsConstantOperand();
2925 int constant_key = 0;
2926 if (key_is_constant) {
2927 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2928 if (constant_key & 0xF0000000) {
2929 Abort(kArrayIndexConstantValueTooBig);
2930 }
2931 } else {
2932 key = ToRegister(instr->key());
2933 }
2934 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2935 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
2936 int base_offset = instr->base_offset();
2937
2938 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2939 DoubleRegister result = ToDoubleRegister(instr->result());
2940 if (key_is_constant) {
2941 __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
2942 r0);
2943 } else {
2944 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
2945 __ add(scratch0(), external_pointer, r0);
2946 }
2947 if (elements_kind == FLOAT32_ELEMENTS) {
2948 __ lfs(result, MemOperand(scratch0(), base_offset));
2949    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
2950 __ lfd(result, MemOperand(scratch0(), base_offset));
2951 }
2952 } else {
2953 Register result = ToRegister(instr->result());
2954 MemOperand mem_operand =
2955 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
2956 constant_key, element_size_shift, base_offset);
2957 switch (elements_kind) {
2958 case INT8_ELEMENTS:
2959 if (key_is_constant) {
2960 __ LoadByte(result, mem_operand, r0);
2961 } else {
2962 __ lbzx(result, mem_operand);
2963 }
2964 __ extsb(result, result);
2965 break;
2966 case UINT8_ELEMENTS:
2967 case UINT8_CLAMPED_ELEMENTS:
2968 if (key_is_constant) {
2969 __ LoadByte(result, mem_operand, r0);
2970 } else {
2971 __ lbzx(result, mem_operand);
2972 }
2973 break;
2974 case INT16_ELEMENTS:
2975 if (key_is_constant) {
2976 __ LoadHalfWordArith(result, mem_operand, r0);
2977 } else {
2978 __ lhax(result, mem_operand);
2979 }
2980 break;
2981 case UINT16_ELEMENTS:
2982 if (key_is_constant) {
2983 __ LoadHalfWord(result, mem_operand, r0);
2984 } else {
2985 __ lhzx(result, mem_operand);
2986 }
2987 break;
2988 case INT32_ELEMENTS:
2989 if (key_is_constant) {
2990 __ LoadWordArith(result, mem_operand, r0);
2991 } else {
2992 __ lwax(result, mem_operand);
2993 }
2994 break;
2995 case UINT32_ELEMENTS:
2996 if (key_is_constant) {
2997 __ LoadWord(result, mem_operand, r0);
2998 } else {
2999 __ lwzx(result, mem_operand);
3000 }
3001 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3002 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3003 __ cmplw(result, r0);
3004 DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
3005 }
3006 break;
3007 case FLOAT32_ELEMENTS:
3008 case FLOAT64_ELEMENTS:
3009 case FAST_HOLEY_DOUBLE_ELEMENTS:
3010 case FAST_HOLEY_ELEMENTS:
3011 case FAST_HOLEY_SMI_ELEMENTS:
3012 case FAST_DOUBLE_ELEMENTS:
3013 case FAST_ELEMENTS:
3014 case FAST_SMI_ELEMENTS:
3015 case DICTIONARY_ELEMENTS:
3016 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3017 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3018      case FAST_STRING_WRAPPER_ELEMENTS:
3019 case SLOW_STRING_WRAPPER_ELEMENTS:
3020 case NO_ELEMENTS:
3021        UNREACHABLE();
3022 break;
3023 }
3024 }
3025}
3026
3027
3028void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3029 Register elements = ToRegister(instr->elements());
3030 bool key_is_constant = instr->key()->IsConstantOperand();
3031 Register key = no_reg;
3032 DoubleRegister result = ToDoubleRegister(instr->result());
3033 Register scratch = scratch0();
3034
3035 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3036 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3037 int constant_key = 0;
3038 if (key_is_constant) {
3039 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3040 if (constant_key & 0xF0000000) {
3041 Abort(kArrayIndexConstantValueTooBig);
3042 }
3043 } else {
3044 key = ToRegister(instr->key());
3045 }
3046
3047 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
3048 if (!key_is_constant) {
3049 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3050 __ add(scratch, elements, r0);
3051 elements = scratch;
3052 }
3053 if (!is_int16(base_offset)) {
3054 __ Add(scratch, elements, base_offset, r0);
3055 base_offset = 0;
3056 elements = scratch;
3057 }
3058 __ lfd(result, MemOperand(elements, base_offset));
3059
3060 if (instr->hydrogen()->RequiresHoleCheck()) {
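    // The hole is stored as a NaN with a distinctive upper word, so it is
    // enough to load the exponent word of the element and compare it with
    // kHoleNanUpper32.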
3061 if (is_int16(base_offset + Register::kExponentOffset)) {
3062 __ lwz(scratch,
3063 MemOperand(elements, base_offset + Register::kExponentOffset));
3064 } else {
3065 __ addi(scratch, elements, Operand(base_offset));
3066 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
3067 }
3068 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
3069 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3070 }
3071}
3072
3073
3074void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3075 HLoadKeyed* hinstr = instr->hydrogen();
3076 Register elements = ToRegister(instr->elements());
3077 Register result = ToRegister(instr->result());
3078 Register scratch = scratch0();
3079 Register store_base = scratch;
3080 int offset = instr->base_offset();
3081
3082 if (instr->key()->IsConstantOperand()) {
3083 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3084 offset += ToInteger32(const_operand) * kPointerSize;
3085 store_base = elements;
3086 } else {
3087 Register key = ToRegister(instr->key());
3088 // Even though the HLoadKeyed instruction forces the input
3089 // representation for the key to be an integer, the input gets replaced
3090 // during bound check elimination with the index argument to the bounds
3091 // check, which can be tagged, so that case must be handled here, too.
3092 if (hinstr->key()->representation().IsSmi()) {
3093 __ SmiToPtrArrayOffset(r0, key);
3094 } else {
3095 __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
3096 }
3097 __ add(scratch, elements, r0);
3098 }
3099
3100 bool requires_hole_check = hinstr->RequiresHoleCheck();
3101 Representation representation = hinstr->representation();
3102
3103#if V8_TARGET_ARCH_PPC64
3104 // 64-bit Smi optimization
3105 if (representation.IsInteger32() &&
3106 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3107 DCHECK(!requires_hole_check);
3108 // Read int value directly from upper half of the smi.
3109 offset = SmiWordOffset(offset);
3110 }
3111#endif
3112
3113 __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
3114 r0);
3115
3116 // Check for the hole value.
3117 if (requires_hole_check) {
3118 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3119 __ TestIfSmi(result, r0);
3120 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
3121 } else {
3122 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3123 __ cmp(result, scratch);
3124 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3125 }
3126 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3127 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3128 Label done;
3129 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3130 __ cmp(result, scratch);
3131 __ bne(&done);
3132 if (info()->IsStub()) {
3133 // A stub can safely convert the hole to undefined only if the array
3134 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3135 // it needs to bail out.
3136 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3137 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
3138 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
3139 DeoptimizeIf(ne, instr, Deoptimizer::kHole);
3140 }
3141 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3142 __ bind(&done);
3143 }
3144}
3145
3146
3147void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3148 if (instr->is_fixed_typed_array()) {
3149 DoLoadKeyedExternalArray(instr);
3150 } else if (instr->hydrogen()->representation().IsDouble()) {
3151 DoLoadKeyedFixedDoubleArray(instr);
3152 } else {
3153 DoLoadKeyedFixedArray(instr);
3154 }
3155}
3156
3157
3158MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
3159 bool key_is_constant, bool key_is_smi,
3160 int constant_key,
3161 int element_size_shift,
3162 int base_offset) {
3163 Register scratch = scratch0();
3164
3165 if (key_is_constant) {
3166 return MemOperand(base, (constant_key << element_size_shift) + base_offset);
3167 }
3168
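  // A register key may still need adjusting: smi keys carry the tag in their
  // encoding, and elements wider than one byte need the key scaled by the
  // element size before it can be used as an index.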
3169 bool needs_shift =
3170 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
3171
3172 if (!(base_offset || needs_shift)) {
3173 return MemOperand(base, key);
3174 }
3175
3176 if (needs_shift) {
3177 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3178 key = scratch;
3179 }
3180
3181 if (base_offset) {
3182 __ Add(scratch, key, base_offset, r0);
3183 }
3184
3185 return MemOperand(base, scratch);
3186}
3187
3188
3189void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3190 DCHECK(ToRegister(instr->context()).is(cp));
3191 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3192 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3193
3194 if (instr->hydrogen()->HasVectorAndSlot()) {
3195 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3196 }
3197
3198 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3199                        isolate(), instr->hydrogen()->initialization_state())
3200 .code();
3201  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3202}
3203
3204
3205void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3206 Register scratch = scratch0();
3207 Register result = ToRegister(instr->result());
3208
3209 if (instr->hydrogen()->from_inlined()) {
3210 __ subi(result, sp, Operand(2 * kPointerSize));
3211 } else {
3212 // Check if the calling frame is an arguments adaptor frame.
3213 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3214 __ LoadP(result,
3215 MemOperand(scratch, StandardFrameConstants::kContextOffset));
3216 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3217
3218 // Result is the frame pointer for the frame if not adapted and for the real
3219 // frame below the adaptor frame if adapted.
3220 if (CpuFeatures::IsSupported(ISELECT)) {
3221 __ isel(eq, result, scratch, fp);
3222 } else {
3223 Label done, adapted;
3224 __ beq(&adapted);
3225 __ mr(result, fp);
3226 __ b(&done);
3227
3228 __ bind(&adapted);
3229 __ mr(result, scratch);
3230 __ bind(&done);
3231 }
3232 }
3233}
3234
3235
3236void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3237 Register elem = ToRegister(instr->elements());
3238 Register result = ToRegister(instr->result());
3239
3240 Label done;
3241
3242 // If no arguments adaptor frame the number of arguments is fixed.
3243 __ cmp(fp, elem);
3244 __ mov(result, Operand(scope()->num_parameters()));
3245 __ beq(&done);
3246
3247 // Arguments adaptor frame present. Get argument length from there.
3248 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3249 __ LoadP(result,
3250 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3251 __ SmiUntag(result);
3252
3253 // Argument length is in result register.
3254 __ bind(&done);
3255}
3256
3257
3258void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3259 Register receiver = ToRegister(instr->receiver());
3260 Register function = ToRegister(instr->function());
3261 Register result = ToRegister(instr->result());
3262 Register scratch = scratch0();
3263
3264 // If the receiver is null or undefined, we have to pass the global
3265 // object as a receiver to normal functions. Values have to be
3266 // passed unchanged to builtins and strict-mode functions.
3267 Label global_object, result_in_receiver;
3268
3269 if (!instr->hydrogen()->known_function()) {
3270 // Do not transform the receiver to object for strict mode
3271 // functions or builtins.
3272 __ LoadP(scratch,
3273 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3274 __ lwz(scratch,
3275 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3276 __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
3277 (1 << SharedFunctionInfo::kNativeBit)));
3278 __ bne(&result_in_receiver, cr0);
3279 }
3280
3281 // Normal function. Replace undefined or null with global receiver.
3282 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3283 __ cmp(receiver, scratch);
3284 __ beq(&global_object);
3285 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3286 __ cmp(receiver, scratch);
3287 __ beq(&global_object);
3288
3289 // Deoptimize if the receiver is not a JS object.
3290 __ TestIfSmi(receiver, r0);
3291 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
3292 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
3293 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3294
3295 __ b(&result_in_receiver);
3296 __ bind(&global_object);
3297 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3298 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3299 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3300
3301 if (result.is(receiver)) {
3302 __ bind(&result_in_receiver);
3303 } else {
3304 Label result_ok;
3305 __ b(&result_ok);
3306 __ bind(&result_in_receiver);
3307 __ mr(result, receiver);
3308 __ bind(&result_ok);
3309 }
3310}
3311
3312
3313void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3314 Register receiver = ToRegister(instr->receiver());
3315 Register function = ToRegister(instr->function());
3316 Register length = ToRegister(instr->length());
3317 Register elements = ToRegister(instr->elements());
3318 Register scratch = scratch0();
3319 DCHECK(receiver.is(r3)); // Used for parameter count.
3320 DCHECK(function.is(r4)); // Required by InvokeFunction.
3321 DCHECK(ToRegister(instr->result()).is(r3));
3322
3323 // Copy the arguments to this function possibly from the
3324 // adaptor frame below it.
3325 const uint32_t kArgumentsLimit = 1 * KB;
3326 __ cmpli(length, Operand(kArgumentsLimit));
3327 DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
3328
3329 // Push the receiver and use the register to keep the original
3330 // number of arguments.
3331 __ push(receiver);
3332 __ mr(receiver, length);
3333 // The arguments are located at a one-pointer-size offset from elements.
3334 __ addi(elements, elements, Operand(1 * kPointerSize));
3335
3336 // Loop through the arguments pushing them onto the execution
3337 // stack.
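// The loop below runs on the PPC count register: mtctr loads the argument
// count and bdnz decrements it and branches while it is non-zero, so length
// itself is only used to compute each element's offset.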
3338 Label invoke, loop;
3339 // length is a small non-negative integer, due to the test above.
3340 __ cmpi(length, Operand::Zero());
3341 __ beq(&invoke);
3342 __ mtctr(length);
3343 __ bind(&loop);
3344 __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
3345 __ LoadPX(scratch, MemOperand(elements, r0));
3346 __ push(scratch);
3347 __ addi(length, length, Operand(-1));
3348 __ bdnz(&loop);
3349
3350 __ bind(&invoke);
3351 DCHECK(instr->HasPointerMap());
3352 LPointerMap* pointers = instr->pointer_map();
3353 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3354 // The number of arguments is stored in receiver, which is r3, as expected
3355 // by InvokeFunction.
3356 ParameterCount actual(receiver);
3357 __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
3358 safepoint_generator);
3359}
3360
3361
3362void LCodeGen::DoPushArgument(LPushArgument* instr) {
3363 LOperand* argument = instr->value();
3364 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3365 Abort(kDoPushArgumentNotImplementedForDoubleType);
3366 } else {
3367 Register argument_reg = EmitLoadRegister(argument, ip);
3368 __ push(argument_reg);
3369 }
3370}
3371
3372
3373void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
3374
3375
3376void LCodeGen::DoThisFunction(LThisFunction* instr) {
3377 Register result = ToRegister(instr->result());
3378 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3379}
3380
3381
3382void LCodeGen::DoContext(LContext* instr) {
3383 // If there is a non-return use, the context must be moved to a register.
3384 Register result = ToRegister(instr->result());
3385 if (info()->IsOptimizing()) {
3386 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3387 } else {
3388 // If there is no frame, the context must be in cp.
3389 DCHECK(result.is(cp));
3390 }
3391}
3392
3393
3394void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3395 DCHECK(ToRegister(instr->context()).is(cp));
3396 __ Move(scratch0(), instr->hydrogen()->pairs());
3397 __ push(scratch0());
3398 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3399 __ push(scratch0());
3400 CallRuntime(Runtime::kDeclareGlobals, instr);
3401}
3402
3403
3404void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3405 int formal_parameter_count, int arity,
3406 LInstruction* instr) {
3407 bool dont_adapt_arguments =
3408 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3409 bool can_invoke_directly =
3410 dont_adapt_arguments || formal_parameter_count == arity;
3411
3412 Register function_reg = r4;
3413
3414 LPointerMap* pointers = instr->pointer_map();
3415
3416 if (can_invoke_directly) {
3417 // Change context.
3418 __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3419
3420 // Always initialize new target and number of actual arguments.
3421 __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
3422 __ mov(r3, Operand(arity));
3423
3424 bool is_self_call = function.is_identical_to(info()->closure());
3425
3426 // Invoke function.
3427 if (is_self_call) {
3428 __ CallSelf();
3429 } else {
3430 __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3431 __ CallJSEntry(ip);
3432 }
3433
3434 // Set up deoptimization.
3435 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3436 } else {
3437 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3438 ParameterCount count(arity);
3439 ParameterCount expected(formal_parameter_count);
3440 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3441 }
3442}
3443
3444
3445void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3446 DCHECK(instr->context() != NULL);
3447 DCHECK(ToRegister(instr->context()).is(cp));
3448 Register input = ToRegister(instr->value());
3449 Register result = ToRegister(instr->result());
3450 Register scratch = scratch0();
3451
3452 // Deoptimize if not a heap number.
3453 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3454 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3455 __ cmp(scratch, ip);
3456 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3457
3458 Label done;
3459 Register exponent = scratch0();
3460 scratch = no_reg;
3461 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3462 // Check the sign of the argument. If the argument is positive, just
3463 // return it.
3464 __ cmpwi(exponent, Operand::Zero());
3465 // Move the input to the result if necessary.
3466 __ Move(result, input);
3467 __ bge(&done);
3468
3469 // Input is negative. Reverse its sign.
3470 // Preserve the value of all registers.
3471 {
3472 PushSafepointRegistersScope scope(this);
3473
3474 // Registers were saved at the safepoint, so we can use
3475 // many scratch registers.
3476 Register tmp1 = input.is(r4) ? r3 : r4;
3477 Register tmp2 = input.is(r5) ? r3 : r5;
3478 Register tmp3 = input.is(r6) ? r3 : r6;
3479 Register tmp4 = input.is(r7) ? r3 : r7;
3480
3481 // exponent: floating point exponent value.
3482
3483 Label allocated, slow;
3484 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3485 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3486 __ b(&allocated);
3487
3488 // Slow case: Call the runtime system to do the number allocation.
3489 __ bind(&slow);
3490
3491 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3492 instr->context());
3493 // Set the pointer to the new heap number in tmp1.
3494 if (!tmp1.is(r3)) __ mr(tmp1, r3);
3495 // Restore input after the call to the runtime.
3496 __ LoadFromSafepointRegisterSlot(input, input);
3497 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3498
3499 __ bind(&allocated);
3500 // exponent: floating point exponent value.
3501 // tmp1: allocated heap number.
3502 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
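// Worked example, assuming the usual IEEE-754 layout: the high word of -3.0
// is 0xC0080000; clearing the top (sign) bit with clrlwi gives 0x40080000,
// the high word of +3.0. The mantissa word is copied over unchanged.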
3503 __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
3504 __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3505 __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3506 __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3507
3508 __ StoreToSafepointRegisterSlot(tmp1, result);
3509 }
3510
3511 __ bind(&done);
3512}
3513
3514
3515void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3516 Register input = ToRegister(instr->value());
3517 Register result = ToRegister(instr->result());
3518 Label done;
3519 __ cmpi(input, Operand::Zero());
3520 __ Move(result, input);
3521 __ bge(&done);
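// Negation can only overflow for the most negative value (kMinInt). XER is
// cleared first because its summary-overflow bit is sticky; neg with
// SetOE/SetRC then reflects any overflow into cr0 for the deopt check below.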
3522 __ li(r0, Operand::Zero()); // clear xer
3523 __ mtxer(r0);
3524 __ neg(result, result, SetOE, SetRC);
3525 // Deoptimize on overflow.
3526 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
3527 __ bind(&done);
3528}
3529
3530
3531#if V8_TARGET_ARCH_PPC64
3532void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3533 Register input = ToRegister(instr->value());
3534 Register result = ToRegister(instr->result());
3535 Label done;
3536 __ cmpwi(input, Operand::Zero());
3537 __ Move(result, input);
3538 __ bge(&done);
3539
3540 // Deoptimize on overflow.
3541 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3542 __ cmpw(input, r0);
3543 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
3544
3545 __ neg(result, result);
3546 __ bind(&done);
3547}
3548#endif
3549
3550
3551void LCodeGen::DoMathAbs(LMathAbs* instr) {
3552 // Class for deferred case.
3553 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3554 public:
3555 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3556 : LDeferredCode(codegen), instr_(instr) {}
3557 void Generate() override {
3558 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3559 }
3560 LInstruction* instr() override { return instr_; }
3561
3562 private:
3563 LMathAbs* instr_;
3564 };
3565
3566 Representation r = instr->hydrogen()->value()->representation();
3567 if (r.IsDouble()) {
3568 DoubleRegister input = ToDoubleRegister(instr->value());
3569 DoubleRegister result = ToDoubleRegister(instr->result());
3570 __ fabs(result, input);
3571#if V8_TARGET_ARCH_PPC64
3572 } else if (r.IsInteger32()) {
3573 EmitInteger32MathAbs(instr);
3574 } else if (r.IsSmi()) {
3575#else
3576 } else if (r.IsSmiOrInteger32()) {
3577#endif
3578 EmitMathAbs(instr);
3579 } else {
3580 // Representation is tagged.
3581 DeferredMathAbsTaggedHeapNumber* deferred =
3582 new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3583 Register input = ToRegister(instr->value());
3584 // Smi check.
3585 __ JumpIfNotSmi(input, deferred->entry());
3586 // If smi, handle it directly.
3587 EmitMathAbs(instr);
3588 __ bind(deferred->exit());
3589 }
3590}
3591
3592
3593void LCodeGen::DoMathFloor(LMathFloor* instr) {
3594 DoubleRegister input = ToDoubleRegister(instr->value());
3595 Register result = ToRegister(instr->result());
3596 Register input_high = scratch0();
3597 Register scratch = ip;
3598 Label done, exact;
3599
3600 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
3601 &exact);
3602 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3603
3604 __ bind(&exact);
3605 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3606 // Test for -0.
3607 __ cmpi(result, Operand::Zero());
3608 __ bne(&done);
3609 __ cmpwi(input_high, Operand::Zero());
3610 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
3611 }
3612 __ bind(&done);
3613}
3614
3615
3616void LCodeGen::DoMathRound(LMathRound* instr) {
3617 DoubleRegister input = ToDoubleRegister(instr->value());
3618 Register result = ToRegister(instr->result());
3619 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3620 DoubleRegister input_plus_dot_five = double_scratch1;
3621 Register scratch1 = scratch0();
3622 Register scratch2 = ip;
3623 DoubleRegister dot_five = double_scratch0();
3624 Label convert, done;
3625
3626 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3627 __ fabs(double_scratch1, input);
3628 __ fcmpu(double_scratch1, dot_five);
3629 DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
3630 // If input is in [-0.5, -0], the result is -0.
3631 // If input is in [+0, +0.5[, the result is +0.
3632 // If the input is +0.5, the result is 1.
3633 __ bgt(&convert); // Out of [-0.5, +0.5].
3634 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3635 // [-0.5, -0] (negative) yields minus zero.
3636 __ TestDoubleSign(input, scratch1);
3637 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
3638 }
3639 __ fcmpu(input, dot_five);
3640 if (CpuFeatures::IsSupported(ISELECT)) {
3641 __ li(result, Operand(1));
3642 __ isel(lt, result, r0, result);
3643 __ b(&done);
3644 } else {
3645 Label return_zero;
3646 __ bne(&return_zero);
3647 __ li(result, Operand(1)); // +0.5.
3648 __ b(&done);
3649 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3650 // flag kBailoutOnMinusZero.
3651 __ bind(&return_zero);
3652 __ li(result, Operand::Zero());
3653 __ b(&done);
3654 }
3655
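// General case, e.g. round(2.5) is 3 and round(-2.5) is -2 (ties go toward
// +infinity): add 0.5 and take the floor of the sum.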
3656 __ bind(&convert);
3657 __ fadd(input_plus_dot_five, input, dot_five);
3658 // Reuse dot_five (double_scratch0) as we no longer need this value.
3659 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
3660 double_scratch0(), &done, &done);
3661 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3662 __ bind(&done);
3663}
3664
3665
3666void LCodeGen::DoMathFround(LMathFround* instr) {
3667 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3668 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3669 __ frsp(output_reg, input_reg);
3670}
3671
3672
3673void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3674 DoubleRegister input = ToDoubleRegister(instr->value());
3675 DoubleRegister result = ToDoubleRegister(instr->result());
3676 __ fsqrt(result, input);
3677}
3678
3679
3680void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3681 DoubleRegister input = ToDoubleRegister(instr->value());
3682 DoubleRegister result = ToDoubleRegister(instr->result());
3683 DoubleRegister temp = double_scratch0();
3684
3685 // Note that according to ECMA-262 15.8.2.13:
3686 // Math.pow(-Infinity, 0.5) == Infinity
3687 // Math.sqrt(-Infinity) == NaN
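// Per IEEE-754, sqrt(-0) is -0, so the fall-through path below adds +0 to
// turn a -0 input into +0 before taking the square root; -Infinity is
// handled separately via fneg so that pow(-Infinity, 0.5) yields +Infinity.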
3688 Label skip, done;
3689
3690 __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
3691 __ fcmpu(input, temp);
3692 __ bne(&skip);
3693 __ fneg(result, temp);
3694 __ b(&done);
3695
3696 // Add +0 to convert -0 to +0.
3697 __ bind(&skip);
3698 __ fadd(result, input, kDoubleRegZero);
3699 __ fsqrt(result, result);
3700 __ bind(&done);
3701}
3702
3703
3704void LCodeGen::DoPower(LPower* instr) {
3705 Representation exponent_type = instr->hydrogen()->right()->representation();
3706 // Having marked this as a call, we can use any registers.
3707 // Just make sure that the input/output registers are the expected ones.
3708 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3709 DCHECK(!instr->right()->IsDoubleRegister() ||
3710 ToDoubleRegister(instr->right()).is(d2));
3711 DCHECK(!instr->right()->IsRegister() ||
3712 ToRegister(instr->right()).is(tagged_exponent));
3713 DCHECK(ToDoubleRegister(instr->left()).is(d1));
3714 DCHECK(ToDoubleRegister(instr->result()).is(d3));
3715
3716 if (exponent_type.IsSmi()) {
3717 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3718 __ CallStub(&stub);
3719 } else if (exponent_type.IsTagged()) {
3720 Label no_deopt;
3721 __ JumpIfSmi(tagged_exponent, &no_deopt);
3722 DCHECK(!r10.is(tagged_exponent));
3723 __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3724 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3725 __ cmp(r10, ip);
3726 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3727 __ bind(&no_deopt);
3728 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3729 __ CallStub(&stub);
3730 } else if (exponent_type.IsInteger32()) {
3731 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3732 __ CallStub(&stub);
3733 } else {
3734 DCHECK(exponent_type.IsDouble());
3735 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3736 __ CallStub(&stub);
3737 }
3738}
3739
3740
3741void LCodeGen::DoMathExp(LMathExp* instr) {
3742 DoubleRegister input = ToDoubleRegister(instr->value());
3743 DoubleRegister result = ToDoubleRegister(instr->result());
3744 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3745 DoubleRegister double_scratch2 = double_scratch0();
3746 Register temp1 = ToRegister(instr->temp1());
3747 Register temp2 = ToRegister(instr->temp2());
3748
3749 MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
3750 double_scratch2, temp1, temp2, scratch0());
3751}
3752
3753
3754void LCodeGen::DoMathLog(LMathLog* instr) {
3755 __ PrepareCallCFunction(0, 1, scratch0());
3756 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3757 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
3758 1);
3759 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3760}
3761
3762
3763void LCodeGen::DoMathClz32(LMathClz32* instr) {
3764 Register input = ToRegister(instr->value());
3765 Register result = ToRegister(instr->result());
3766 __ cntlzw_(result, input);
3767}
3768
3769
3770void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3771 DCHECK(ToRegister(instr->context()).is(cp));
3772 DCHECK(ToRegister(instr->function()).is(r4));
3773 DCHECK(instr->HasPointerMap());
3774
3775 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3776 if (known_function.is_null()) {
3777 LPointerMap* pointers = instr->pointer_map();
3778 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3779 ParameterCount count(instr->arity());
3780 __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
3781 } else {
3782 CallKnownFunction(known_function,
3783 instr->hydrogen()->formal_parameter_count(),
3784 instr->arity(), instr);
3785 }
3786}
3787
3788
3789void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3790 DCHECK(ToRegister(instr->result()).is(r3));
3791
3792 if (instr->hydrogen()->IsTailCall()) {
3793 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3794
3795 if (instr->target()->IsConstantOperand()) {
3796 LConstantOperand* target = LConstantOperand::cast(instr->target());
3797 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3798 __ Jump(code, RelocInfo::CODE_TARGET);
3799 } else {
3800 DCHECK(instr->target()->IsRegister());
3801 Register target = ToRegister(instr->target());
3802 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3803 __ JumpToJSEntry(ip);
3804 }
3805 } else {
3806 LPointerMap* pointers = instr->pointer_map();
3807 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3808
3809 if (instr->target()->IsConstantOperand()) {
3810 LConstantOperand* target = LConstantOperand::cast(instr->target());
3811 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3812 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3813 __ Call(code, RelocInfo::CODE_TARGET);
3814 } else {
3815 DCHECK(instr->target()->IsRegister());
3816 Register target = ToRegister(instr->target());
3817 generator.BeforeCall(__ CallSize(target));
3818 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3819 __ CallJSEntry(ip);
3820 }
3821 generator.AfterCall();
3822 }
3823}
3824
3825
3826void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3827 DCHECK(ToRegister(instr->function()).is(r4));
3828 DCHECK(ToRegister(instr->result()).is(r3));
3829
3830 // Change context.
3831 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
3832
3833 // Always initialize new target and number of actual arguments.
3834 __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
3835 __ mov(r3, Operand(instr->arity()));
3836
3837 bool is_self_call = false;
3838 if (instr->hydrogen()->function()->IsConstant()) {
3839 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3840 Handle<JSFunction> jsfun =
3841 Handle<JSFunction>::cast(fun_const->handle(isolate()));
3842 is_self_call = jsfun.is_identical_to(info()->closure());
3843 }
3844
3845 if (is_self_call) {
3846 __ CallSelf();
3847 } else {
3848 __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
3849 __ CallJSEntry(ip);
3850 }
3851
3852 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3853}
3854
3855
3856void LCodeGen::DoCallFunction(LCallFunction* instr) {
3857 HCallFunction* hinstr = instr->hydrogen();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003858 DCHECK(ToRegister(instr->context()).is(cp));
3859 DCHECK(ToRegister(instr->function()).is(r4));
3860 DCHECK(ToRegister(instr->result()).is(r3));
3861
3862 int arity = instr->arity();
3863 ConvertReceiverMode mode = hinstr->convert_mode();
3864 if (hinstr->HasVectorAndSlot()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003865 Register slot_register = ToRegister(instr->temp_slot());
3866 Register vector_register = ToRegister(instr->temp_vector());
3867 DCHECK(slot_register.is(r6));
3868 DCHECK(vector_register.is(r5));
3869
3870 AllowDeferredHandleDereference vector_structure_check;
3871 Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
3872 int index = vector->GetIndex(hinstr->slot());
3873
3874 __ Move(vector_register, vector);
3875 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
3876
3877 Handle<Code> ic =
3878 CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
3879 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3880 } else {
3881 __ mov(r3, Operand(arity));
3882 CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
3883 }
3884}
3885
3886
3887void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3888 DCHECK(ToRegister(instr->context()).is(cp));
3889 DCHECK(ToRegister(instr->constructor()).is(r4));
3890 DCHECK(ToRegister(instr->result()).is(r3));
3891
3892 __ mov(r3, Operand(instr->arity()));
3893 if (instr->arity() == 1) {
3894 // We only need the allocation site when there is a length argument; that
3895 // case may bail out to the runtime, which determines the correct elements
3896 // kind from the site.
3897 __ Move(r5, instr->hydrogen()->site());
3898 } else {
3899 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
3900 }
3901 ElementsKind kind = instr->hydrogen()->elements_kind();
3902 AllocationSiteOverrideMode override_mode =
3903 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3904 ? DISABLE_ALLOCATION_SITES
3905 : DONT_OVERRIDE;
3906
3907 if (instr->arity() == 0) {
3908 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3909 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3910 } else if (instr->arity() == 1) {
3911 Label done;
3912 if (IsFastPackedElementsKind(kind)) {
3913 Label packed_case;
3914 // If the length argument is non-zero, the array will contain holes, so
3915 // the holey variant of the single-argument constructor stub is needed.
3916 __ LoadP(r8, MemOperand(sp, 0));
3917 __ cmpi(r8, Operand::Zero());
3918 __ beq(&packed_case);
3919
3920 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3921 ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
3922 override_mode);
3923 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3924 __ b(&done);
3925 __ bind(&packed_case);
3926 }
3927
3928 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3929 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3930 __ bind(&done);
3931 } else {
3932 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3933 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3934 }
3935}
3936
3937
3938void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3939 CallRuntime(instr->function(), instr->arity(), instr);
3940}
3941
3942
3943void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3944 Register function = ToRegister(instr->function());
3945 Register code_object = ToRegister(instr->code_object());
3946 __ addi(code_object, code_object,
3947 Operand(Code::kHeaderSize - kHeapObjectTag));
3948 __ StoreP(code_object,
3949 FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
3950}
3951
3952
3953void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3954 Register result = ToRegister(instr->result());
3955 Register base = ToRegister(instr->base_object());
3956 if (instr->offset()->IsConstantOperand()) {
3957 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3958 __ Add(result, base, ToInteger32(offset), r0);
3959 } else {
3960 Register offset = ToRegister(instr->offset());
3961 __ add(result, base, offset);
3962 }
3963}
3964
3965
3966void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3967 HStoreNamedField* hinstr = instr->hydrogen();
3968 Representation representation = instr->representation();
3969
3970 Register object = ToRegister(instr->object());
3971 Register scratch = scratch0();
3972 HObjectAccess access = hinstr->access();
3973 int offset = access.offset();
3974
3975 if (access.IsExternalMemory()) {
3976 Register value = ToRegister(instr->value());
3977 MemOperand operand = MemOperand(object, offset);
3978 __ StoreRepresentation(value, operand, representation, r0);
3979 return;
3980 }
3981
3982 __ AssertNotSmi(object);
3983
3984#if V8_TARGET_ARCH_PPC64
3985 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
3986 IsInteger32(LConstantOperand::cast(instr->value())));
3987#else
3988 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
3989 IsSmi(LConstantOperand::cast(instr->value())));
3990#endif
3991 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
3992 DCHECK(access.IsInobject());
3993 DCHECK(!hinstr->has_transition());
3994 DCHECK(!hinstr->NeedsWriteBarrier());
3995 DoubleRegister value = ToDoubleRegister(instr->value());
3996 __ stfd(value, FieldMemOperand(object, offset));
3997 return;
3998 }
3999
4000 if (hinstr->has_transition()) {
4001 Handle<Map> transition = hinstr->transition_map();
4002 AddDeprecationDependency(transition);
4003 __ mov(scratch, Operand(transition));
4004 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
4005 if (hinstr->NeedsWriteBarrierForMap()) {
4006 Register temp = ToRegister(instr->temp());
4007 // Update the write barrier for the map field.
4008 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
4009 kSaveFPRegs);
4010 }
4011 }
4012
4013 // Do the store.
4014 Register record_dest = object;
4015 Register record_value = no_reg;
4016 Register record_scratch = scratch;
4017#if V8_TARGET_ARCH_PPC64
4018 if (FLAG_unbox_double_fields && representation.IsDouble()) {
4019 DCHECK(access.IsInobject());
4020 DoubleRegister value = ToDoubleRegister(instr->value());
4021 __ stfd(value, FieldMemOperand(object, offset));
4022 if (hinstr->NeedsWriteBarrier()) {
4023 record_value = ToRegister(instr->value());
4024 }
4025 } else {
4026 if (representation.IsSmi() &&
4027 hinstr->value()->representation().IsInteger32()) {
4028 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4029 // 64-bit Smi optimization
4030 // Store int value directly to upper half of the smi.
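// On 64-bit targets a Smi keeps its 32-bit payload in one half of the tagged
// word (which half depends on endianness); SmiWordOffset() selects that half,
// so writing the raw integer there leaves a correctly tagged Smi, given that
// the other half of an initialized Smi slot is already zero.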
4031 offset = SmiWordOffset(offset);
4032 representation = Representation::Integer32();
4033 }
4034#endif
4035 if (access.IsInobject()) {
4036 Register value = ToRegister(instr->value());
4037 MemOperand operand = FieldMemOperand(object, offset);
4038 __ StoreRepresentation(value, operand, representation, r0);
4039 record_value = value;
4040 } else {
4041 Register value = ToRegister(instr->value());
4042 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4043 MemOperand operand = FieldMemOperand(scratch, offset);
4044 __ StoreRepresentation(value, operand, representation, r0);
4045 record_dest = scratch;
4046 record_value = value;
4047 record_scratch = object;
4048 }
4049#if V8_TARGET_ARCH_PPC64
4050 }
4051#endif
4052
4053 if (hinstr->NeedsWriteBarrier()) {
4054 __ RecordWriteField(record_dest, offset, record_value, record_scratch,
4055 GetLinkRegisterState(), kSaveFPRegs,
4056 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4057 hinstr->PointersToHereCheckForValue());
4058 }
4059}
4060
4061
4062void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4063 DCHECK(ToRegister(instr->context()).is(cp));
4064 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4065 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4066
4067 if (instr->hydrogen()->HasVectorAndSlot()) {
4068 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4069 }
4070
4071 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
4072 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4073 isolate(), instr->language_mode(),
4074 instr->hydrogen()->initialization_state()).code();
4075 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4076}
4077
4078
4079void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4080 Representation representation = instr->hydrogen()->length()->representation();
4081 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4082 DCHECK(representation.IsSmiOrInteger32());
4083
4084 Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
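// cc is the condition, applied to (length compared with index), under which
// the index is out of bounds: length <= index normally, or length < index
// when equality is allowed. It is commuted below when the operands are
// compared in the opposite order (the constant-length case).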
4085 if (instr->length()->IsConstantOperand()) {
4086 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4087 Register index = ToRegister(instr->index());
4088 if (representation.IsSmi()) {
4089 __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
4090 } else {
4091 __ Cmplwi(index, Operand(length), r0);
4092 }
4093 cc = CommuteCondition(cc);
4094 } else if (instr->index()->IsConstantOperand()) {
4095 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4096 Register length = ToRegister(instr->length());
4097 if (representation.IsSmi()) {
4098 __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
4099 } else {
4100 __ Cmplwi(length, Operand(index), r0);
4101 }
4102 } else {
4103 Register index = ToRegister(instr->index());
4104 Register length = ToRegister(instr->length());
4105 if (representation.IsSmi()) {
4106 __ cmpl(length, index);
4107 } else {
4108 __ cmplw(length, index);
4109 }
4110 }
4111 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4112 Label done;
4113 __ b(NegateCondition(cc), &done);
4114 __ stop("eliminated bounds check failed");
4115 __ bind(&done);
4116 } else {
4117 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
4118 }
4119}
4120
4121
4122void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4123 Register external_pointer = ToRegister(instr->elements());
4124 Register key = no_reg;
4125 ElementsKind elements_kind = instr->elements_kind();
4126 bool key_is_constant = instr->key()->IsConstantOperand();
4127 int constant_key = 0;
4128 if (key_is_constant) {
4129 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4130 if (constant_key & 0xF0000000) {
4131 Abort(kArrayIndexConstantValueTooBig);
4132 }
4133 } else {
4134 key = ToRegister(instr->key());
4135 }
4136 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4137 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4138 int base_offset = instr->base_offset();
4139
4140 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
4141 Register address = scratch0();
4142 DoubleRegister value(ToDoubleRegister(instr->value()));
4143 if (key_is_constant) {
4144 if (constant_key != 0) {
4145 __ Add(address, external_pointer, constant_key << element_size_shift,
4146 r0);
4147 } else {
4148 address = external_pointer;
4149 }
4150 } else {
4151 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
4152 __ add(address, external_pointer, r0);
4153 }
4154 if (elements_kind == FLOAT32_ELEMENTS) {
4155 __ frsp(double_scratch0(), value);
4156 __ stfs(double_scratch0(), MemOperand(address, base_offset));
4157 } else { // Storing doubles, not floats.
4158 __ stfd(value, MemOperand(address, base_offset));
4159 }
4160 } else {
4161 Register value(ToRegister(instr->value()));
4162 MemOperand mem_operand =
4163 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
4164 constant_key, element_size_shift, base_offset);
4165 switch (elements_kind) {
4166 case UINT8_ELEMENTS:
4167 case UINT8_CLAMPED_ELEMENTS:
4168 case INT8_ELEMENTS:
4169 if (key_is_constant) {
4170 __ StoreByte(value, mem_operand, r0);
4171 } else {
4172 __ stbx(value, mem_operand);
4173 }
4174 break;
4175 case INT16_ELEMENTS:
4176 case UINT16_ELEMENTS:
4177 if (key_is_constant) {
4178 __ StoreHalfWord(value, mem_operand, r0);
4179 } else {
4180 __ sthx(value, mem_operand);
4181 }
4182 break;
4183 case INT32_ELEMENTS:
4184 case UINT32_ELEMENTS:
4185 if (key_is_constant) {
4186 __ StoreWord(value, mem_operand, r0);
4187 } else {
4188 __ stwx(value, mem_operand);
4189 }
4190 break;
4191 case FLOAT32_ELEMENTS:
4192 case FLOAT64_ELEMENTS:
4193 case FAST_DOUBLE_ELEMENTS:
4194 case FAST_ELEMENTS:
4195 case FAST_SMI_ELEMENTS:
4196 case FAST_HOLEY_DOUBLE_ELEMENTS:
4197 case FAST_HOLEY_ELEMENTS:
4198 case FAST_HOLEY_SMI_ELEMENTS:
4199 case DICTIONARY_ELEMENTS:
4200 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4201 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4202 case FAST_STRING_WRAPPER_ELEMENTS:
4203 case SLOW_STRING_WRAPPER_ELEMENTS:
4204 case NO_ELEMENTS:
4205 UNREACHABLE();
4206 break;
4207 }
4208 }
4209}
4210
4211
4212void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4213 DoubleRegister value = ToDoubleRegister(instr->value());
4214 Register elements = ToRegister(instr->elements());
4215 Register key = no_reg;
4216 Register scratch = scratch0();
4217 DoubleRegister double_scratch = double_scratch0();
4218 bool key_is_constant = instr->key()->IsConstantOperand();
4219 int constant_key = 0;
4220
4221 // Calculate the effective address of the slot in the array to store the
4222 // double value.
4223 if (key_is_constant) {
4224 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4225 if (constant_key & 0xF0000000) {
4226 Abort(kArrayIndexConstantValueTooBig);
4227 }
4228 } else {
4229 key = ToRegister(instr->key());
4230 }
4231 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4232 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4233 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
4234 if (!key_is_constant) {
4235 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
4236 __ add(scratch, elements, scratch);
4237 elements = scratch;
4238 }
4239 if (!is_int16(base_offset)) {
4240 __ Add(scratch, elements, base_offset, r0);
4241 base_offset = 0;
4242 elements = scratch;
4243 }
4244
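// Canonicalization rewrites any NaN to the canonical quiet-NaN bit pattern,
// presumably so a stored NaN cannot be confused with the hole marker used in
// holey double backing stores.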
4245 if (instr->NeedsCanonicalization()) {
4246 // Turn potential sNaN value into qNaN.
4247 __ CanonicalizeNaN(double_scratch, value);
4248 __ stfd(double_scratch, MemOperand(elements, base_offset));
4249 } else {
4250 __ stfd(value, MemOperand(elements, base_offset));
4251 }
4252}
4253
4254
4255void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4256 HStoreKeyed* hinstr = instr->hydrogen();
4257 Register value = ToRegister(instr->value());
4258 Register elements = ToRegister(instr->elements());
4259 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4260 Register scratch = scratch0();
4261 Register store_base = scratch;
4262 int offset = instr->base_offset();
4263
4264 // Do the store.
4265 if (instr->key()->IsConstantOperand()) {
4266 DCHECK(!hinstr->NeedsWriteBarrier());
4267 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4268 offset += ToInteger32(const_operand) * kPointerSize;
4269 store_base = elements;
4270 } else {
4271 // Even though the HStoreKeyed instruction forces the input
4272 // representation for the key to be an integer, the input gets replaced
4273 // during bounds check elimination with the index argument to the bounds
4274 // check, which can be tagged, so that case must be handled here, too.
4275 if (hinstr->key()->representation().IsSmi()) {
4276 __ SmiToPtrArrayOffset(scratch, key);
4277 } else {
4278 __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
4279 }
4280 __ add(scratch, elements, scratch);
4281 }
4282
4283 Representation representation = hinstr->value()->representation();
4284
4285#if V8_TARGET_ARCH_PPC64
4286 // 64-bit Smi optimization
4287 if (representation.IsInteger32()) {
4288 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4289 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4290 // Store int value directly to upper half of the smi.
4291 offset = SmiWordOffset(offset);
4292 }
4293#endif
4294
4295 __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
4296 r0);
4297
4298 if (hinstr->NeedsWriteBarrier()) {
4299 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4300 ? OMIT_SMI_CHECK
4301 : INLINE_SMI_CHECK;
4302 // Compute address of modified element and store it into key register.
4303 __ Add(key, store_base, offset, r0);
4304 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4305 EMIT_REMEMBERED_SET, check_needed,
4306 hinstr->PointersToHereCheckForValue());
4307 }
4308}
4309
4310
4311void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4312 // Dispatch by case: typed array, fast double array, or fast tagged array.
4313 if (instr->is_fixed_typed_array()) {
4314 DoStoreKeyedExternalArray(instr);
4315 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4316 DoStoreKeyedFixedDoubleArray(instr);
4317 } else {
4318 DoStoreKeyedFixedArray(instr);
4319 }
4320}
4321
4322
4323void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4324 DCHECK(ToRegister(instr->context()).is(cp));
4325 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4326 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4327 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4328
4329 if (instr->hydrogen()->HasVectorAndSlot()) {
4330 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4331 }
4332
4333 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4334 isolate(), instr->language_mode(),
4335 instr->hydrogen()->initialization_state()).code();
4336 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4337}
4338
4339
4340void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4341 class DeferredMaybeGrowElements final : public LDeferredCode {
4342 public:
4343 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4344 : LDeferredCode(codegen), instr_(instr) {}
4345 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4346 LInstruction* instr() override { return instr_; }
4347
4348 private:
4349 LMaybeGrowElements* instr_;
4350 };
4351
4352 Register result = r3;
4353 DeferredMaybeGrowElements* deferred =
4354 new (zone()) DeferredMaybeGrowElements(this, instr);
4355 LOperand* key = instr->key();
4356 LOperand* current_capacity = instr->current_capacity();
4357
4358 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4359 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4360 DCHECK(key->IsConstantOperand() || key->IsRegister());
4361 DCHECK(current_capacity->IsConstantOperand() ||
4362 current_capacity->IsRegister());
4363
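// Growth is needed exactly when key >= current capacity; each operand shape
// below branches to the deferred GrowArrayElements path in that case.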
4364 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4365 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4366 int32_t constant_capacity =
4367 ToInteger32(LConstantOperand::cast(current_capacity));
4368 if (constant_key >= constant_capacity) {
4369 // Deferred case.
4370 __ b(deferred->entry());
4371 }
4372 } else if (key->IsConstantOperand()) {
4373 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4374 __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
4375 __ ble(deferred->entry());
4376 } else if (current_capacity->IsConstantOperand()) {
4377 int32_t constant_capacity =
4378 ToInteger32(LConstantOperand::cast(current_capacity));
4379 __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
4380 __ bge(deferred->entry());
4381 } else {
4382 __ cmpw(ToRegister(key), ToRegister(current_capacity));
4383 __ bge(deferred->entry());
4384 }
4385
4386 if (instr->elements()->IsRegister()) {
4387 __ Move(result, ToRegister(instr->elements()));
4388 } else {
4389 __ LoadP(result, ToMemOperand(instr->elements()));
4390 }
4391
4392 __ bind(deferred->exit());
4393}
4394
4395
4396void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4397 // TODO(3095996): Get rid of this. For now, we need to make the
4398 // result register contain a valid pointer because it is already
4399 // contained in the register pointer map.
4400 Register result = r3;
4401 __ li(result, Operand::Zero());
4402
4403 // We have to call a stub.
4404 {
4405 PushSafepointRegistersScope scope(this);
4406 if (instr->object()->IsRegister()) {
4407 __ Move(result, ToRegister(instr->object()));
4408 } else {
4409 __ LoadP(result, ToMemOperand(instr->object()));
4410 }
4411
4412 LOperand* key = instr->key();
4413 if (key->IsConstantOperand()) {
4414 __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
4415 } else {
4416 __ SmiTag(r6, ToRegister(key));
4417 }
4418
4419 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4420 instr->hydrogen()->kind());
4421 __ CallStub(&stub);
4422 RecordSafepointWithLazyDeopt(
4423 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4424 __ StoreToSafepointRegisterSlot(result, result);
4425 }
4426
4427 // Deopt on smi, which means the elements array changed to dictionary mode.
4428 __ TestIfSmi(result, r0);
4429 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
4430}
4431
4432
4433void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4434 Register object_reg = ToRegister(instr->object());
4435 Register scratch = scratch0();
4436
4437 Handle<Map> from_map = instr->original_map();
4438 Handle<Map> to_map = instr->transitioned_map();
4439 ElementsKind from_kind = instr->from_kind();
4440 ElementsKind to_kind = instr->to_kind();
4441
4442 Label not_applicable;
4443 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4444 __ Cmpi(scratch, Operand(from_map), r0);
4445 __ bne(&not_applicable);
4446
4447 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4448 Register new_map_reg = ToRegister(instr->new_map_temp());
4449 __ mov(new_map_reg, Operand(to_map));
4450 __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
4451 r0);
4452 // Write barrier.
4453 __ RecordWriteForMap(object_reg, new_map_reg, scratch,
4454 GetLinkRegisterState(), kDontSaveFPRegs);
4455 } else {
4456 DCHECK(ToRegister(instr->context()).is(cp));
4457 DCHECK(object_reg.is(r3));
4458 PushSafepointRegistersScope scope(this);
4459 __ Move(r4, to_map);
4460 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4461 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4462 __ CallStub(&stub);
4463 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4464 Safepoint::kLazyDeopt);
4465 }
4466 __ bind(&not_applicable);
4467}
4468
4469
4470void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4471 Register object = ToRegister(instr->object());
4472 Register temp = ToRegister(instr->temp());
4473 Label no_memento_found;
4474 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4475 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
4476 __ bind(&no_memento_found);
4477}
4478
4479
4480void LCodeGen::DoStringAdd(LStringAdd* instr) {
4481 DCHECK(ToRegister(instr->context()).is(cp));
4482 DCHECK(ToRegister(instr->left()).is(r4));
4483 DCHECK(ToRegister(instr->right()).is(r3));
4484 StringAddStub stub(isolate(), instr->hydrogen()->flags(),
4485 instr->hydrogen()->pretenure_flag());
4486 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4487}
4488
4489
4490void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4491 class DeferredStringCharCodeAt final : public LDeferredCode {
4492 public:
4493 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4494 : LDeferredCode(codegen), instr_(instr) {}
4495 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4496 LInstruction* instr() override { return instr_; }
4497
4498 private:
4499 LStringCharCodeAt* instr_;
4500 };
4501
4502 DeferredStringCharCodeAt* deferred =
4503 new (zone()) DeferredStringCharCodeAt(this, instr);
4504
4505 StringCharLoadGenerator::Generate(
4506 masm(), ToRegister(instr->string()), ToRegister(instr->index()),
4507 ToRegister(instr->result()), deferred->entry());
4508 __ bind(deferred->exit());
4509}
4510
4511
4512void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4513 Register string = ToRegister(instr->string());
4514 Register result = ToRegister(instr->result());
4515 Register scratch = scratch0();
4516
4517 // TODO(3095996): Get rid of this. For now, we need to make the
4518 // result register contain a valid pointer because it is already
4519 // contained in the register pointer map.
4520 __ li(result, Operand::Zero());
4521
4522 PushSafepointRegistersScope scope(this);
4523 __ push(string);
4524 // Push the index as a smi. This is safe because of the checks in
4525 // DoStringCharCodeAt above.
4526 if (instr->index()->IsConstantOperand()) {
4527 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4528 __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
4529 __ push(scratch);
4530 } else {
4531 Register index = ToRegister(instr->index());
4532 __ SmiTag(index);
4533 __ push(index);
4534 }
4535 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4536 instr->context());
4537 __ AssertSmi(r3);
4538 __ SmiUntag(r3);
4539 __ StoreToSafepointRegisterSlot(r3, result);
4540}
4541
4542
4543void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4544 class DeferredStringCharFromCode final : public LDeferredCode {
4545 public:
4546 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4547 : LDeferredCode(codegen), instr_(instr) {}
4548 void Generate() override {
4549 codegen()->DoDeferredStringCharFromCode(instr_);
4550 }
4551 LInstruction* instr() override { return instr_; }
4552
4553 private:
4554 LStringCharFromCode* instr_;
4555 };
4556
4557 DeferredStringCharFromCode* deferred =
4558 new (zone()) DeferredStringCharFromCode(this, instr);
4559
4560 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4561 Register char_code = ToRegister(instr->char_code());
4562 Register result = ToRegister(instr->result());
4563 DCHECK(!char_code.is(result));
4564
4565 __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
4566 __ bgt(deferred->entry());
4567 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4568 __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
4569 __ add(result, result, r0);
4570 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4571 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4572 __ cmp(result, ip);
4573 __ beq(deferred->entry());
4574 __ bind(deferred->exit());
4575}
4576
4577
4578void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4579 Register char_code = ToRegister(instr->char_code());
4580 Register result = ToRegister(instr->result());
4581
4582 // TODO(3095996): Get rid of this. For now, we need to make the
4583 // result register contain a valid pointer because it is already
4584 // contained in the register pointer map.
4585 __ li(result, Operand::Zero());
4586
4587 PushSafepointRegistersScope scope(this);
4588 __ SmiTag(char_code);
4589 __ push(char_code);
4590 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4591 instr->context());
4592 __ StoreToSafepointRegisterSlot(r3, result);
4593}
4594
4595
4596void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4597 LOperand* input = instr->value();
4598 DCHECK(input->IsRegister() || input->IsStackSlot());
4599 LOperand* output = instr->result();
4600 DCHECK(output->IsDoubleRegister());
4601 if (input->IsStackSlot()) {
4602 Register scratch = scratch0();
4603 __ LoadP(scratch, ToMemOperand(input));
4604 __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
4605 } else {
4606 __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
4607 }
4608}
4609
4610
4611void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4612 LOperand* input = instr->value();
4613 LOperand* output = instr->result();
4614 __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
4615}
4616
4617
4618void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4619 class DeferredNumberTagI final : public LDeferredCode {
4620 public:
4621 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4622 : LDeferredCode(codegen), instr_(instr) {}
4623 void Generate() override {
4624 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4625 instr_->temp2(), SIGNED_INT32);
4626 }
4627 LInstruction* instr() override { return instr_; }
4628
4629 private:
4630 LNumberTagI* instr_;
4631 };
4632
4633 Register src = ToRegister(instr->value());
4634 Register dst = ToRegister(instr->result());
4635
4636 DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
4637#if V8_TARGET_ARCH_PPC64
4638 __ SmiTag(dst, src);
4639#else
4640 __ SmiTagCheckOverflow(dst, src, r0);
4641 __ BranchOnOverflow(deferred->entry());
4642#endif
4643 __ bind(deferred->exit());
4644}
4645
4646
4647void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4648 class DeferredNumberTagU final : public LDeferredCode {
4649 public:
4650 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4651 : LDeferredCode(codegen), instr_(instr) {}
4652 void Generate() override {
4653 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4654 instr_->temp2(), UNSIGNED_INT32);
4655 }
4656 LInstruction* instr() override { return instr_; }
4657
4658 private:
4659 LNumberTagU* instr_;
4660 };
4661
4662 Register input = ToRegister(instr->value());
4663 Register result = ToRegister(instr->result());
4664
4665 DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
4666 __ Cmpli(input, Operand(Smi::kMaxValue), r0);
4667 __ bgt(deferred->entry());
4668 __ SmiTag(result, input);
4669 __ bind(deferred->exit());
4670}
4671
4672
4673void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
4674 LOperand* temp1, LOperand* temp2,
4675 IntegerSignedness signedness) {
4676 Label done, slow;
4677 Register src = ToRegister(value);
4678 Register dst = ToRegister(instr->result());
4679 Register tmp1 = scratch0();
4680 Register tmp2 = ToRegister(temp1);
4681 Register tmp3 = ToRegister(temp2);
4682 DoubleRegister dbl_scratch = double_scratch0();
4683
4684 if (signedness == SIGNED_INT32) {
4685 // There was overflow, so bits 30 and 31 of the original integer
4686 // disagree. Try to allocate a heap number in new space and store
4687 // the value there. If that fails, call the runtime system.
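// When dst aliases src, the original value is recovered from the overflowed
// Smi: e.g. on a 32-bit target, tagging 0x40000000 overflows to 0x80000000;
// untagging that gives 0xC0000000, and flipping the sign bit with xoris
// restores 0x40000000.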
4688 if (dst.is(src)) {
4689 __ SmiUntag(src, dst);
4690 __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
4691 }
4692 __ ConvertIntToDouble(src, dbl_scratch);
4693 } else {
4694 __ ConvertUnsignedIntToDouble(src, dbl_scratch);
4695 }
4696
4697 if (FLAG_inline_new) {
4698 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4699 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4700 __ b(&done);
4701 }
4702
4703 // Slow case: Call the runtime system to do the number allocation.
4704 __ bind(&slow);
4705 {
4706 // TODO(3095996): Put a valid pointer value in the stack slot where the
4707 // result register is stored, as this register is in the pointer map, but
4708 // contains an integer value.
4709 __ li(dst, Operand::Zero());
4710
4711 // Preserve the value of all registers.
4712 PushSafepointRegistersScope scope(this);
4713
4714 // NumberTagI and NumberTagD use the context from the frame, rather than
4715 // the environment's HContext or HInlinedContext value.
4716 // They only call Runtime::kAllocateHeapNumber.
4717 // The corresponding HChange instructions are added in a phase that does
4718 // not have easy access to the local context.
4719 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4720 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4721 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4722 Safepoint::kNoLazyDeopt);
4723 __ StoreToSafepointRegisterSlot(r3, dst);
4724 }
4725
4726 // Done. Store the value in dbl_scratch into the value field of the
4727 // allocated heap number.
4728 __ bind(&done);
4729 __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4730}
4731
4732
4733void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4734 class DeferredNumberTagD final : public LDeferredCode {
4735 public:
4736 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4737 : LDeferredCode(codegen), instr_(instr) {}
4738 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4739 LInstruction* instr() override { return instr_; }
4740
4741 private:
4742 LNumberTagD* instr_;
4743 };
4744
4745 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4746 Register scratch = scratch0();
4747 Register reg = ToRegister(instr->result());
4748 Register temp1 = ToRegister(instr->temp());
4749 Register temp2 = ToRegister(instr->temp2());
4750
4751 DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
4752 if (FLAG_inline_new) {
4753 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4754 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4755 } else {
4756 __ b(deferred->entry());
4757 }
4758 __ bind(deferred->exit());
4759 __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4760}
4761
4762
4763void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4764 // TODO(3095996): Get rid of this. For now, we need to make the
4765 // result register contain a valid pointer because it is already
4766 // contained in the register pointer map.
4767 Register reg = ToRegister(instr->result());
4768 __ li(reg, Operand::Zero());
4769
4770 PushSafepointRegistersScope scope(this);
4771 // NumberTagI and NumberTagD use the context from the frame, rather than
4772 // the environment's HContext or HInlinedContext value.
4773 // They only call Runtime::kAllocateHeapNumber.
4774 // The corresponding HChange instructions are added in a phase that does
4775 // not have easy access to the local context.
4776 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4777 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4778 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4779 Safepoint::kNoLazyDeopt);
4780 __ StoreToSafepointRegisterSlot(r3, reg);
4781}
4782
4783
4784void LCodeGen::DoSmiTag(LSmiTag* instr) {
4785 HChange* hchange = instr->hydrogen();
4786 Register input = ToRegister(instr->value());
4787 Register output = ToRegister(instr->result());
4788 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4789 hchange->value()->CheckFlag(HValue::kUint32)) {
4790 __ TestUnsignedSmiCandidate(input, r0);
4791 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
4792 }
4793#if !V8_TARGET_ARCH_PPC64
4794 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4795 !hchange->value()->CheckFlag(HValue::kUint32)) {
4796 __ SmiTagCheckOverflow(output, input, r0);
4797 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
4798 } else {
4799#endif
4800 __ SmiTag(output, input);
4801#if !V8_TARGET_ARCH_PPC64
4802 }
4803#endif
4804}
4805
4806
4807void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4808 Register scratch = scratch0();
4809 Register input = ToRegister(instr->value());
4810 Register result = ToRegister(instr->result());
4811 if (instr->needs_check()) {
4812 // If the input is a HeapObject, the value of scratch won't be zero.
4813 __ andi(scratch, input, Operand(kHeapObjectTag));
4814 __ SmiUntag(result, input);
4815 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
4816 } else {
4817 __ SmiUntag(result, input);
4818 }
4819}
4820
4821
4822void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4823 DoubleRegister result_reg,
4824 NumberUntagDMode mode) {
4825 bool can_convert_undefined_to_nan =
4826 instr->hydrogen()->can_convert_undefined_to_nan();
4827 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4828
4829 Register scratch = scratch0();
4830 DCHECK(!result_reg.is(double_scratch0()));
4831
4832 Label convert, load_smi, done;
4833
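// Flow: Smis are untagged and converted at load_smi, heap numbers are loaded
// directly, undefined optionally becomes NaN, and anything else deoptimizes.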
4834 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4835 // Smi check.
4836 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4837
4838 // Heap number map check.
4839 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4840 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4841 __ cmp(scratch, ip);
4842 if (can_convert_undefined_to_nan) {
4843 __ bne(&convert);
4844 } else {
4845 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4846 }
4847 // Load the heap number.
4848 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4849 if (deoptimize_on_minus_zero) {
4850 __ TestDoubleIsMinusZero(result_reg, scratch, ip);
4851 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4852 }
4853 __ b(&done);
4854 if (can_convert_undefined_to_nan) {
4855 __ bind(&convert);
4856 // Convert undefined (and hole) to NaN.
4857 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4858 __ cmp(input_reg, ip);
4859 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
4860 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4861 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4862 __ b(&done);
4863 }
4864 } else {
4865 __ SmiUntag(scratch, input_reg);
4866 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4867 }
4868 // Smi to double register conversion
4869 __ bind(&load_smi);
4870 // scratch: untagged value of input_reg
4871 __ ConvertIntToDouble(scratch, result_reg);
4872 __ bind(&done);
4873}
4874
4875
4876void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4877 Register input_reg = ToRegister(instr->value());
4878 Register scratch1 = scratch0();
4879 Register scratch2 = ToRegister(instr->temp());
4880 DoubleRegister double_scratch = double_scratch0();
4881 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4882
4883 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4884 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4885
4886 Label done;
4887
4888 // Heap number map check.
4889 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4890 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4891 __ cmp(scratch1, ip);
4892
4893 if (instr->truncating()) {
4894 // Performs a truncating conversion of a floating point number as used by
4895 // the JS bitwise operations.
4896 Label no_heap_number, check_bools, check_false;
4897 __ bne(&no_heap_number);
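    // TruncateHeapNumberToI writes its result into input_reg, so copy the
    // heap number pointer into scratch2 first.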
4898 __ mr(scratch2, input_reg);
4899 __ TruncateHeapNumberToI(input_reg, scratch2);
4900 __ b(&done);
4901
4902 // Check for oddballs. Undefined and false are converted to zero, and true
4903 // to one, for truncating conversions.
4904 __ bind(&no_heap_number);
4905 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4906 __ cmp(input_reg, ip);
4907 __ bne(&check_bools);
4908 __ li(input_reg, Operand::Zero());
4909 __ b(&done);
4910
4911 __ bind(&check_bools);
4912 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4913 __ cmp(input_reg, ip);
4914 __ bne(&check_false);
4915 __ li(input_reg, Operand(1));
4916 __ b(&done);
4917
4918 __ bind(&check_false);
4919 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4920 __ cmp(input_reg, ip);
4921 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
4922 __ li(input_reg, Operand::Zero());
4923 } else {
4924 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4925
4926 __ lfd(double_scratch2,
4927 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4928 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4929 // Preserve the heap number pointer in scratch2 for the minus-zero check below.
4930 __ mr(scratch2, input_reg);
4931 }
4932 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
4933 double_scratch);
4934 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4935
4936 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4937 __ cmpi(input_reg, Operand::Zero());
4938 __ bne(&done);
4939 __ TestHeapNumberSign(scratch2, scratch1);
4940 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
4941 }
4942 }
4943 __ bind(&done);
4944}
4945
4946
4947void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4948 class DeferredTaggedToI final : public LDeferredCode {
4949 public:
4950 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4951 : LDeferredCode(codegen), instr_(instr) {}
4952 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4953 LInstruction* instr() override { return instr_; }
4954
4955 private:
4956 LTaggedToI* instr_;
4957 };
4958
4959 LOperand* input = instr->value();
4960 DCHECK(input->IsRegister());
4961 DCHECK(input->Equals(instr->result()));
4962
4963 Register input_reg = ToRegister(input);
4964
4965 if (instr->hydrogen()->value()->representation().IsSmi()) {
4966 __ SmiUntag(input_reg);
4967 } else {
4968 DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
4969
4970 // Branch to deferred code if the input is a HeapObject.
4971 __ JumpIfNotSmi(input_reg, deferred->entry());
4972
4973 __ SmiUntag(input_reg);
4974 __ bind(deferred->exit());
4975 }
4976}
4977
4978
4979void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4980 LOperand* input = instr->value();
4981 DCHECK(input->IsRegister());
4982 LOperand* result = instr->result();
4983 DCHECK(result->IsDoubleRegister());
4984
4985 Register input_reg = ToRegister(input);
4986 DoubleRegister result_reg = ToDoubleRegister(result);
4987
4988 HValue* value = instr->hydrogen()->value();
4989 NumberUntagDMode mode = value->representation().IsSmi()
4990 ? NUMBER_CANDIDATE_IS_SMI
4991 : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4992
4993 EmitNumberUntagD(instr, input_reg, result_reg, mode);
4994}
4995
4996
4997void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4998 Register result_reg = ToRegister(instr->result());
4999 Register scratch1 = scratch0();
5000 DoubleRegister double_input = ToDoubleRegister(instr->value());
5001 DoubleRegister double_scratch = double_scratch0();
5002
5003 if (instr->truncating()) {
5004 __ TruncateDoubleToI(result_reg, double_input);
5005 } else {
5006 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
5007 double_scratch);
5008 // Deoptimize if the input wasn't an int32 (inside a double).
5009 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
5010 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5011 Label done;
5012 __ cmpi(result_reg, Operand::Zero());
5013 __ bne(&done);
5014 __ TestDoubleSign(double_input, scratch1);
5015 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
5016 __ bind(&done);
5017 }
5018 }
5019}
5020
5021
5022void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5023 Register result_reg = ToRegister(instr->result());
5024 Register scratch1 = scratch0();
5025 DoubleRegister double_input = ToDoubleRegister(instr->value());
5026 DoubleRegister double_scratch = double_scratch0();
5027
5028 if (instr->truncating()) {
5029 __ TruncateDoubleToI(result_reg, double_input);
5030 } else {
5031 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
5032 double_scratch);
5033 // Deoptimize if the input wasn't an int32 (inside a double).
5034 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
5035 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5036 Label done;
5037 __ cmpi(result_reg, Operand::Zero());
5038 __ bne(&done);
5039 __ TestDoubleSign(double_input, scratch1);
5040 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
5041 __ bind(&done);
5042 }
5043 }
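  // On 64-bit targets every int32 fits in a smi, so tagging cannot overflow;
  // 32-bit targets must tag with an overflow check.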
5044#if V8_TARGET_ARCH_PPC64
5045 __ SmiTag(result_reg);
5046#else
5047 __ SmiTagCheckOverflow(result_reg, r0);
5048 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
5049#endif
5050}
5051
5052
5053void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5054 LOperand* input = instr->value();
5055 __ TestIfSmi(ToRegister(input), r0);
5056 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
5057}
5058
5059
5060void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5061 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5062 LOperand* input = instr->value();
5063 __ TestIfSmi(ToRegister(input), r0);
5064 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
5065 }
5066}
5067
5068
5069void LCodeGen::DoCheckArrayBufferNotNeutered(
5070 LCheckArrayBufferNotNeutered* instr) {
5071 Register view = ToRegister(instr->view());
5072 Register scratch = scratch0();
5073
5074 __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
5075 __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
5076 __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
5077 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
5078}
5079
5080
5081void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5082 Register input = ToRegister(instr->value());
5083 Register scratch = scratch0();
5084
5085 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5086 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5087
5088 if (instr->hydrogen()->is_interval_check()) {
5089 InstanceType first;
5090 InstanceType last;
5091 instr->hydrogen()->GetCheckInterval(&first, &last);
5092
5093 __ cmpli(scratch, Operand(first));
5094
5095 // If there is only one type in the interval, check for equality.
5096 if (first == last) {
5097 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
5098 } else {
5099 DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
5100 // Omit check for the last type.
5101 if (last != LAST_TYPE) {
5102 __ cmpli(scratch, Operand(last));
5103 DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
5104 }
5105 }
5106 } else {
5107 uint8_t mask;
5108 uint8_t tag;
5109 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5110
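    // A power-of-two mask selects a single bit, so one andi suffices; the tag
    // determines whether the set or the clear case deoptimizes.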
5111 if (base::bits::IsPowerOfTwo32(mask)) {
5112 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5113 __ andi(r0, scratch, Operand(mask));
5114 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
5115 cr0);
5116 } else {
5117 __ andi(scratch, scratch, Operand(mask));
5118 __ cmpi(scratch, Operand(tag));
5119 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
5120 }
5121 }
5122}
5123
5124
5125void LCodeGen::DoCheckValue(LCheckValue* instr) {
5126 Register reg = ToRegister(instr->value());
5127 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5128 AllowDeferredHandleDereference smi_check;
5129 if (isolate()->heap()->InNewSpace(*object)) {
5130 Register reg = ToRegister(instr->value());
5131 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5132 __ mov(ip, Operand(cell));
5133 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
5134 __ cmp(reg, ip);
5135 } else {
5136 __ Cmpi(reg, Operand(object), r0);
5137 }
5138 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
5139}
5140
5141
5142void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5143 Register temp = ToRegister(instr->temp());
5144 {
5145 PushSafepointRegistersScope scope(this);
5146 __ push(object);
5147 __ li(cp, Operand::Zero());
5148 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5149 RecordSafepointWithRegisters(instr->pointer_map(), 1,
5150 Safepoint::kNoLazyDeopt);
5151 __ StoreToSafepointRegisterSlot(r3, temp);
5152 }
5153 __ TestIfSmi(temp, r0);
5154 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
5155}
5156
5157
5158void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5159 class DeferredCheckMaps final : public LDeferredCode {
5160 public:
5161 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5162 : LDeferredCode(codegen), instr_(instr), object_(object) {
5163 SetExit(check_maps());
5164 }
5165 void Generate() override {
5166 codegen()->DoDeferredInstanceMigration(instr_, object_);
5167 }
5168 Label* check_maps() { return &check_maps_; }
5169 LInstruction* instr() override { return instr_; }
5170
5171 private:
5172 LCheckMaps* instr_;
5173 Label check_maps_;
5174 Register object_;
5175 };
5176
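  // Stability checks emit no code; they only register dependencies on the
  // listed maps remaining stable.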
5177 if (instr->hydrogen()->IsStabilityCheck()) {
5178 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5179 for (int i = 0; i < maps->size(); ++i) {
5180 AddStabilityDependency(maps->at(i).handle());
5181 }
5182 return;
5183 }
5184
5185 Register object = ToRegister(instr->value());
5186 Register map_reg = ToRegister(instr->temp());
5187
5188 __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
5189
5190 DeferredCheckMaps* deferred = NULL;
5191 if (instr->hydrogen()->HasMigrationTarget()) {
5192 deferred = new (zone()) DeferredCheckMaps(this, instr, object);
5193 __ bind(deferred->check_maps());
5194 }
5195
5196 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5197 Label success;
5198 for (int i = 0; i < maps->size() - 1; i++) {
5199 Handle<Map> map = maps->at(i).handle();
5200 __ CompareMap(map_reg, map, &success);
5201 __ beq(&success);
5202 }
5203
5204 Handle<Map> map = maps->at(maps->size() - 1).handle();
5205 __ CompareMap(map_reg, map, &success);
5206 if (instr->hydrogen()->HasMigrationTarget()) {
5207 __ bne(deferred->entry());
5208 } else {
5209 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5210 }
5211
5212 __ bind(&success);
5213}
5214
5215
5216void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5217 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5218 Register result_reg = ToRegister(instr->result());
5219 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5220}
5221
5222
5223void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5224 Register unclamped_reg = ToRegister(instr->unclamped());
5225 Register result_reg = ToRegister(instr->result());
5226 __ ClampUint8(result_reg, unclamped_reg);
5227}
5228
5229
5230void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5231 Register scratch = scratch0();
5232 Register input_reg = ToRegister(instr->unclamped());
5233 Register result_reg = ToRegister(instr->result());
5234 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5235 Label is_smi, done, heap_number;
5236
5237 // Both smi and heap number cases are handled.
5238 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5239
5240 // Check for a heap number.
5241 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5242 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
5243 __ beq(&heap_number);
5244
5245 // Check for undefined. Undefined is converted to zero for clamping
5246 // conversions.
5247 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
5248 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
5249 __ li(result_reg, Operand::Zero());
5250 __ b(&done);
5251
5252 // Heap number
5253 __ bind(&heap_number);
5254 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5255 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5256 __ b(&done);
5257
5258 // smi
5259 __ bind(&is_smi);
5260 __ ClampUint8(result_reg, result_reg);
5261
5262 __ bind(&done);
5263}
5264
5265
5266void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5267 DoubleRegister value_reg = ToDoubleRegister(instr->value());
5268 Register result_reg = ToRegister(instr->result());
5269
5270 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5271 __ MovDoubleHighToInt(result_reg, value_reg);
5272 } else {
5273 __ MovDoubleLowToInt(result_reg, value_reg);
5274 }
5275}
5276
5277
5278void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5279 Register hi_reg = ToRegister(instr->hi());
5280 Register lo_reg = ToRegister(instr->lo());
5281 DoubleRegister result_reg = ToDoubleRegister(instr->result());
5282#if V8_TARGET_ARCH_PPC64
5283 __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
5284#else
5285 __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
5286#endif
5287}
5288
5289
5290void LCodeGen::DoAllocate(LAllocate* instr) {
5291 class DeferredAllocate final : public LDeferredCode {
5292 public:
5293 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5294 : LDeferredCode(codegen), instr_(instr) {}
5295 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5296 LInstruction* instr() override { return instr_; }
5297
5298 private:
5299 LAllocate* instr_;
5300 };
5301
5302 DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
5303
5304 Register result = ToRegister(instr->result());
5305 Register scratch = ToRegister(instr->temp1());
5306 Register scratch2 = ToRegister(instr->temp2());
5307
5308 // Allocate memory for the object.
5309 AllocationFlags flags = TAG_OBJECT;
5310 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5311 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5312 }
5313 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5314 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5315 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5316 }
5317
5318 if (instr->size()->IsConstantOperand()) {
5319 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5320 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5321 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5322 } else {
5323 Register size = ToRegister(instr->size());
5324 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5325 }
5326
5327 __ bind(deferred->exit());
5328
5329 if (instr->hydrogen()->MustPrefillWithFiller()) {
5330 if (instr->size()->IsConstantOperand()) {
5331 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5332 __ LoadIntLiteral(scratch, size - kHeapObjectTag);
5333 } else {
5334 __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5335 }
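    // Write the one-pointer filler map into every word of the allocation,
    // storing backwards from the end.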
5336 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5337 Label loop;
5338 __ bind(&loop);
5339 __ subi(scratch, scratch, Operand(kPointerSize));
5340 __ StorePX(scratch2, MemOperand(result, scratch));
5341 __ cmpi(scratch, Operand::Zero());
5342 __ bge(&loop);
5343 }
5344}
5345
5346
5347void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5348 Register result = ToRegister(instr->result());
5349
5350 // TODO(3095996): Get rid of this. For now, we need to make the
5351 // result register contain a valid pointer because it is already
5352 // contained in the register pointer map.
5353 __ LoadSmiLiteral(result, Smi::FromInt(0));
5354
5355 PushSafepointRegistersScope scope(this);
5356 if (instr->size()->IsRegister()) {
5357 Register size = ToRegister(instr->size());
5358 DCHECK(!size.is(result));
5359 __ SmiTag(size);
5360 __ push(size);
5361 } else {
5362 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
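    // On 32-bit targets a size outside the smi range cannot be pushed as a
    // smi; such a request should never reach this point.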
5363#if !V8_TARGET_ARCH_PPC64
5364 if (size >= 0 && size <= Smi::kMaxValue) {
5365#endif
5366 __ Push(Smi::FromInt(size));
5367#if !V8_TARGET_ARCH_PPC64
5368 } else {
5369 // We should never get here at runtime; abort.
5370 __ stop("invalid allocation size");
5371 return;
5372 }
5373#endif
5374 }
5375
5376 int flags = AllocateDoubleAlignFlag::encode(
5377 instr->hydrogen()->MustAllocateDoubleAligned());
5378 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5379 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5380 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5381 } else {
5382 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5383 }
5384 __ Push(Smi::FromInt(flags));
5385
5386 CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
5387 instr->context());
5388 __ StoreToSafepointRegisterSlot(r3, result);
5389}
5390
5391
5392void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5393 DCHECK(ToRegister(instr->value()).is(r3));
5394 __ push(r3);
5395 CallRuntime(Runtime::kToFastProperties, 1, instr);
5396}
5397
5398
5399void LCodeGen::DoTypeof(LTypeof* instr) {
5400 DCHECK(ToRegister(instr->value()).is(r6));
5401 DCHECK(ToRegister(instr->result()).is(r3));
5402 Label end, do_call;
5403 Register value_register = ToRegister(instr->value());
5404 __ JumpIfNotSmi(value_register, &do_call);
5405 __ mov(r3, Operand(isolate()->factory()->number_string()));
5406 __ b(&end);
5407 __ bind(&do_call);
5408 TypeofStub stub(isolate());
5409 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5410 __ bind(&end);
5411}
5412
5413
5414void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5415 Register input = ToRegister(instr->value());
5416
5417 Condition final_branch_condition =
5418 EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
5419 instr->type_literal());
5420 if (final_branch_condition != kNoCondition) {
5421 EmitBranch(instr, final_branch_condition);
5422 }
5423}
5424
5425
5426Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
5427 Register input, Handle<String> type_name) {
5428 Condition final_branch_condition = kNoCondition;
5429 Register scratch = scratch0();
5430 Factory* factory = isolate()->factory();
5431 if (String::Equals(type_name, factory->number_string())) {
5432 __ JumpIfSmi(input, true_label);
5433 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5434 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5435 final_branch_condition = eq;
5436
5437 } else if (String::Equals(type_name, factory->string_string())) {
5438 __ JumpIfSmi(input, false_label);
5439 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5440 final_branch_condition = lt;
5441
5442 } else if (String::Equals(type_name, factory->symbol_string())) {
5443 __ JumpIfSmi(input, false_label);
5444 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5445 final_branch_condition = eq;
5446
5447 } else if (String::Equals(type_name, factory->boolean_string())) {
5448 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5449 __ beq(true_label);
5450 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5451 final_branch_condition = eq;
5452
5453 } else if (String::Equals(type_name, factory->undefined_string())) {
5454 __ CompareRoot(input, Heap::kNullValueRootIndex);
5455 __ beq(false_label);
5456 __ JumpIfSmi(input, false_label);
5457 // Check for undetectable objects => true.
5458 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5459 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5460 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5461 __ cmpi(r0, Operand::Zero());
5462 final_branch_condition = ne;
5463
5464 } else if (String::Equals(type_name, factory->function_string())) {
5465 __ JumpIfSmi(input, false_label);
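    // typeof reports "function" only for objects that are callable and not
    // undetectable.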
5466 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5467 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5468 __ andi(scratch, scratch,
5469 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5470 __ cmpi(scratch, Operand(1 << Map::kIsCallable));
5471 final_branch_condition = eq;
5472
5473 } else if (String::Equals(type_name, factory->object_string())) {
5474 __ JumpIfSmi(input, false_label);
5475 __ CompareRoot(input, Heap::kNullValueRootIndex);
5476 __ beq(true_label);
5477 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5478 __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
5479 __ blt(false_label);
5480 // Check for callable or undetectable objects => false.
5481 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5482 __ andi(r0, scratch,
5483 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5484 __ cmpi(r0, Operand::Zero());
5485 final_branch_condition = eq;
5486
5487// clang-format off
5488#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5489 } else if (String::Equals(type_name, factory->type##_string())) { \
5490 __ JumpIfSmi(input, false_label); \
5491 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
5492 __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
5493 final_branch_condition = eq;
5494 SIMD128_TYPES(SIMD128_TYPE)
5495#undef SIMD128_TYPE
5496 // clang-format on
5497
5498 } else {
5499 __ b(false_label);
5500 }
5501
5502 return final_branch_condition;
5503}
5504
5505
5506void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5507 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5508 // Ensure that we have enough space after the previous lazy-bailout
5509 // instruction for patching the code here.
5510 int current_pc = masm()->pc_offset();
5511 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5512 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5513 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5514 while (padding_size > 0) {
5515 __ nop();
5516 padding_size -= Assembler::kInstrSize;
5517 }
5518 }
5519 }
5520 last_lazy_deopt_pc_ = masm()->pc_offset();
5521}
5522
5523
5524void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5525 last_lazy_deopt_pc_ = masm()->pc_offset();
5526 DCHECK(instr->HasEnvironment());
5527 LEnvironment* env = instr->environment();
5528 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5529 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5530}
5531
5532
5533void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5534 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5535 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5536 // needed return address), even though the implementation of LAZY and EAGER is
5537 // now identical. When LAZY is eventually completely folded into EAGER, remove
5538 // the special case below.
5539 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5540 type = Deoptimizer::LAZY;
5541 }
5542
5543 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5544}
5545
5546
5547void LCodeGen::DoDummy(LDummy* instr) {
5548 // Nothing to see here, move on!
5549}
5550
5551
5552void LCodeGen::DoDummyUse(LDummyUse* instr) {
5553 // Nothing to see here, move on!
5554}
5555
5556
5557void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5558 PushSafepointRegistersScope scope(this);
5559 LoadContextFromDeferred(instr->context());
5560 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5561 RecordSafepointWithLazyDeopt(
5562 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5563 DCHECK(instr->HasEnvironment());
5564 LEnvironment* env = instr->environment();
5565 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5566}
5567
5568
5569void LCodeGen::DoStackCheck(LStackCheck* instr) {
5570 class DeferredStackCheck final : public LDeferredCode {
5571 public:
5572 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5573 : LDeferredCode(codegen), instr_(instr) {}
5574 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5575 LInstruction* instr() override { return instr_; }
5576
5577 private:
5578 LStackCheck* instr_;
5579 };
5580
5581 DCHECK(instr->HasEnvironment());
5582 LEnvironment* env = instr->environment();
5583 // There is no LLazyBailout instruction for stack-checks. We have to
5584 // prepare for lazy deoptimization explicitly here.
5585 if (instr->hydrogen()->is_function_entry()) {
5586 // Perform stack overflow check.
5587 Label done;
5588 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5589 __ cmpl(sp, ip);
5590 __ bge(&done);
5591 DCHECK(instr->context()->IsRegister());
5592 DCHECK(ToRegister(instr->context()).is(cp));
5593 CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
5594 instr);
5595 __ bind(&done);
5596 } else {
5597 DCHECK(instr->hydrogen()->is_backwards_branch());
5598 // Perform stack overflow check if this goto needs it before jumping.
5599 DeferredStackCheck* deferred_stack_check =
5600 new (zone()) DeferredStackCheck(this, instr);
5601 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5602 __ cmpl(sp, ip);
5603 __ blt(deferred_stack_check->entry());
5604 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5605 __ bind(instr->done_label());
5606 deferred_stack_check->SetExit(instr->done_label());
5607 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5608 // Don't record a deoptimization index for the safepoint here.
5609 // This will be done explicitly when emitting call and the safepoint in
5610 // the deferred code.
5611 }
5612}
5613
5614
5615void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5616 // This is a pseudo-instruction that ensures that the environment here is
5617 // properly registered for deoptimization and records the assembler's PC
5618 // offset.
5619 LEnvironment* environment = instr->environment();
5620
5621 // If the environment were already registered, we would have no way of
5622 // backpatching it with the spill slot operands.
5623 DCHECK(!environment->HasBeenRegistered());
5624 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5625
5626 GenerateOsrPrologue();
5627}
5628
5629
5630void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5631 Label use_cache, call_runtime;
5632 __ CheckEnumCache(&call_runtime);
5633
5634 __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
5635 __ b(&use_cache);
5636
5637 // Get the set of properties to enumerate.
5638 __ bind(&call_runtime);
5639 __ push(r3);
5640 CallRuntime(Runtime::kForInEnumerate, instr);
5641 __ bind(&use_cache);
5642}
5643
5644
5645void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5646 Register map = ToRegister(instr->map());
5647 Register result = ToRegister(instr->result());
5648 Label load_cache, done;
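  // An enum cache length of zero means there is nothing to enumerate; use the
  // empty fixed array as the cache.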
5649 __ EnumLength(result, map);
5650 __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
5651 __ bne(&load_cache);
5652 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5653 __ b(&done);
5654
5655 __ bind(&load_cache);
5656 __ LoadInstanceDescriptors(map, result);
5657 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5658 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5659 __ cmpi(result, Operand::Zero());
5660 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
5661
5662 __ bind(&done);
5663}
5664
5665
5666void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5667 Register object = ToRegister(instr->value());
5668 Register map = ToRegister(instr->map());
5669 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5670 __ cmp(map, scratch0());
5671 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5672}
5673
5674
5675void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5676 Register result, Register object,
5677 Register index) {
5678 PushSafepointRegistersScope scope(this);
5679 __ Push(object, index);
5680 __ li(cp, Operand::Zero());
5681 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5682 RecordSafepointWithRegisters(instr->pointer_map(), 2,
5683 Safepoint::kNoLazyDeopt);
5684 __ StoreToSafepointRegisterSlot(r3, result);
5685}
5686
5687
5688void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5689 class DeferredLoadMutableDouble final : public LDeferredCode {
5690 public:
5691 DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
5692 Register result, Register object, Register index)
5693 : LDeferredCode(codegen),
5694 instr_(instr),
5695 result_(result),
5696 object_(object),
5697 index_(index) {}
5698 void Generate() override {
5699 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5700 }
5701 LInstruction* instr() override { return instr_; }
5702
5703 private:
5704 LLoadFieldByIndex* instr_;
5705 Register result_;
5706 Register object_;
5707 Register index_;
5708 };
5709
5710 Register object = ToRegister(instr->object());
5711 Register index = ToRegister(instr->index());
5712 Register result = ToRegister(instr->result());
5713 Register scratch = scratch0();
5714
5715 DeferredLoadMutableDouble* deferred;
5716 deferred = new (zone())
5717 DeferredLoadMutableDouble(this, instr, result, object, index);
5718
5719 Label out_of_object, done;
5720
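  // The low bit of the smi index flags a field that holds a mutable heap
  // number; that case is loaded through the deferred path.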
5721 __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
5722 __ bne(deferred->entry(), cr0);
5723 __ ShiftRightArithImm(index, index, 1);
5724
5725 __ cmpi(index, Operand::Zero());
5726 __ blt(&out_of_object);
5727
5728 __ SmiToPtrArrayOffset(r0, index);
5729 __ add(scratch, object, r0);
5730 __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5731
5732 __ b(&done);
5733
5734 __ bind(&out_of_object);
5735 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5736 // The index equals the negated out-of-object property index plus 1.
5737 __ SmiToPtrArrayOffset(r0, index);
5738 __ sub(scratch, result, r0);
5739 __ LoadP(result,
5740 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
5741 __ bind(deferred->exit());
5742 __ bind(&done);
5743}
5744
5745
5746void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5747 Register context = ToRegister(instr->context());
5748 __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5749}
5750
5751
5752#undef __
5753} // namespace internal
5754} // namespace v8