1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_X87
6
7#include "src/crankshaft/x87/lithium-codegen-x87.h"
8
9#include "src/base/bits.h"
10#include "src/code-factory.h"
11#include "src/code-stubs.h"
12#include "src/codegen.h"
13#include "src/crankshaft/hydrogen-osr.h"
14#include "src/deoptimizer.h"
15#include "src/ic/ic.h"
16#include "src/ic/stub-cache.h"
17#include "src/profiler/cpu-profiler.h"
18#include "src/x87/frames-x87.h"
19
20namespace v8 {
21namespace internal {
22
23// When invoking builtins, we need to record the safepoint in the middle of
24// the invoke instruction sequence generated by the macro assembler.
25class SafepointGenerator final : public CallWrapper {
26 public:
27 SafepointGenerator(LCodeGen* codegen,
28 LPointerMap* pointers,
29 Safepoint::DeoptMode mode)
30 : codegen_(codegen),
31 pointers_(pointers),
32 deopt_mode_(mode) {}
33 virtual ~SafepointGenerator() {}
34
35 void BeforeCall(int call_size) const override {}
36
37 void AfterCall() const override {
38 codegen_->RecordSafepoint(pointers_, deopt_mode_);
39 }
40
41 private:
42 LCodeGen* codegen_;
43 LPointerMap* pointers_;
44 Safepoint::DeoptMode deopt_mode_;
45};
46
47
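// Note (added): the '__' shorthand below is the usual V8 code-generator idiom;
// it expands to the macro assembler owned by this LCodeGen.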
48#define __ masm()->
49
50bool LCodeGen::GenerateCode() {
51 LPhase phase("Z_Code generation", chunk());
52 DCHECK(is_unused());
53 status_ = GENERATING;
54
55 // Open a frame scope to indicate that there is a frame on the stack. The
56 // MANUAL indicates that the scope shouldn't actually generate code to set up
57 // the frame (that is done in GeneratePrologue).
58 FrameScope frame_scope(masm_, StackFrame::MANUAL);
59
60 support_aligned_spilled_doubles_ = info()->IsOptimizing();
61
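// Note (added): dynamic frame alignment pads the frame with one extra word so
// that spilled double slots can stay 8-byte aligned. Whether padding is needed
// is only known at run time, so the decision is emitted into the generated
// prologue (see GeneratePrologue and GenerateOsrPrologue).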
62 dynamic_frame_alignment_ = info()->IsOptimizing() &&
63 ((chunk()->num_double_slots() > 2 &&
64 !chunk()->graph()->is_recursive()) ||
65 !info()->osr_ast_id().IsNone());
66
67 return GeneratePrologue() &&
68 GenerateBody() &&
69 GenerateDeferredCode() &&
70 GenerateJumpTable() &&
71 GenerateSafepointTable();
72}
73
74
75void LCodeGen::FinishCode(Handle<Code> code) {
76 DCHECK(is_done());
77 code->set_stack_slots(GetTotalFrameSlotCount());
78 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
79 PopulateDeoptimizationData(code);
80 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
81 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
82 }
83}
84
85
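// Note (added): Windows commits stack memory one guard page at a time, so
// after esp has been moved down by a large frame we touch each page of the
// newly reserved area, starting nearest the old stack top and working down,
// so that no later store skips over an uncommitted guard page.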
86#ifdef _MSC_VER
87void LCodeGen::MakeSureStackPagesMapped(int offset) {
88 const int kPageSize = 4 * KB;
89 for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
90 __ mov(Operand(esp, offset), eax);
91 }
92}
93#endif
94
95
96bool LCodeGen::GeneratePrologue() {
97 DCHECK(is_generating());
98
99 if (info()->IsOptimizing()) {
100 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
101
102 if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
103 // Move state of dynamic frame alignment into edx.
104 __ Move(edx, Immediate(kNoAlignmentPadding));
105
106 Label do_not_pad, align_loop;
107 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
108 // Align esp + 4 to a multiple of 2 * kPointerSize.
109 __ test(esp, Immediate(kPointerSize));
110 __ j(not_zero, &do_not_pad, Label::kNear);
111 __ push(Immediate(0));
112 __ mov(ebx, esp);
113 __ mov(edx, Immediate(kAlignmentPaddingPushed));
114 // Copy arguments, receiver, and return address.
115 __ mov(ecx, Immediate(scope()->num_parameters() + 2));
116
117 __ bind(&align_loop);
118 __ mov(eax, Operand(ebx, 1 * kPointerSize));
119 __ mov(Operand(ebx, 0), eax);
120 __ add(Operand(ebx), Immediate(kPointerSize));
121 __ dec(ecx);
122 __ j(not_zero, &align_loop, Label::kNear);
123 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
124 __ bind(&do_not_pad);
125 }
126 }
127
128 info()->set_prologue_offset(masm_->pc_offset());
129 if (NeedsEagerFrame()) {
130 DCHECK(!frame_is_built_);
131 frame_is_built_ = true;
132 if (info()->IsStub()) {
133 __ StubPrologue();
134 } else {
135 __ Prologue(info()->GeneratePreagedPrologue());
136 }
137 }
138
139 if (info()->IsOptimizing() &&
140 dynamic_frame_alignment_ &&
141 FLAG_debug_code) {
142 __ test(esp, Immediate(kPointerSize));
143 __ Assert(zero, kFrameIsExpectedToBeAligned);
144 }
145
146 // Reserve space for the stack slots needed by the code.
147 int slots = GetStackSlotCount();
148 DCHECK(slots != 0 || !info()->IsOptimizing());
149 if (slots > 0) {
150 if (slots == 1) {
151 if (dynamic_frame_alignment_) {
152 __ push(edx);
153 } else {
154 __ push(Immediate(kNoAlignmentPadding));
155 }
156 } else {
157 if (FLAG_debug_code) {
158 __ sub(Operand(esp), Immediate(slots * kPointerSize));
159#ifdef _MSC_VER
160 MakeSureStackPagesMapped(slots * kPointerSize);
161#endif
162 __ push(eax);
163 __ mov(Operand(eax), Immediate(slots));
164 Label loop;
165 __ bind(&loop);
166 __ mov(MemOperand(esp, eax, times_4, 0),
167 Immediate(kSlotsZapValue));
168 __ dec(eax);
169 __ j(not_zero, &loop);
170 __ pop(eax);
171 } else {
172 __ sub(Operand(esp), Immediate(slots * kPointerSize));
173#ifdef _MSC_VER
174 MakeSureStackPagesMapped(slots * kPointerSize);
175#endif
176 }
177
178 if (support_aligned_spilled_doubles_) {
179 Comment(";;; Store dynamic frame alignment tag for spilled doubles");
180 // Store dynamic frame alignment state in the first local.
181 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
182 if (dynamic_frame_alignment_) {
183 __ mov(Operand(ebp, offset), edx);
184 } else {
185 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
186 }
187 }
188 }
189 }
190
191 // Initialize FPU state.
192 __ fninit();
193
194 return !is_aborted();
195}
196
197
198void LCodeGen::DoPrologue(LPrologue* instr) {
199 Comment(";;; Prologue begin");
200
201 // Possibly allocate a local context.
202 if (info_->num_heap_slots() > 0) {
203 Comment(";;; Allocate local context");
204 bool need_write_barrier = true;
205 // Argument to NewContext is the function, which is still in edi.
206 int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
207 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
208 if (info()->scope()->is_script_scope()) {
209 __ push(edi);
210 __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
211 __ CallRuntime(Runtime::kNewScriptContext);
212 deopt_mode = Safepoint::kLazyDeopt;
213 } else if (slots <= FastNewContextStub::kMaximumSlots) {
214 FastNewContextStub stub(isolate(), slots);
215 __ CallStub(&stub);
216 // Result of FastNewContextStub is always in new space.
217 need_write_barrier = false;
218 } else {
219 __ push(edi);
220 __ CallRuntime(Runtime::kNewFunctionContext);
221 }
222 RecordSafepoint(deopt_mode);
223
224 // Context is returned in eax. It replaces the context passed to us.
225 // It's saved on the stack and kept live in esi.
226 __ mov(esi, eax);
227 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
228
229 // Copy parameters into context if necessary.
230 int num_parameters = scope()->num_parameters();
231 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
232 for (int i = first_parameter; i < num_parameters; i++) {
233 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
234 if (var->IsContextSlot()) {
235 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
236 (num_parameters - 1 - i) * kPointerSize;
237 // Load parameter from stack.
238 __ mov(eax, Operand(ebp, parameter_offset));
239 // Store it in the context.
240 int context_offset = Context::SlotOffset(var->index());
241 __ mov(Operand(esi, context_offset), eax);
242 // Update the write barrier. This clobbers eax and ebx.
243 if (need_write_barrier) {
244 __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
245 kDontSaveFPRegs);
246 } else if (FLAG_debug_code) {
247 Label done;
248 __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
249 __ Abort(kExpectedNewSpaceObject);
250 __ bind(&done);
251 }
252 }
253 }
254 Comment(";;; End allocate local context");
255 }
256
257 Comment(";;; Prologue end");
258}
259
260
261void LCodeGen::GenerateOsrPrologue() {
262 // Generate the OSR entry prologue at the first unknown OSR value, or if there
263 // are none, at the OSR entrypoint instruction.
264 if (osr_pc_offset_ >= 0) return;
265
266 osr_pc_offset_ = masm()->pc_offset();
267
268 // Move state of dynamic frame alignment into edx.
269 __ Move(edx, Immediate(kNoAlignmentPadding));
270
271 if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
272 Label do_not_pad, align_loop;
273 // Align ebp + 4 to a multiple of 2 * kPointerSize.
274 __ test(ebp, Immediate(kPointerSize));
275 __ j(zero, &do_not_pad, Label::kNear);
276 __ push(Immediate(0));
277 __ mov(ebx, esp);
278 __ mov(edx, Immediate(kAlignmentPaddingPushed));
279
280 // Move all parts of the frame over one word. The frame consists of:
281 // unoptimized frame slots, alignment state, context, frame pointer, return
282 // address, receiver, and the arguments.
283 __ mov(ecx, Immediate(scope()->num_parameters() +
284 5 + graph()->osr()->UnoptimizedFrameSlots()));
285
286 __ bind(&align_loop);
287 __ mov(eax, Operand(ebx, 1 * kPointerSize));
288 __ mov(Operand(ebx, 0), eax);
289 __ add(Operand(ebx), Immediate(kPointerSize));
290 __ dec(ecx);
291 __ j(not_zero, &align_loop, Label::kNear);
292 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
293 __ sub(Operand(ebp), Immediate(kPointerSize));
294 __ bind(&do_not_pad);
295 }
296
297 // Save the first local, which is overwritten by the alignment state.
298 Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
299 __ push(alignment_loc);
300
301 // Set the dynamic frame alignment state.
302 __ mov(alignment_loc, edx);
303
304 // Adjust the frame size, subsuming the unoptimized frame into the
305 // optimized frame.
306 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
307 DCHECK(slots >= 1);
308 __ sub(esp, Immediate((slots - 1) * kPointerSize));
309
310 // Initialize FPU state.
311 __ fninit();
312}
313
314
315void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
316 if (instr->IsCall()) {
317 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
318 }
319 if (!instr->IsLazyBailout() && !instr->IsGap()) {
320 safepoints_.BumpLastLazySafepointIndex();
321 }
322 FlushX87StackIfNecessary(instr);
323}
324
325
326void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
327 // When returning from a function call, the FPU state must be initialized again.
328 if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
329 bool double_result = instr->HasDoubleRegisterResult();
330 if (double_result) {
331 __ lea(esp, Operand(esp, -kDoubleSize));
332 __ fstp_d(Operand(esp, 0));
333 }
334 __ fninit();
335 if (double_result) {
336 __ fld_d(Operand(esp, 0));
337 __ lea(esp, Operand(esp, kDoubleSize));
338 }
339 }
340 if (instr->IsGoto()) {
341 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
342 } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
343 !instr->IsGap() && !instr->IsReturn()) {
344 if (instr->ClobbersDoubleRegisters(isolate())) {
345 if (instr->HasDoubleRegisterResult()) {
346 DCHECK_EQ(1, x87_stack_.depth());
347 } else {
348 DCHECK_EQ(0, x87_stack_.depth());
349 }
350 }
351 __ VerifyX87StackDepth(x87_stack_.depth());
352 }
353}
354
355
356bool LCodeGen::GenerateJumpTable() {
357 if (!jump_table_.length()) return !is_aborted();
358
359 Label needs_frame;
360 Comment(";;; -------------------- Jump table --------------------");
361
362 for (int i = 0; i < jump_table_.length(); i++) {
363 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
364 __ bind(&table_entry->label);
365 Address entry = table_entry->address;
366 DeoptComment(table_entry->deopt_info);
367 if (table_entry->needs_frame) {
368 DCHECK(!info()->saves_caller_doubles());
369 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
370 __ call(&needs_frame);
371 } else {
372 __ call(entry, RelocInfo::RUNTIME_ENTRY);
373 }
374 info()->LogDeoptCallPosition(masm()->pc_offset(),
375 table_entry->deopt_info.inlining_id);
376 }
377 if (needs_frame.is_linked()) {
378 __ bind(&needs_frame);
379
380 /* stack layout
381 4: entry address
382 3: return address <-- esp
383 2: garbage
384 1: garbage
385 0: garbage
386 */
387 __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker.
388 __ push(MemOperand(esp, kPointerSize)); // Copy return address.
389 __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address.
390
391 /* stack layout
392 4: entry address
393 3: return address
394 2: garbage
395 1: return address
396 0: entry address <-- esp
397 */
398 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp.
399
400 // Copy context.
401 __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
402 __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
403 // Fill ebp with the right stack frame address.
404 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
405
406 // This variant of deopt can only be used with stubs. Since we don't
407 // have a function pointer to install in the stack frame that we're
408 // building, install a special marker there instead.
409 DCHECK(info()->IsStub());
410 __ mov(MemOperand(esp, 2 * kPointerSize),
411 Immediate(Smi::FromInt(StackFrame::STUB)));
412
413 /* stack layout
414 4: old ebp
415 3: context pointer
416 2: stub marker
417 1: return address
418 0: entry address <-- esp
419 */
420 __ ret(0); // Call the continuation without clobbering registers.
421 }
422 return !is_aborted();
423}
424
425
426bool LCodeGen::GenerateDeferredCode() {
427 DCHECK(is_generating());
428 if (deferred_.length() > 0) {
429 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
430 LDeferredCode* code = deferred_[i];
431 X87Stack copy(code->x87_stack());
432 x87_stack_ = copy;
433
434 HValue* value =
435 instructions_->at(code->instruction_index())->hydrogen_value();
436 RecordAndWritePosition(
437 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
438
439 Comment(";;; <@%d,#%d> "
440 "-------------------- Deferred %s --------------------",
441 code->instruction_index(),
442 code->instr()->hydrogen_value()->id(),
443 code->instr()->Mnemonic());
444 __ bind(code->entry());
445 if (NeedsDeferredFrame()) {
446 Comment(";;; Build frame");
447 DCHECK(!frame_is_built_);
448 DCHECK(info()->IsStub());
449 frame_is_built_ = true;
450 // Build the frame in such a way that esi isn't trashed.
451 __ push(ebp); // Caller's frame pointer.
452 __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
453 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
454 __ lea(ebp, Operand(esp, 2 * kPointerSize));
455 Comment(";;; Deferred code");
456 }
457 code->Generate();
458 if (NeedsDeferredFrame()) {
459 __ bind(code->done());
460 Comment(";;; Destroy frame");
461 DCHECK(frame_is_built_);
462 frame_is_built_ = false;
463 __ mov(esp, ebp);
464 __ pop(ebp);
465 }
466 __ jmp(code->exit());
467 }
468 }
469
470 // Deferred code is the last part of the instruction sequence. Mark
471 // the generated code as done unless we bailed out.
472 if (!is_aborted()) status_ = DONE;
473 return !is_aborted();
474}
475
476
477bool LCodeGen::GenerateSafepointTable() {
478 DCHECK(is_done());
479 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
480 // For lazy deoptimization we need space to patch a call after every call.
481 // Ensure there is always space for such patching, even if the code ends
482 // in a call.
483 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
484 while (masm()->pc_offset() < target_offset) {
485 masm()->nop();
486 }
487 }
488 safepoints_.Emit(masm(), GetTotalFrameSlotCount());
489 return !is_aborted();
490}
491
492
493Register LCodeGen::ToRegister(int code) const {
494 return Register::from_code(code);
495}
496
497
498X87Register LCodeGen::ToX87Register(int code) const {
499 return X87Register::from_code(code);
500}
501
502
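// Note (added): X87LoadForUsage brings the given register(s) to the top of the
// physical FPU stack and removes them from the virtual stack model; the values
// stay on the FPU stack, ready to be consumed by the following instruction.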
503void LCodeGen::X87LoadForUsage(X87Register reg) {
504 DCHECK(x87_stack_.Contains(reg));
505 x87_stack_.Fxch(reg);
506 x87_stack_.pop();
507}
508
509
510void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
511 DCHECK(x87_stack_.Contains(reg1));
512 DCHECK(x87_stack_.Contains(reg2));
513 if (reg1.is(reg2) && x87_stack_.depth() == 1) {
514 __ fld(x87_stack_.st(reg1));
515 x87_stack_.push(reg1);
516 x87_stack_.pop();
517 x87_stack_.pop();
518 } else {
519 x87_stack_.Fxch(reg1, 1);
520 x87_stack_.Fxch(reg2);
521 x87_stack_.pop();
522 x87_stack_.pop();
523 }
524}
525
526
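// Note (added): GetLayout encodes the tracked x87 stack as a small integer:
// the low three bits hold the stack depth and each following 3-bit field holds
// the register code of st(0), st(1), and so on. The value is pushed before a
// deopt entry (see DeoptimizeIf) so the deoptimizer can locate double values.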
527int LCodeGen::X87Stack::GetLayout() {
528 int layout = stack_depth_;
529 for (int i = 0; i < stack_depth_; i++) {
530 layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
531 }
532
533 return layout;
534}
535
536
537void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
538 DCHECK(is_mutable_);
539 DCHECK(Contains(reg) && stack_depth_ > other_slot);
540 int i = ArrayIndex(reg);
541 int st = st2idx(i);
542 if (st != other_slot) {
543 int other_i = st2idx(other_slot);
544 X87Register other = stack_[other_i];
545 stack_[other_i] = reg;
546 stack_[i] = other;
547 if (st == 0) {
548 __ fxch(other_slot);
549 } else if (other_slot == 0) {
550 __ fxch(st);
551 } else {
552 __ fxch(st);
553 __ fxch(other_slot);
554 __ fxch(st);
555 }
556 }
557}
558
559
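// Note (added): converts an FPU stack position (st(pos), where st(0) is the
// top of stack) into an index into stack_, which stores the virtual stack
// bottom-up.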
560int LCodeGen::X87Stack::st2idx(int pos) {
561 return stack_depth_ - pos - 1;
562}
563
564
565int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
566 for (int i = 0; i < stack_depth_; i++) {
567 if (stack_[i].is(reg)) return i;
568 }
569 UNREACHABLE();
570 return -1;
571}
572
573
574bool LCodeGen::X87Stack::Contains(X87Register reg) {
575 for (int i = 0; i < stack_depth_; i++) {
576 if (stack_[i].is(reg)) return true;
577 }
578 return false;
579}
580
581
582void LCodeGen::X87Stack::Free(X87Register reg) {
583 DCHECK(is_mutable_);
584 DCHECK(Contains(reg));
585 int i = ArrayIndex(reg);
586 int st = st2idx(i);
587 if (st > 0) {
588 // Keep track of how fstp(st) changes the order of the remaining elements.
589 int tos_i = st2idx(0);
590 stack_[i] = stack_[tos_i];
591 }
592 pop();
593 __ fstp(st);
594}
595
596
597void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
598 if (x87_stack_.Contains(dst)) {
599 x87_stack_.Fxch(dst);
600 __ fstp(0);
601 } else {
602 x87_stack_.push(dst);
603 }
604 X87Fld(src, opts);
605}
606
607
608void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
609 if (x87_stack_.Contains(dst)) {
610 x87_stack_.Fxch(dst);
611 __ fstp(0);
612 x87_stack_.pop();
613 // Push ST(i) onto the FPU register stack
614 __ fld(x87_stack_.st(src));
615 x87_stack_.push(dst);
616 } else {
617 // Push ST(i) onto the FPU register stack
618 __ fld(x87_stack_.st(src));
619 x87_stack_.push(dst);
620 }
621}
622
623
624void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
625 DCHECK(!src.is_reg_only());
626 switch (opts) {
627 case kX87DoubleOperand:
628 __ fld_d(src);
629 break;
630 case kX87FloatOperand:
631 __ fld_s(src);
632 break;
633 case kX87IntOperand:
634 __ fild_s(src);
635 break;
636 default:
637 UNREACHABLE();
638 }
639}
640
641
642void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
643 DCHECK(!dst.is_reg_only());
644 x87_stack_.Fxch(src);
645 switch (opts) {
646 case kX87DoubleOperand:
647 __ fst_d(dst);
648 break;
649 case kX87FloatOperand:
650 __ fst_s(dst);
651 break;
652 case kX87IntOperand:
653 __ fist_s(dst);
654 break;
655 default:
656 UNREACHABLE();
657 }
658}
659
660
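// Note (added): PrepareToWrite/CommitWrite bracket a store into an x87
// register. PrepareToWrite frees any old value held by the register and
// reserves the next virtual slot for it; CommitWrite is called once the value
// has actually been pushed onto the physical FPU stack.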
661void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
662 DCHECK(is_mutable_);
663 if (Contains(reg)) {
664 Free(reg);
665 }
666 // Mark this register as the next register to write to
667 stack_[stack_depth_] = reg;
668}
669
670
671void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
672 DCHECK(is_mutable_);
673 // Assert the reg is prepared to write, but not on the virtual stack yet
674 DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
675 stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
676 stack_depth_++;
677}
678
679
680void LCodeGen::X87PrepareBinaryOp(
681 X87Register left, X87Register right, X87Register result) {
682 // x87 instructions need to use DefineSameAsFirst, so the result must alias the left input.
683 DCHECK(result.is(left));
684 x87_stack_.Fxch(right, 1);
685 x87_stack_.Fxch(left);
686}
687
688
689void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
690 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
691 bool double_inputs = instr->HasDoubleRegisterInput();
692
693 // Flush the stack from TOS down, since Free() will rearrange the elements at TOS.
694 for (int i = stack_depth_-1; i >= 0; i--) {
695 X87Register reg = stack_[i];
696 // Skip registers which contain the inputs for the next instruction
697 // when flushing the stack
698 if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
699 continue;
700 }
701 Free(reg);
702 if (i < stack_depth_-1) i++;
703 }
704 }
705 if (instr->IsReturn()) {
706 while (stack_depth_ > 0) {
707 __ fstp(0);
708 stack_depth_--;
709 }
710 if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
711 }
712}
713
714
715void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
716 LCodeGen* cgen) {
717 // When jumping to a join block, an explicit LClobberDoubles is inserted before
718 // the LGoto, so all live x87 registers have been spilled to stack slots. The
719 // ResolvePhis phase of the register allocator guarantees that both inputs' x87
720 // stacks have the same layout, so there is no need to check stack_depth_ <= 1 here.
721 int goto_block_id = goto_instr->block_id();
722 if (current_block_id + 1 != goto_block_id) {
723 // If we have a value on the x87 stack on leaving a block, it must be a
724 // phi input. If the next block we compile is not the join block, we have
725 // to discard the stack state.
726 // Before discarding the stack state, we need to save it if the "goto block"
727 // has an unreachable last predecessor and FLAG_unreachable_code_elimination is on.
728 if (FLAG_unreachable_code_elimination) {
729 int length = goto_instr->block()->predecessors()->length();
730 bool has_unreachable_last_predecessor = false;
731 for (int i = 0; i < length; i++) {
732 HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
733 if (block->IsUnreachable() &&
734 (block->block_id() + 1) == goto_block_id) {
735 has_unreachable_last_predecessor = true;
736 }
737 }
738 if (has_unreachable_last_predecessor) {
739 if (cgen->x87_stack_map_.find(goto_block_id) ==
740 cgen->x87_stack_map_.end()) {
741 X87Stack* stack = new (cgen->zone()) X87Stack(*this);
742 cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
743 }
744 }
745 }
746
747 // Discard the stack state.
748 stack_depth_ = 0;
749 }
750}
751
752
753void LCodeGen::EmitFlushX87ForDeopt() {
754 // The deoptimizer does not support x87 registers, but as long as we only
755 // deopt from a stub it's not a problem, since we will re-materialize the
756 // original stub inputs, which can't be double registers.
757 // DCHECK(info()->IsStub());
758 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
759 __ pushfd();
760 __ VerifyX87StackDepth(x87_stack_.depth());
761 __ popfd();
762 }
763
764 // Flush X87 stack in the deoptimizer entry.
765}
766
767
768Register LCodeGen::ToRegister(LOperand* op) const {
769 DCHECK(op->IsRegister());
770 return ToRegister(op->index());
771}
772
773
774X87Register LCodeGen::ToX87Register(LOperand* op) const {
775 DCHECK(op->IsDoubleRegister());
776 return ToX87Register(op->index());
777}
778
779
780int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
781 return ToRepresentation(op, Representation::Integer32());
782}
783
784
785int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
786 const Representation& r) const {
787 HConstant* constant = chunk_->LookupConstant(op);
788 if (r.IsExternal()) {
789 return reinterpret_cast<int32_t>(
790 constant->ExternalReferenceValue().address());
791 }
792 int32_t value = constant->Integer32Value();
793 if (r.IsInteger32()) return value;
794 DCHECK(r.IsSmiOrTagged());
795 return reinterpret_cast<int32_t>(Smi::FromInt(value));
796}
797
798
799Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
800 HConstant* constant = chunk_->LookupConstant(op);
801 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
802 return constant->handle(isolate());
803}
804
805
806double LCodeGen::ToDouble(LConstantOperand* op) const {
807 HConstant* constant = chunk_->LookupConstant(op);
808 DCHECK(constant->HasDoubleValue());
809 return constant->DoubleValue();
810}
811
812
813ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
814 HConstant* constant = chunk_->LookupConstant(op);
815 DCHECK(constant->HasExternalReferenceValue());
816 return constant->ExternalReferenceValue();
817}
818
819
820bool LCodeGen::IsInteger32(LConstantOperand* op) const {
821 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
822}
823
824
825bool LCodeGen::IsSmi(LConstantOperand* op) const {
826 return chunk_->LookupLiteralRepresentation(op).IsSmi();
827}
828
829
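// Note (added): when no frame has been built, parameters are addressed
// relative to esp. This translates a (negative) parameter operand index into
// an esp-relative offset; kPCOnStackSize skips the return address.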
830static int ArgumentsOffsetWithoutFrame(int index) {
831 DCHECK(index < 0);
832 return -(index + 1) * kPointerSize + kPCOnStackSize;
833}
834
835
836Operand LCodeGen::ToOperand(LOperand* op) const {
837 if (op->IsRegister()) return Operand(ToRegister(op));
838 DCHECK(!op->IsDoubleRegister());
839 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
840 if (NeedsEagerFrame()) {
841 return Operand(ebp, FrameSlotToFPOffset(op->index()));
842 } else {
843 // No eager frame has been built, so address the parameter relative to
844 // the stack pointer.
845 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
846 }
847}
848
849
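// Note (added): returns an operand addressing the upper word of a double
// stack slot.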
850Operand LCodeGen::HighOperand(LOperand* op) {
851 DCHECK(op->IsDoubleStackSlot());
852 if (NeedsEagerFrame()) {
853 return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
854 } else {
855 // No eager frame has been built, so address the parameter relative to
856 // the stack pointer.
857 return Operand(
858 esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
859 }
860}
861
862
863void LCodeGen::WriteTranslation(LEnvironment* environment,
864 Translation* translation) {
865 if (environment == NULL) return;
866
867 // The translation includes one command per value in the environment.
868 int translation_size = environment->translation_size();
869
870 WriteTranslation(environment->outer(), translation);
871 WriteTranslationFrame(environment, translation);
872
873 int object_index = 0;
874 int dematerialized_index = 0;
875 for (int i = 0; i < translation_size; ++i) {
876 LOperand* value = environment->values()->at(i);
877 AddToTranslation(environment,
878 translation,
879 value,
880 environment->HasTaggedValueAt(i),
881 environment->HasUint32ValueAt(i),
882 &object_index,
883 &dematerialized_index);
884 }
885}
886
887
888void LCodeGen::AddToTranslation(LEnvironment* environment,
889 Translation* translation,
890 LOperand* op,
891 bool is_tagged,
892 bool is_uint32,
893 int* object_index_pointer,
894 int* dematerialized_index_pointer) {
895 if (op == LEnvironment::materialization_marker()) {
896 int object_index = (*object_index_pointer)++;
897 if (environment->ObjectIsDuplicateAt(object_index)) {
898 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
899 translation->DuplicateObject(dupe_of);
900 return;
901 }
902 int object_length = environment->ObjectLengthAt(object_index);
903 if (environment->ObjectIsArgumentsAt(object_index)) {
904 translation->BeginArgumentsObject(object_length);
905 } else {
906 translation->BeginCapturedObject(object_length);
907 }
908 int dematerialized_index = *dematerialized_index_pointer;
909 int env_offset = environment->translation_size() + dematerialized_index;
910 *dematerialized_index_pointer += object_length;
911 for (int i = 0; i < object_length; ++i) {
912 LOperand* value = environment->values()->at(env_offset + i);
913 AddToTranslation(environment,
914 translation,
915 value,
916 environment->HasTaggedValueAt(env_offset + i),
917 environment->HasUint32ValueAt(env_offset + i),
918 object_index_pointer,
919 dematerialized_index_pointer);
920 }
921 return;
922 }
923
924 if (op->IsStackSlot()) {
925 int index = op->index();
926 if (is_tagged) {
927 translation->StoreStackSlot(index);
928 } else if (is_uint32) {
929 translation->StoreUint32StackSlot(index);
930 } else {
931 translation->StoreInt32StackSlot(index);
932 }
933 } else if (op->IsDoubleStackSlot()) {
934 int index = op->index();
935 translation->StoreDoubleStackSlot(index);
936 } else if (op->IsRegister()) {
937 Register reg = ToRegister(op);
938 if (is_tagged) {
939 translation->StoreRegister(reg);
940 } else if (is_uint32) {
941 translation->StoreUint32Register(reg);
942 } else {
943 translation->StoreInt32Register(reg);
944 }
945 } else if (op->IsDoubleRegister()) {
946 X87Register reg = ToX87Register(op);
947 translation->StoreDoubleRegister(reg);
948 } else if (op->IsConstantOperand()) {
949 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
950 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
951 translation->StoreLiteral(src_index);
952 } else {
953 UNREACHABLE();
954 }
955}
956
957
958void LCodeGen::CallCodeGeneric(Handle<Code> code,
959 RelocInfo::Mode mode,
960 LInstruction* instr,
961 SafepointMode safepoint_mode) {
962 DCHECK(instr != NULL);
963 __ call(code, mode);
964 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
965
966 // Signal that we don't inline smi code before these stubs in the
967 // optimizing code generator.
968 if (code->kind() == Code::BINARY_OP_IC ||
969 code->kind() == Code::COMPARE_IC) {
970 __ nop();
971 }
972}
973
974
975void LCodeGen::CallCode(Handle<Code> code,
976 RelocInfo::Mode mode,
977 LInstruction* instr) {
978 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
979}
980
981
982void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
983 LInstruction* instr, SaveFPRegsMode save_doubles) {
984 DCHECK(instr != NULL);
985 DCHECK(instr->HasPointerMap());
986
987 __ CallRuntime(fun, argc, save_doubles);
988
989 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
990
991 DCHECK(info()->is_calling());
992}
993
994
995void LCodeGen::LoadContextFromDeferred(LOperand* context) {
996 if (context->IsRegister()) {
997 if (!ToRegister(context).is(esi)) {
998 __ mov(esi, ToRegister(context));
999 }
1000 } else if (context->IsStackSlot()) {
1001 __ mov(esi, ToOperand(context));
1002 } else if (context->IsConstantOperand()) {
1003 HConstant* constant =
1004 chunk_->LookupConstant(LConstantOperand::cast(context));
1005 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
1006 } else {
1007 UNREACHABLE();
1008 }
1009}
1010
1011void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
1012 int argc,
1013 LInstruction* instr,
1014 LOperand* context) {
1015 LoadContextFromDeferred(context);
1016
1017 __ CallRuntimeSaveDoubles(id);
1018 RecordSafepointWithRegisters(
1019 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
1020
1021 DCHECK(info()->is_calling());
1022}
1023
1024
1025void LCodeGen::RegisterEnvironmentForDeoptimization(
1026 LEnvironment* environment, Safepoint::DeoptMode mode) {
1027 environment->set_has_been_used();
1028 if (!environment->HasBeenRegistered()) {
1029 // Physical stack frame layout:
1030 // -x ............. -4 0 ..................................... y
1031 // [incoming arguments] [spill slots] [pushed outgoing arguments]
1032
1033 // Layout of the environment:
1034 // 0 ..................................................... size-1
1035 // [parameters] [locals] [expression stack including arguments]
1036
1037 // Layout of the translation:
1038 // 0 ........................................................ size - 1 + 4
1039 // [expression stack including arguments] [locals] [4 words] [parameters]
1040 // |>------------ translation_size ------------<|
1041
1042 int frame_count = 0;
1043 int jsframe_count = 0;
1044 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
1045 ++frame_count;
1046 if (e->frame_type() == JS_FUNCTION) {
1047 ++jsframe_count;
1048 }
1049 }
1050 Translation translation(&translations_, frame_count, jsframe_count, zone());
1051 WriteTranslation(environment, &translation);
1052 int deoptimization_index = deoptimizations_.length();
1053 int pc_offset = masm()->pc_offset();
1054 environment->Register(deoptimization_index,
1055 translation.index(),
1056 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
1057 deoptimizations_.Add(environment, zone());
1058 }
1059}
1060
1061
1062void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
1063 Deoptimizer::DeoptReason deopt_reason,
1064 Deoptimizer::BailoutType bailout_type) {
1065 LEnvironment* environment = instr->environment();
1066 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
1067 DCHECK(environment->HasBeenRegistered());
1068 int id = environment->deoptimization_index();
1069 Address entry =
1070 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
1071 if (entry == NULL) {
1072 Abort(kBailoutWasNotPrepared);
1073 return;
1074 }
1075
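// Note (added): with --deopt-every-n-times, a global counter is decremented
// here and a deoptimization is forced whenever it reaches zero; this is a
// stress-testing mode only and does not affect normal execution.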
1076 if (DeoptEveryNTimes()) {
1077 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
1078 Label no_deopt;
1079 __ pushfd();
1080 __ push(eax);
1081 __ mov(eax, Operand::StaticVariable(count));
1082 __ sub(eax, Immediate(1));
1083 __ j(not_zero, &no_deopt, Label::kNear);
1084 if (FLAG_trap_on_deopt) __ int3();
1085 __ mov(eax, Immediate(FLAG_deopt_every_n_times));
1086 __ mov(Operand::StaticVariable(count), eax);
1087 __ pop(eax);
1088 __ popfd();
1089 DCHECK(frame_is_built_);
1090 // Put the x87 stack layout in TOS.
1091 if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
1092 __ push(Immediate(x87_stack_.GetLayout()));
1093 __ fild_s(MemOperand(esp, 0));
1094 // Don't touch eflags.
1095 __ lea(esp, Operand(esp, kPointerSize));
1096 __ call(entry, RelocInfo::RUNTIME_ENTRY);
1097 __ bind(&no_deopt);
1098 __ mov(Operand::StaticVariable(count), eax);
1099 __ pop(eax);
1100 __ popfd();
1101 }
1102
1103 // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
1104 // the correct location.
1105 {
1106 Label done;
1107 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1108 if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
1109
1110 int x87_stack_layout = x87_stack_.GetLayout();
1111 __ push(Immediate(x87_stack_layout));
1112 __ fild_s(MemOperand(esp, 0));
1113 // Don't touch eflags.
1114 __ lea(esp, Operand(esp, kPointerSize));
1115 __ bind(&done);
1116 }
1117
1118 if (info()->ShouldTrapOnDeopt()) {
1119 Label done;
1120 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1121 __ int3();
1122 __ bind(&done);
1123 }
1124
1125 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
1126
1127 DCHECK(info()->IsStub() || frame_is_built_);
1128 if (cc == no_condition && frame_is_built_) {
1129 DeoptComment(deopt_info);
1130 __ call(entry, RelocInfo::RUNTIME_ENTRY);
1131 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
1132 } else {
1133 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
1134 !frame_is_built_);
1135 // We often have several deopts to the same entry; reuse the last
1136 // jump table entry if this is the case.
1137 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
1138 jump_table_.is_empty() ||
1139 !table_entry.IsEquivalentTo(jump_table_.last())) {
1140 jump_table_.Add(table_entry, zone());
1141 }
1142 if (cc == no_condition) {
1143 __ jmp(&jump_table_.last().label);
1144 } else {
1145 __ j(cc, &jump_table_.last().label);
1146 }
1147 }
1148}
1149
1150
1151void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
1152 Deoptimizer::DeoptReason deopt_reason) {
1153 Deoptimizer::BailoutType bailout_type = info()->IsStub()
1154 ? Deoptimizer::LAZY
1155 : Deoptimizer::EAGER;
1156 DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
1157}
1158
1159
1160void LCodeGen::RecordSafepointWithLazyDeopt(
1161 LInstruction* instr, SafepointMode safepoint_mode) {
1162 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1163 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1164 } else {
1165 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1166 RecordSafepointWithRegisters(
1167 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
1168 }
1169}
1170
1171
1172void LCodeGen::RecordSafepoint(
1173 LPointerMap* pointers,
1174 Safepoint::Kind kind,
1175 int arguments,
1176 Safepoint::DeoptMode deopt_mode) {
1177 DCHECK(kind == expected_safepoint_kind_);
1178 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1179 Safepoint safepoint =
1180 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
1181 for (int i = 0; i < operands->length(); i++) {
1182 LOperand* pointer = operands->at(i);
1183 if (pointer->IsStackSlot()) {
1184 safepoint.DefinePointerSlot(pointer->index(), zone());
1185 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1186 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1187 }
1188 }
1189}
1190
1191
1192void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1193 Safepoint::DeoptMode mode) {
1194 RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
1195}
1196
1197
1198void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
1199 LPointerMap empty_pointers(zone());
1200 RecordSafepoint(&empty_pointers, mode);
1201}
1202
1203
1204void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1205 int arguments,
1206 Safepoint::DeoptMode mode) {
1207 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
1208}
1209
1210
1211void LCodeGen::RecordAndWritePosition(int position) {
1212 if (position == RelocInfo::kNoPosition) return;
1213 masm()->positions_recorder()->RecordPosition(position);
1214 masm()->positions_recorder()->WriteRecordedPositions();
1215}
1216
1217
1218static const char* LabelType(LLabel* label) {
1219 if (label->is_loop_header()) return " (loop header)";
1220 if (label->is_osr_entry()) return " (OSR entry)";
1221 return "";
1222}
1223
1224
1225void LCodeGen::DoLabel(LLabel* label) {
1226 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1227 current_instruction_,
1228 label->hydrogen_value()->id(),
1229 label->block_id(),
1230 LabelType(label));
1231 __ bind(label->label());
1232 current_block_ = label->block_id();
1233 if (label->block()->predecessors()->length() > 1) {
1234 // A join block's x87 stack is that of its last visited predecessor.
1235 // If the last visited predecessor block is unreachable, the stack state
1236 // will be wrong. In that case, use the x87 stack of a reachable predecessor.
1237 X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
1238 // Restore x87 stack.
1239 if (it != x87_stack_map_.end()) {
1240 x87_stack_ = *(it->second);
1241 }
1242 }
1243 DoGap(label);
1244}
1245
1246
1247void LCodeGen::DoParallelMove(LParallelMove* move) {
1248 resolver_.Resolve(move);
1249}
1250
1251
1252void LCodeGen::DoGap(LGap* gap) {
1253 for (int i = LGap::FIRST_INNER_POSITION;
1254 i <= LGap::LAST_INNER_POSITION;
1255 i++) {
1256 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1257 LParallelMove* move = gap->GetParallelMove(inner_pos);
1258 if (move != NULL) DoParallelMove(move);
1259 }
1260}
1261
1262
1263void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1264 DoGap(instr);
1265}
1266
1267
1268void LCodeGen::DoParameter(LParameter* instr) {
1269 // Nothing to do.
1270}
1271
1272
1273void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1274 GenerateOsrPrologue();
1275}
1276
1277
1278void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1279 Register dividend = ToRegister(instr->dividend());
1280 int32_t divisor = instr->divisor();
1281 DCHECK(dividend.is(ToRegister(instr->result())));
1282
1283 // Theoretically, a variation of the branch-free code for integer division by
1284 // a power of 2 (calculating the remainder via an additional multiplication
1285 // (which gets simplified to an 'and') and subtraction) should be faster, and
1286 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1287 // indicate that positive dividends are heavily favored, so the branching
1288 // version performs better.
1289 HMod* hmod = instr->hydrogen();
1290 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1291 Label dividend_is_not_negative, done;
1292 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1293 __ test(dividend, dividend);
1294 __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1295 // Note that this is correct even for kMinInt operands.
1296 __ neg(dividend);
1297 __ and_(dividend, mask);
1298 __ neg(dividend);
1299 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1300 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1301 }
1302 __ jmp(&done, Label::kNear);
1303 }
1304
1305 __ bind(&dividend_is_not_negative);
1306 __ and_(dividend, mask);
1307 __ bind(&done);
1308}
1309
1310
1311void LCodeGen::DoModByConstI(LModByConstI* instr) {
1312 Register dividend = ToRegister(instr->dividend());
1313 int32_t divisor = instr->divisor();
1314 DCHECK(ToRegister(instr->result()).is(eax));
1315
1316 if (divisor == 0) {
1317 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1318 return;
1319 }
1320
1321 __ TruncatingDiv(dividend, Abs(divisor));
1322 __ imul(edx, edx, Abs(divisor));
1323 __ mov(eax, dividend);
1324 __ sub(eax, edx);
1325
1326 // Check for negative zero.
1327 HMod* hmod = instr->hydrogen();
1328 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1329 Label remainder_not_zero;
1330 __ j(not_zero, &remainder_not_zero, Label::kNear);
1331 __ cmp(dividend, Immediate(0));
1332 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1333 __ bind(&remainder_not_zero);
1334 }
1335}
1336
1337
1338void LCodeGen::DoModI(LModI* instr) {
1339 HMod* hmod = instr->hydrogen();
1340
1341 Register left_reg = ToRegister(instr->left());
1342 DCHECK(left_reg.is(eax));
1343 Register right_reg = ToRegister(instr->right());
1344 DCHECK(!right_reg.is(eax));
1345 DCHECK(!right_reg.is(edx));
1346 Register result_reg = ToRegister(instr->result());
1347 DCHECK(result_reg.is(edx));
1348
1349 Label done;
1350 // Check for x % 0, idiv would signal a divide error. We have to
1351 // deopt in this case because we can't return a NaN.
1352 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1353 __ test(right_reg, Operand(right_reg));
1354 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1355 }
1356
1357 // Check for kMinInt % -1, idiv would signal a divide error. We
1358 // have to deopt if we care about -0, because we can't return that.
1359 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1360 Label no_overflow_possible;
1361 __ cmp(left_reg, kMinInt);
1362 __ j(not_equal, &no_overflow_possible, Label::kNear);
1363 __ cmp(right_reg, -1);
1364 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1365 DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
1366 } else {
1367 __ j(not_equal, &no_overflow_possible, Label::kNear);
1368 __ Move(result_reg, Immediate(0));
1369 __ jmp(&done, Label::kNear);
1370 }
1371 __ bind(&no_overflow_possible);
1372 }
1373
1374 // Sign extend dividend in eax into edx:eax.
1375 __ cdq();
1376
1377 // If we care about -0, test if the dividend is <0 and the result is 0.
1378 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1379 Label positive_left;
1380 __ test(left_reg, Operand(left_reg));
1381 __ j(not_sign, &positive_left, Label::kNear);
1382 __ idiv(right_reg);
1383 __ test(result_reg, Operand(result_reg));
1384 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1385 __ jmp(&done, Label::kNear);
1386 __ bind(&positive_left);
1387 }
1388 __ idiv(right_reg);
1389 __ bind(&done);
1390}
1391
1392
1393void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1394 Register dividend = ToRegister(instr->dividend());
1395 int32_t divisor = instr->divisor();
1396 Register result = ToRegister(instr->result());
1397 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1398 DCHECK(!result.is(dividend));
1399
1400 // Check for (0 / -x) that will produce negative zero.
1401 HDiv* hdiv = instr->hydrogen();
1402 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1403 __ test(dividend, dividend);
1404 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1405 }
1406 // Check for (kMinInt / -1).
1407 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1408 __ cmp(dividend, kMinInt);
1409 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1410 }
1411 // Deoptimize if remainder will not be 0.
1412 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1413 divisor != 1 && divisor != -1) {
1414 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1415 __ test(dividend, Immediate(mask));
1416 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
1417 }
1418 __ Move(result, dividend);
1419 int32_t shift = WhichPowerOf2Abs(divisor);
1420 if (shift > 0) {
1421 // The arithmetic shift is always OK; the 'if' is an optimization only.
1422 if (shift > 1) __ sar(result, 31);
1423 __ shr(result, 32 - shift);
1424 __ add(result, dividend);
1425 __ sar(result, shift);
1426 }
1427 if (divisor < 0) __ neg(result);
1428}
1429
1430
1431void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1432 Register dividend = ToRegister(instr->dividend());
1433 int32_t divisor = instr->divisor();
1434 DCHECK(ToRegister(instr->result()).is(edx));
1435
1436 if (divisor == 0) {
1437 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1438 return;
1439 }
1440
1441 // Check for (0 / -x) that will produce negative zero.
1442 HDiv* hdiv = instr->hydrogen();
1443 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1444 __ test(dividend, dividend);
1445 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1446 }
1447
1448 __ TruncatingDiv(dividend, Abs(divisor));
1449 if (divisor < 0) __ neg(edx);
1450
1451 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1452 __ mov(eax, edx);
1453 __ imul(eax, eax, divisor);
1454 __ sub(eax, dividend);
1455 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
1456 }
1457}
1458
1459
1460// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1461void LCodeGen::DoDivI(LDivI* instr) {
1462 HBinaryOperation* hdiv = instr->hydrogen();
1463 Register dividend = ToRegister(instr->dividend());
1464 Register divisor = ToRegister(instr->divisor());
1465 Register remainder = ToRegister(instr->temp());
1466 DCHECK(dividend.is(eax));
1467 DCHECK(remainder.is(edx));
1468 DCHECK(ToRegister(instr->result()).is(eax));
1469 DCHECK(!divisor.is(eax));
1470 DCHECK(!divisor.is(edx));
1471
1472 // Check for x / 0.
1473 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1474 __ test(divisor, divisor);
1475 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1476 }
1477
1478 // Check for (0 / -x) that will produce negative zero.
1479 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1480 Label dividend_not_zero;
1481 __ test(dividend, dividend);
1482 __ j(not_zero, &dividend_not_zero, Label::kNear);
1483 __ test(divisor, divisor);
1484 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1485 __ bind(&dividend_not_zero);
1486 }
1487
1488 // Check for (kMinInt / -1).
1489 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1490 Label dividend_not_min_int;
1491 __ cmp(dividend, kMinInt);
1492 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1493 __ cmp(divisor, -1);
1494 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1495 __ bind(&dividend_not_min_int);
1496 }
1497
1498 // Sign extend to edx (= remainder).
1499 __ cdq();
1500 __ idiv(divisor);
1501
1502 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1503 // Deoptimize if remainder is not 0.
1504 __ test(remainder, remainder);
1505 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
1506 }
1507}
1508
1509
1510void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1511 Register dividend = ToRegister(instr->dividend());
1512 int32_t divisor = instr->divisor();
1513 DCHECK(dividend.is(ToRegister(instr->result())));
1514
1515 // If the divisor is positive, things are easy: There can be no deopts and we
1516 // can simply do an arithmetic right shift.
1517 if (divisor == 1) return;
1518 int32_t shift = WhichPowerOf2Abs(divisor);
1519 if (divisor > 1) {
1520 __ sar(dividend, shift);
1521 return;
1522 }
1523
1524 // If the divisor is negative, we have to negate and handle edge cases.
1525 __ neg(dividend);
1526 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1527 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1528 }
1529
1530 // Dividing by -1 is basically negation, unless we overflow.
1531 if (divisor == -1) {
1532 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1533 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1534 }
1535 return;
1536 }
1537
1538 // If the negation could not overflow, simply shifting is OK.
1539 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1540 __ sar(dividend, shift);
1541 return;
1542 }
1543
1544 Label not_kmin_int, done;
1545 __ j(no_overflow, &not_kmin_int, Label::kNear);
1546 __ mov(dividend, Immediate(kMinInt / divisor));
1547 __ jmp(&done, Label::kNear);
1548 __ bind(&not_kmin_int);
1549 __ sar(dividend, shift);
1550 __ bind(&done);
1551}
1552
1553
1554void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1555 Register dividend = ToRegister(instr->dividend());
1556 int32_t divisor = instr->divisor();
1557 DCHECK(ToRegister(instr->result()).is(edx));
1558
1559 if (divisor == 0) {
1560 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1561 return;
1562 }
1563
1564 // Check for (0 / -x) that will produce negative zero.
1565 HMathFloorOfDiv* hdiv = instr->hydrogen();
1566 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1567 __ test(dividend, dividend);
1568 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1569 }
1570
1571 // Easy case: We need no dynamic check for the dividend and the flooring
1572 // division is the same as the truncating division.
1573 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1574 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1575 __ TruncatingDiv(dividend, Abs(divisor));
1576 if (divisor < 0) __ neg(edx);
1577 return;
1578 }
1579
1580 // In the general case we may need to adjust before and after the truncating
1581 // division to get a flooring division.
1582 Register temp = ToRegister(instr->temp3());
1583 DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
1584 Label needs_adjustment, done;
1585 __ cmp(dividend, Immediate(0));
1586 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1587 __ TruncatingDiv(dividend, Abs(divisor));
1588 if (divisor < 0) __ neg(edx);
1589 __ jmp(&done, Label::kNear);
1590 __ bind(&needs_adjustment);
1591 __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1592 __ TruncatingDiv(temp, Abs(divisor));
1593 if (divisor < 0) __ neg(edx);
1594 __ dec(edx);
1595 __ bind(&done);
1596}
1597
1598
1599// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1600void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1601 HBinaryOperation* hdiv = instr->hydrogen();
1602 Register dividend = ToRegister(instr->dividend());
1603 Register divisor = ToRegister(instr->divisor());
1604 Register remainder = ToRegister(instr->temp());
1605 Register result = ToRegister(instr->result());
1606 DCHECK(dividend.is(eax));
1607 DCHECK(remainder.is(edx));
1608 DCHECK(result.is(eax));
1609 DCHECK(!divisor.is(eax));
1610 DCHECK(!divisor.is(edx));
1611
1612 // Check for x / 0.
1613 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1614 __ test(divisor, divisor);
1615 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1616 }
1617
1618 // Check for (0 / -x) that will produce negative zero.
1619 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1620 Label dividend_not_zero;
1621 __ test(dividend, dividend);
1622 __ j(not_zero, &dividend_not_zero, Label::kNear);
1623 __ test(divisor, divisor);
1624 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1625 __ bind(&dividend_not_zero);
1626 }
1627
1628 // Check for (kMinInt / -1).
1629 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1630 Label dividend_not_min_int;
1631 __ cmp(dividend, kMinInt);
1632 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1633 __ cmp(divisor, -1);
1634 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1635 __ bind(&dividend_not_min_int);
1636 }
1637
1638 // Sign extend to edx (= remainder).
1639 __ cdq();
1640 __ idiv(divisor);
1641
1642 Label done;
1643 __ test(remainder, remainder);
1644 __ j(zero, &done, Label::kNear);
1645 __ xor_(remainder, divisor);
1646 __ sar(remainder, 31);
1647 __ add(result, remainder);
1648 __ bind(&done);
1649}
1650
1651
1652void LCodeGen::DoMulI(LMulI* instr) {
1653 Register left = ToRegister(instr->left());
1654 LOperand* right = instr->right();
1655
1656 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1657 __ mov(ToRegister(instr->temp()), left);
1658 }
1659
1660 if (right->IsConstantOperand()) {
1661 // Try strength reductions on the multiplication.
1662 // All replacement instructions are at most as long as the imul
1663 // and have better latency.
1664 int constant = ToInteger32(LConstantOperand::cast(right));
1665 if (constant == -1) {
1666 __ neg(left);
1667 } else if (constant == 0) {
1668 __ xor_(left, Operand(left));
1669 } else if (constant == 2) {
1670 __ add(left, Operand(left));
1671 } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1672 // If we know that the multiplication can't overflow, it's safe to
1673 // use instructions that don't set the overflow flag for the
1674 // multiplication.
1675 switch (constant) {
1676 case 1:
1677 // Do nothing.
1678 break;
1679 case 3:
1680 __ lea(left, Operand(left, left, times_2, 0));
1681 break;
1682 case 4:
1683 __ shl(left, 2);
1684 break;
1685 case 5:
1686 __ lea(left, Operand(left, left, times_4, 0));
1687 break;
1688 case 8:
1689 __ shl(left, 3);
1690 break;
1691 case 9:
1692 __ lea(left, Operand(left, left, times_8, 0));
1693 break;
1694 case 16:
1695 __ shl(left, 4);
1696 break;
1697 default:
1698 __ imul(left, left, constant);
1699 break;
1700 }
1701 } else {
1702 __ imul(left, left, constant);
1703 }
1704 } else {
1705 if (instr->hydrogen()->representation().IsSmi()) {
1706 __ SmiUntag(left);
1707 }
1708 __ imul(left, ToOperand(right));
1709 }
1710
1711 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1712 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1713 }
1714
1715 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1716 // Bail out if the result is supposed to be negative zero.
1717 Label done;
1718 __ test(left, Operand(left));
1719 __ j(not_zero, &done);
1720 if (right->IsConstantOperand()) {
1721 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1722 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
1723 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1724 __ cmp(ToRegister(instr->temp()), Immediate(0));
1725 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1726 }
1727 } else {
1728 // Test the non-zero operand for negative sign.
1729 __ or_(ToRegister(instr->temp()), ToOperand(right));
1730 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1731 }
1732 __ bind(&done);
1733 }
1734}
1735
1736
1737void LCodeGen::DoBitI(LBitI* instr) {
1738 LOperand* left = instr->left();
1739 LOperand* right = instr->right();
1740 DCHECK(left->Equals(instr->result()));
1741 DCHECK(left->IsRegister());
1742
1743 if (right->IsConstantOperand()) {
1744 int32_t right_operand =
1745 ToRepresentation(LConstantOperand::cast(right),
1746 instr->hydrogen()->representation());
1747 switch (instr->op()) {
1748 case Token::BIT_AND:
1749 __ and_(ToRegister(left), right_operand);
1750 break;
1751 case Token::BIT_OR:
1752 __ or_(ToRegister(left), right_operand);
1753 break;
1754 case Token::BIT_XOR:
1755 if (right_operand == int32_t(~0)) {
1756 __ not_(ToRegister(left));
1757 } else {
1758 __ xor_(ToRegister(left), right_operand);
1759 }
1760 break;
1761 default:
1762 UNREACHABLE();
1763 break;
1764 }
1765 } else {
1766 switch (instr->op()) {
1767 case Token::BIT_AND:
1768 __ and_(ToRegister(left), ToOperand(right));
1769 break;
1770 case Token::BIT_OR:
1771 __ or_(ToRegister(left), ToOperand(right));
1772 break;
1773 case Token::BIT_XOR:
1774 __ xor_(ToRegister(left), ToOperand(right));
1775 break;
1776 default:
1777 UNREACHABLE();
1778 break;
1779 }
1780 }
1781}
1782
1783
1784void LCodeGen::DoShiftI(LShiftI* instr) {
1785 LOperand* left = instr->left();
1786 LOperand* right = instr->right();
1787 DCHECK(left->Equals(instr->result()));
1788 DCHECK(left->IsRegister());
1789 if (right->IsRegister()) {
1790 DCHECK(ToRegister(right).is(ecx));
1791
1792 switch (instr->op()) {
1793 case Token::ROR:
1794 __ ror_cl(ToRegister(left));
1795 break;
1796 case Token::SAR:
1797 __ sar_cl(ToRegister(left));
1798 break;
1799 case Token::SHR:
1800 __ shr_cl(ToRegister(left));
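        // A logical shift yields an unsigned result. When the variable shift
        // count happens to be zero, a negative input stays negative, i.e. it
        // denotes a uint32 above kMaxInt that the int32 representation cannot
        // hold, hence the deopt on a set sign bit below.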
1801 if (instr->can_deopt()) {
1802 __ test(ToRegister(left), ToRegister(left));
1803 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
1804 }
1805 break;
1806 case Token::SHL:
1807 __ shl_cl(ToRegister(left));
1808 break;
1809 default:
1810 UNREACHABLE();
1811 break;
1812 }
1813 } else {
1814 int value = ToInteger32(LConstantOperand::cast(right));
1815 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1816 switch (instr->op()) {
1817 case Token::ROR:
1818 if (shift_count == 0 && instr->can_deopt()) {
1819 __ test(ToRegister(left), ToRegister(left));
1820 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
1821 } else {
1822 __ ror(ToRegister(left), shift_count);
1823 }
1824 break;
1825 case Token::SAR:
1826 if (shift_count != 0) {
1827 __ sar(ToRegister(left), shift_count);
1828 }
1829 break;
1830 case Token::SHR:
1831 if (shift_count != 0) {
1832 __ shr(ToRegister(left), shift_count);
1833 } else if (instr->can_deopt()) {
1834 __ test(ToRegister(left), ToRegister(left));
1835 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
1836 }
1837 break;
1838 case Token::SHL:
1839 if (shift_count != 0) {
1840 if (instr->hydrogen_value()->representation().IsSmi() &&
1841 instr->can_deopt()) {
1842 if (shift_count != 1) {
1843 __ shl(ToRegister(left), shift_count - 1);
1844 }
1845 __ SmiTag(ToRegister(left));
1846 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1847 } else {
1848 __ shl(ToRegister(left), shift_count);
1849 }
1850 }
1851 break;
1852 default:
1853 UNREACHABLE();
1854 break;
1855 }
1856 }
1857}
1858
1859
1860void LCodeGen::DoSubI(LSubI* instr) {
1861 LOperand* left = instr->left();
1862 LOperand* right = instr->right();
1863 DCHECK(left->Equals(instr->result()));
1864
1865 if (right->IsConstantOperand()) {
1866 __ sub(ToOperand(left),
1867 ToImmediate(right, instr->hydrogen()->representation()));
1868 } else {
1869 __ sub(ToRegister(left), ToOperand(right));
1870 }
1871 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1872 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1873 }
1874}
1875
1876
1877void LCodeGen::DoConstantI(LConstantI* instr) {
1878 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1879}
1880
1881
1882void LCodeGen::DoConstantS(LConstantS* instr) {
1883 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1884}
1885
1886
1887void LCodeGen::DoConstantD(LConstantD* instr) {
1888 uint64_t const bits = instr->bits();
1889 uint32_t const lower = static_cast<uint32_t>(bits);
1890 uint32_t const upper = static_cast<uint32_t>(bits >> 32);
1891 DCHECK(instr->result()->IsDoubleRegister());
1892
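  // The double is materialized by pushing its two 32-bit halves (the upper
  // word ends up at the higher address, matching little-endian layout),
  // loading the 8-byte value from the stack into the target x87 register,
  // and then releasing the stack slot again.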
1893 __ push(Immediate(upper));
1894 __ push(Immediate(lower));
1895 X87Register reg = ToX87Register(instr->result());
1896 X87Mov(reg, Operand(esp, 0));
1897 __ add(Operand(esp), Immediate(kDoubleSize));
1898}
1899
1900
1901void LCodeGen::DoConstantE(LConstantE* instr) {
1902 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
1903}
1904
1905
1906void LCodeGen::DoConstantT(LConstantT* instr) {
1907 Register reg = ToRegister(instr->result());
1908 Handle<Object> object = instr->value(isolate());
1909 AllowDeferredHandleDereference smi_check;
1910 __ LoadObject(reg, object);
1911}
1912
1913
1914Operand LCodeGen::BuildSeqStringOperand(Register string,
1915 LOperand* index,
1916 String::Encoding encoding) {
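  // The operand addresses the character payload of a sequential string,
  // roughly string - kHeapObjectTag + SeqString::kHeaderSize + index * width,
  // where width is 1 for one-byte strings and 2 (kUC16Size) for two-byte
  // strings; a constant index is folded into the displacement below.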
1917 if (index->IsConstantOperand()) {
1918 int offset = ToRepresentation(LConstantOperand::cast(index),
1919 Representation::Integer32());
1920 if (encoding == String::TWO_BYTE_ENCODING) {
1921 offset *= kUC16Size;
1922 }
1923 STATIC_ASSERT(kCharSize == 1);
1924 return FieldOperand(string, SeqString::kHeaderSize + offset);
1925 }
1926 return FieldOperand(
1927 string, ToRegister(index),
1928 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1929 SeqString::kHeaderSize);
1930}
1931
1932
1933void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1934 String::Encoding encoding = instr->hydrogen()->encoding();
1935 Register result = ToRegister(instr->result());
1936 Register string = ToRegister(instr->string());
1937
1938 if (FLAG_debug_code) {
1939 __ push(string);
1940 __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
1941 __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
1942
1943 __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1944 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1945 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1946 __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1947 ? one_byte_seq_type : two_byte_seq_type));
1948 __ Check(equal, kUnexpectedStringType);
1949 __ pop(string);
1950 }
1951
1952 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1953 if (encoding == String::ONE_BYTE_ENCODING) {
1954 __ movzx_b(result, operand);
1955 } else {
1956 __ movzx_w(result, operand);
1957 }
1958}
1959
1960
1961void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1962 String::Encoding encoding = instr->hydrogen()->encoding();
1963 Register string = ToRegister(instr->string());
1964
1965 if (FLAG_debug_code) {
1966 Register value = ToRegister(instr->value());
1967 Register index = ToRegister(instr->index());
1968 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1969 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1970 int encoding_mask =
1971 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1972 ? one_byte_seq_type : two_byte_seq_type;
1973 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1974 }
1975
1976 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1977 if (instr->value()->IsConstantOperand()) {
1978 int value = ToRepresentation(LConstantOperand::cast(instr->value()),
1979 Representation::Integer32());
1980 DCHECK_LE(0, value);
1981 if (encoding == String::ONE_BYTE_ENCODING) {
1982 DCHECK_LE(value, String::kMaxOneByteCharCode);
1983 __ mov_b(operand, static_cast<int8_t>(value));
1984 } else {
1985 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1986 __ mov_w(operand, static_cast<int16_t>(value));
1987 }
1988 } else {
1989 Register value = ToRegister(instr->value());
1990 if (encoding == String::ONE_BYTE_ENCODING) {
1991 __ mov_b(operand, value);
1992 } else {
1993 __ mov_w(operand, value);
1994 }
1995 }
1996}
1997
1998
1999void LCodeGen::DoAddI(LAddI* instr) {
2000 LOperand* left = instr->left();
2001 LOperand* right = instr->right();
2002
2003 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
2004 if (right->IsConstantOperand()) {
2005 int32_t offset = ToRepresentation(LConstantOperand::cast(right),
2006 instr->hydrogen()->representation());
2007 __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
2008 } else {
2009 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
2010 __ lea(ToRegister(instr->result()), address);
2011 }
2012 } else {
2013 if (right->IsConstantOperand()) {
2014 __ add(ToOperand(left),
2015 ToImmediate(right, instr->hydrogen()->representation()));
2016 } else {
2017 __ add(ToRegister(left), ToOperand(right));
2018 }
2019 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
2020 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
2021 }
2022 }
2023}
2024
2025
2026void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2027 LOperand* left = instr->left();
2028 LOperand* right = instr->right();
2029 DCHECK(left->Equals(instr->result()));
2030 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2031 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2032 Label return_left;
2033 Condition condition = (operation == HMathMinMax::kMathMin)
2034 ? less_equal
2035 : greater_equal;
2036 if (right->IsConstantOperand()) {
2037 Operand left_op = ToOperand(left);
2038 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
2039 instr->hydrogen()->representation());
2040 __ cmp(left_op, immediate);
2041 __ j(condition, &return_left, Label::kNear);
2042 __ mov(left_op, immediate);
2043 } else {
2044 Register left_reg = ToRegister(left);
2045 Operand right_op = ToOperand(right);
2046 __ cmp(left_reg, right_op);
2047 __ j(condition, &return_left, Label::kNear);
2048 __ mov(left_reg, right_op);
2049 }
2050 __ bind(&return_left);
2051 } else {
2052 DCHECK(instr->hydrogen()->representation().IsDouble());
2053 Label check_nan_left, check_zero, return_left, return_right;
2054 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
2055 X87Register left_reg = ToX87Register(left);
2056 X87Register right_reg = ToX87Register(right);
2057
2058 X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
2059 __ fld(1);
2060 __ fld(1);
2061 __ FCmp();
2062 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
2063 __ j(equal, &check_zero, Label::kNear); // left == right.
2064 __ j(condition, &return_left, Label::kNear);
2065 __ jmp(&return_right, Label::kNear);
2066
2067 __ bind(&check_zero);
2068 __ fld(0);
2069 __ fldz();
2070 __ FCmp();
2071 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
2072 // At this point, both left and right are either 0 or -0.
2073 if (operation == HMathMinMax::kMathMin) {
2074      // Store copies of st0 and st1 to the stack as singles, pop one into a
2075      // temp register, OR it into the other, and load the result into left.
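      // For min, a zero result must be -0 whenever either input is -0;
      // OR-ing the single-precision bit patterns merges the sign bits
      // (sketch: 0x00000000 | 0x80000000 == 0x80000000, which is -0.0f).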
2076 Register scratch_reg = ToRegister(instr->temp());
2077 __ fld(1);
2078 __ fld(1);
2079 __ sub(esp, Immediate(2 * kPointerSize));
2080 __ fstp_s(MemOperand(esp, 0));
2081 __ fstp_s(MemOperand(esp, kPointerSize));
2082 __ pop(scratch_reg);
2083      __ or_(MemOperand(esp, 0), scratch_reg);
2084      X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
2085 __ pop(scratch_reg); // restore esp
2086 } else {
2087      // Both operands are +0 and/or -0 here, so adding them gives the same
2087      // result as AND-ing the sign bits (andsd in the SSE version).
2088 X87Fxch(left_reg);
2089 __ fadd(1);
2090 }
2091 __ jmp(&return_left, Label::kNear);
2092
2093 __ bind(&check_nan_left);
2094 __ fld(0);
2095 __ fld(0);
2096 __ FCmp(); // NaN check.
2097 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
2098
2099 __ bind(&return_right);
2100 X87Fxch(left_reg);
2101 X87Mov(left_reg, right_reg);
2102
2103 __ bind(&return_left);
2104 }
2105}
2106
2107
2108void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2109 X87Register left = ToX87Register(instr->left());
2110 X87Register right = ToX87Register(instr->right());
2111 X87Register result = ToX87Register(instr->result());
2112 if (instr->op() != Token::MOD) {
2113 X87PrepareBinaryOp(left, right, result);
2114 }
2115 // Set the precision control to double-precision.
2116 __ X87SetFPUCW(0x027F);
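  // (0x027F selects round-to-nearest, 53-bit precision, all exceptions
  // masked, so intermediate x87 results match IEEE double arithmetic; the
  // 0x037F written at the end of this function restores the default 64-bit
  // internal precision.)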
2117 switch (instr->op()) {
2118 case Token::ADD:
2119 __ fadd_i(1);
2120 break;
2121 case Token::SUB:
2122 __ fsub_i(1);
2123 break;
2124 case Token::MUL:
2125 __ fmul_i(1);
2126 break;
2127 case Token::DIV:
2128 __ fdiv_i(1);
2129 break;
2130 case Token::MOD: {
2131 // Pass two doubles as arguments on the stack.
2132 __ PrepareCallCFunction(4, eax);
2133 X87Mov(Operand(esp, 1 * kDoubleSize), right);
2134 X87Mov(Operand(esp, 0), left);
2135 X87Free(right);
2136 DCHECK(left.is(result));
2137 X87PrepareToWrite(result);
2138 __ CallCFunction(
2139 ExternalReference::mod_two_doubles_operation(isolate()),
2140 4);
2141
2142 // Return value is in st(0) on ia32.
2143 X87CommitWrite(result);
2144 break;
2145 }
2146 default:
2147 UNREACHABLE();
2148 break;
2149 }
2150
2151 // Restore the default value of control word.
2152 __ X87SetFPUCW(0x037F);
2153}
2154
2155
2156void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2157 DCHECK(ToRegister(instr->context()).is(esi));
2158 DCHECK(ToRegister(instr->left()).is(edx));
2159 DCHECK(ToRegister(instr->right()).is(eax));
2160 DCHECK(ToRegister(instr->result()).is(eax));
2161
2162  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2163  CallCode(code, RelocInfo::CODE_TARGET, instr);
2164}
2165
2166
2167template<class InstrType>
2168void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2169 int left_block = instr->TrueDestination(chunk_);
2170 int right_block = instr->FalseDestination(chunk_);
2171
2172 int next_block = GetNextEmittedBlock();
2173
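  // Emit the cheapest branch shape: a single unconditional goto when both
  // destinations coincide (or no condition is given), a lone conditional jump
  // when one destination is the next block in emission order, and a
  // conditional jump plus an unconditional jump otherwise.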
2174 if (right_block == left_block || cc == no_condition) {
2175 EmitGoto(left_block);
2176 } else if (left_block == next_block) {
2177 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2178 } else if (right_block == next_block) {
2179 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2180 } else {
2181 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2182 __ jmp(chunk_->GetAssemblyLabel(right_block));
2183 }
2184}
2185
2186
2187template <class InstrType>
2188void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
2189 int true_block = instr->TrueDestination(chunk_);
2190 if (cc == no_condition) {
2191 __ jmp(chunk_->GetAssemblyLabel(true_block));
2192 } else {
2193 __ j(cc, chunk_->GetAssemblyLabel(true_block));
2194 }
2195}
2196
2197
2198template<class InstrType>
2199void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2200 int false_block = instr->FalseDestination(chunk_);
2201 if (cc == no_condition) {
2202 __ jmp(chunk_->GetAssemblyLabel(false_block));
2203 } else {
2204 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2205 }
2206}
2207
2208
2209void LCodeGen::DoBranch(LBranch* instr) {
2210 Representation r = instr->hydrogen()->value()->representation();
2211 if (r.IsSmiOrInteger32()) {
2212 Register reg = ToRegister(instr->value());
2213 __ test(reg, Operand(reg));
2214 EmitBranch(instr, not_zero);
2215 } else if (r.IsDouble()) {
2216 X87Register reg = ToX87Register(instr->value());
2217 X87LoadForUsage(reg);
2218 __ fldz();
2219 __ FCmp();
2220 EmitBranch(instr, not_zero);
2221 } else {
2222 DCHECK(r.IsTagged());
2223 Register reg = ToRegister(instr->value());
2224 HType type = instr->hydrogen()->value()->type();
2225 if (type.IsBoolean()) {
2226 DCHECK(!info()->IsStub());
2227 __ cmp(reg, factory()->true_value());
2228 EmitBranch(instr, equal);
2229 } else if (type.IsSmi()) {
2230 DCHECK(!info()->IsStub());
2231 __ test(reg, Operand(reg));
2232 EmitBranch(instr, not_equal);
2233 } else if (type.IsJSArray()) {
2234 DCHECK(!info()->IsStub());
2235 EmitBranch(instr, no_condition);
2236 } else if (type.IsHeapNumber()) {
2237 UNREACHABLE();
2238 } else if (type.IsString()) {
2239 DCHECK(!info()->IsStub());
2240 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2241 EmitBranch(instr, not_equal);
2242 } else {
2243 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2244 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2245
2246 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2247 // undefined -> false.
2248 __ cmp(reg, factory()->undefined_value());
2249 __ j(equal, instr->FalseLabel(chunk_));
2250 }
2251 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2252 // true -> true.
2253 __ cmp(reg, factory()->true_value());
2254 __ j(equal, instr->TrueLabel(chunk_));
2255 // false -> false.
2256 __ cmp(reg, factory()->false_value());
2257 __ j(equal, instr->FalseLabel(chunk_));
2258 }
2259 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2260 // 'null' -> false.
2261 __ cmp(reg, factory()->null_value());
2262 __ j(equal, instr->FalseLabel(chunk_));
2263 }
2264
2265 if (expected.Contains(ToBooleanStub::SMI)) {
2266 // Smis: 0 -> false, all other -> true.
2267 __ test(reg, Operand(reg));
2268 __ j(equal, instr->FalseLabel(chunk_));
2269 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2270 } else if (expected.NeedsMap()) {
2271 // If we need a map later and have a Smi -> deopt.
2272 __ test(reg, Immediate(kSmiTagMask));
2273 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
2274 }
2275
2276 Register map = no_reg; // Keep the compiler happy.
2277 if (expected.NeedsMap()) {
2278 map = ToRegister(instr->temp());
2279 DCHECK(!map.is(reg));
2280 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2281
2282 if (expected.CanBeUndetectable()) {
2283 // Undetectable -> false.
2284 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2285 1 << Map::kIsUndetectable);
2286 __ j(not_zero, instr->FalseLabel(chunk_));
2287 }
2288 }
2289
2290 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2291 // spec object -> true.
2292 __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
2293 __ j(above_equal, instr->TrueLabel(chunk_));
2294 }
2295
2296 if (expected.Contains(ToBooleanStub::STRING)) {
2297 // String value -> false iff empty.
2298 Label not_string;
2299 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2300 __ j(above_equal, &not_string, Label::kNear);
2301 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2302 __ j(not_zero, instr->TrueLabel(chunk_));
2303 __ jmp(instr->FalseLabel(chunk_));
2304 __ bind(&not_string);
2305 }
2306
2307 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2308 // Symbol value -> true.
2309 __ CmpInstanceType(map, SYMBOL_TYPE);
2310 __ j(equal, instr->TrueLabel(chunk_));
2311 }
2312
2313 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2314 // SIMD value -> true.
2315 __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
2316 __ j(equal, instr->TrueLabel(chunk_));
2317 }
2318
2319 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2320 // heap number -> false iff +0, -0, or NaN.
2321 Label not_heap_number;
2322 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2323 factory()->heap_number_map());
2324 __ j(not_equal, &not_heap_number, Label::kNear);
2325 __ fldz();
2326 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2327 __ FCmp();
2328 __ j(zero, instr->FalseLabel(chunk_));
2329 __ jmp(instr->TrueLabel(chunk_));
2330 __ bind(&not_heap_number);
2331 }
2332
2333 if (!expected.IsGeneric()) {
2334 // We've seen something for the first time -> deopt.
2335 // This can only happen if we are not generic already.
2336 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
2337 }
2338 }
2339 }
2340}
2341
2342
2343void LCodeGen::EmitGoto(int block) {
2344 if (!IsNextEmittedBlock(block)) {
2345 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2346 }
2347}
2348
2349
2350void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2351}
2352
2353
2354void LCodeGen::DoGoto(LGoto* instr) {
2355 EmitGoto(instr->block_id());
2356}
2357
2358
2359Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2360 Condition cond = no_condition;
2361 switch (op) {
2362 case Token::EQ:
2363 case Token::EQ_STRICT:
2364 cond = equal;
2365 break;
2366 case Token::NE:
2367 case Token::NE_STRICT:
2368 cond = not_equal;
2369 break;
2370 case Token::LT:
2371 cond = is_unsigned ? below : less;
2372 break;
2373 case Token::GT:
2374 cond = is_unsigned ? above : greater;
2375 break;
2376 case Token::LTE:
2377 cond = is_unsigned ? below_equal : less_equal;
2378 break;
2379 case Token::GTE:
2380 cond = is_unsigned ? above_equal : greater_equal;
2381 break;
2382 case Token::IN:
2383 case Token::INSTANCEOF:
2384 default:
2385 UNREACHABLE();
2386 }
2387 return cond;
2388}
2389
2390
2391void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2392 LOperand* left = instr->left();
2393 LOperand* right = instr->right();
2394 bool is_unsigned =
2395 instr->is_double() ||
2396 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2397 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2398 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2399
2400 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2401 // We can statically evaluate the comparison.
2402 double left_val = ToDouble(LConstantOperand::cast(left));
2403 double right_val = ToDouble(LConstantOperand::cast(right));
2404    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2405 ? instr->TrueDestination(chunk_)
2406 : instr->FalseDestination(chunk_);
2407    EmitGoto(next_block);
2408 } else {
2409 if (instr->is_double()) {
2410 X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2411 __ FCmp();
2412 // Don't base result on EFLAGS when a NaN is involved. Instead
2413 // jump to the false block.
2414 __ j(parity_even, instr->FalseLabel(chunk_));
2415 } else {
2416 if (right->IsConstantOperand()) {
2417 __ cmp(ToOperand(left),
2418 ToImmediate(right, instr->hydrogen()->representation()));
2419 } else if (left->IsConstantOperand()) {
2420 __ cmp(ToOperand(right),
2421 ToImmediate(left, instr->hydrogen()->representation()));
2422 // We commuted the operands, so commute the condition.
2423 cc = CommuteCondition(cc);
2424 } else {
2425 __ cmp(ToRegister(left), ToOperand(right));
2426 }
2427 }
2428 EmitBranch(instr, cc);
2429 }
2430}
2431
2432
2433void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2434 Register left = ToRegister(instr->left());
2435
2436 if (instr->right()->IsConstantOperand()) {
2437 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2438 __ CmpObject(left, right);
2439 } else {
2440 Operand right = ToOperand(instr->right());
2441 __ cmp(left, right);
2442 }
2443 EmitBranch(instr, equal);
2444}
2445
2446
2447void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2448 if (instr->hydrogen()->representation().IsTagged()) {
2449 Register input_reg = ToRegister(instr->object());
2450 __ cmp(input_reg, factory()->the_hole_value());
2451 EmitBranch(instr, equal);
2452 return;
2453 }
2454
2455  // Put the value on top of the x87 stack.
2456 X87Register src = ToX87Register(instr->object());
2457 X87LoadForUsage(src);
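  // Compare the value against itself; only a NaN compares unordered (parity
  // flag set), and the hole is encoded as a particular NaN, so any ordered
  // value can be rejected as "not the hole" right away.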
2458 __ fld(0);
2459 __ fld(0);
2460 __ FCmp();
2461 Label ok;
2462 __ j(parity_even, &ok, Label::kNear);
2463 __ fstp(0);
2464 EmitFalseBranch(instr, no_condition);
2465 __ bind(&ok);
2466
2467
2468 __ sub(esp, Immediate(kDoubleSize));
2469 __ fstp_d(MemOperand(esp, 0));
2470
2471 __ add(esp, Immediate(kDoubleSize));
2472 int offset = sizeof(kHoleNanUpper32);
2473  // The x87 load quiets the hole sNaN (0xfff7fffffff7ffff) to 0xfffffffffff7ffff,
2474  // so as a temporary fix we check its upper 32 bits against 0xffffffff.
2475 __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
2476 EmitBranch(instr, equal);
2477}
2478
2479
2480Condition LCodeGen::EmitIsString(Register input,
2481 Register temp1,
2482 Label* is_not_string,
2483 SmiCheck check_needed = INLINE_SMI_CHECK) {
2484 if (check_needed == INLINE_SMI_CHECK) {
2485 __ JumpIfSmi(input, is_not_string);
2486 }
2487
2488 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2489
2490 return cond;
2491}
2492
2493
2494void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2495 Register reg = ToRegister(instr->value());
2496 Register temp = ToRegister(instr->temp());
2497
2498 SmiCheck check_needed =
2499 instr->hydrogen()->value()->type().IsHeapObject()
2500 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2501
2502 Condition true_cond = EmitIsString(
2503 reg, temp, instr->FalseLabel(chunk_), check_needed);
2504
2505 EmitBranch(instr, true_cond);
2506}
2507
2508
2509void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2510 Operand input = ToOperand(instr->value());
2511
2512 __ test(input, Immediate(kSmiTagMask));
2513 EmitBranch(instr, zero);
2514}
2515
2516
2517void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2518 Register input = ToRegister(instr->value());
2519 Register temp = ToRegister(instr->temp());
2520
2521 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2522 STATIC_ASSERT(kSmiTag == 0);
2523 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2524 }
2525 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2526 __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2527 1 << Map::kIsUndetectable);
2528 EmitBranch(instr, not_zero);
2529}
2530
2531
2532static Condition ComputeCompareCondition(Token::Value op) {
2533 switch (op) {
2534 case Token::EQ_STRICT:
2535 case Token::EQ:
2536 return equal;
2537 case Token::LT:
2538 return less;
2539 case Token::GT:
2540 return greater;
2541 case Token::LTE:
2542 return less_equal;
2543 case Token::GTE:
2544 return greater_equal;
2545 default:
2546 UNREACHABLE();
2547 return no_condition;
2548 }
2549}
2550
2551
2552void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2553 DCHECK(ToRegister(instr->context()).is(esi));
2554 DCHECK(ToRegister(instr->left()).is(edx));
2555 DCHECK(ToRegister(instr->right()).is(eax));
2556
2557 Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
2558 CallCode(code, RelocInfo::CODE_TARGET, instr);
2559 __ test(eax, eax);
2560
2561 EmitBranch(instr, ComputeCompareCondition(instr->op()));
2562}
2563
2564
2565static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2566 InstanceType from = instr->from();
2567 InstanceType to = instr->to();
2568 if (from == FIRST_TYPE) return to;
2569 DCHECK(from == to || to == LAST_TYPE);
2570 return from;
2571}
2572
2573
2574static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2575 InstanceType from = instr->from();
2576 InstanceType to = instr->to();
2577 if (from == to) return equal;
2578 if (to == LAST_TYPE) return above_equal;
2579 if (from == FIRST_TYPE) return below_equal;
2580 UNREACHABLE();
2581 return equal;
2582}
2583
2584
2585void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2586 Register input = ToRegister(instr->value());
2587 Register temp = ToRegister(instr->temp());
2588
2589 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2590 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2591 }
2592
2593 __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2594 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2595}
2596
2597
2598void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2599 Register input = ToRegister(instr->value());
2600 Register result = ToRegister(instr->result());
2601
2602 __ AssertString(input);
2603
2604 __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2605 __ IndexFromHash(result, result);
2606}
2607
2608
2609void LCodeGen::DoHasCachedArrayIndexAndBranch(
2610 LHasCachedArrayIndexAndBranch* instr) {
2611 Register input = ToRegister(instr->value());
2612
2613 __ test(FieldOperand(input, String::kHashFieldOffset),
2614 Immediate(String::kContainsCachedArrayIndexMask));
2615 EmitBranch(instr, equal);
2616}
2617
2618
2619// Branches to a label or falls through with the answer in the z flag. Trashes
2620// the temp registers, but not the input.
2621void LCodeGen::EmitClassOfTest(Label* is_true,
2622 Label* is_false,
2623                               Handle<String> class_name,
2624 Register input,
2625 Register temp,
2626 Register temp2) {
2627 DCHECK(!input.is(temp));
2628 DCHECK(!input.is(temp2));
2629 DCHECK(!temp.is(temp2));
2630 __ JumpIfSmi(input, is_false);
2631
2632 __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
2633 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2634 __ j(equal, is_true);
2635 } else {
2636 __ j(equal, is_false);
2637 }
2638
2639 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2640 // Check if the constructor in the map is a function.
2641 __ GetMapConstructor(temp, temp, temp2);
2642 // Objects with a non-function constructor have class 'Object'.
2643 __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
2644 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2645 __ j(not_equal, is_true);
2646 } else {
2647 __ j(not_equal, is_false);
2648 }
2649
2650 // temp now contains the constructor function. Grab the
2651 // instance class name from there.
2652 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2653 __ mov(temp, FieldOperand(temp,
2654 SharedFunctionInfo::kInstanceClassNameOffset));
2655 // The class name we are testing against is internalized since it's a literal.
2656 // The name in the constructor is internalized because of the way the context
2657 // is booted. This routine isn't expected to work for random API-created
2658 // classes and it doesn't have to because you can't access it with natives
2659 // syntax. Since both sides are internalized it is sufficient to use an
2660 // identity comparison.
2661 __ cmp(temp, class_name);
2662 // End with the answer in the z flag.
2663}
2664
2665
2666void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2667 Register input = ToRegister(instr->value());
2668 Register temp = ToRegister(instr->temp());
2669 Register temp2 = ToRegister(instr->temp2());
2670
2671 Handle<String> class_name = instr->hydrogen()->class_name();
2672
2673 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2674 class_name, input, temp, temp2);
2675
2676 EmitBranch(instr, equal);
2677}
2678
2679
2680void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2681 Register reg = ToRegister(instr->value());
2682 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2683 EmitBranch(instr, equal);
2684}
2685
2686
2687void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2688 DCHECK(ToRegister(instr->context()).is(esi));
2689 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2690 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2691 DCHECK(ToRegister(instr->result()).is(eax));
2692 InstanceOfStub stub(isolate());
2693 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2694}
2695
2696
2697void LCodeGen::DoHasInPrototypeChainAndBranch(
2698 LHasInPrototypeChainAndBranch* instr) {
2699 Register const object = ToRegister(instr->object());
2700 Register const object_map = ToRegister(instr->scratch());
2701 Register const object_prototype = object_map;
2702 Register const prototype = ToRegister(instr->prototype());
2703
2704 // The {object} must be a spec object. It's sufficient to know that {object}
2705 // is not a smi, since all other non-spec objects have {null} prototypes and
2706 // will be ruled out below.
2707 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2708 __ test(object, Immediate(kSmiTagMask));
2709 EmitFalseBranch(instr, zero);
2710 }
2711
2712 // Loop through the {object}s prototype chain looking for the {prototype}.
2713  // Loop through the {object}'s prototype chain looking for the {prototype}.
2714 Label loop;
2715 __ bind(&loop);
2716
2717 // Deoptimize if the object needs to be access checked.
2718 __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
2719 1 << Map::kIsAccessCheckNeeded);
2720 DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
2721 // Deoptimize for proxies.
2722 __ CmpInstanceType(object_map, JS_PROXY_TYPE);
2723 DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
2724
2725 __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
2726 __ cmp(object_prototype, prototype);
2727 EmitTrueBranch(instr, equal);
2728 __ cmp(object_prototype, factory()->null_value());
2729 EmitFalseBranch(instr, equal);
2730 __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
2731 __ jmp(&loop);
2732}
2733
2734
2735void LCodeGen::DoCmpT(LCmpT* instr) {
2736 Token::Value op = instr->op();
2737
2738  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2739  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2740
2741 Condition condition = ComputeCompareCondition(op);
2742 Label true_value, done;
2743 __ test(eax, Operand(eax));
2744 __ j(condition, &true_value, Label::kNear);
2745 __ mov(ToRegister(instr->result()), factory()->false_value());
2746 __ jmp(&done, Label::kNear);
2747 __ bind(&true_value);
2748 __ mov(ToRegister(instr->result()), factory()->true_value());
2749 __ bind(&done);
2750}
2751
2752
2753void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
2754 int extra_value_count = dynamic_frame_alignment ? 2 : 1;
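  // extra_value_count is the number of stack slots above the parameters that
  // must also be dropped on return: the return address, plus the alignment
  // padding word when the frame was dynamically aligned.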
2755
2756 if (instr->has_constant_parameter_count()) {
2757 int parameter_count = ToInteger32(instr->constant_parameter_count());
2758 if (dynamic_frame_alignment && FLAG_debug_code) {
2759 __ cmp(Operand(esp,
2760 (parameter_count + extra_value_count) * kPointerSize),
2761 Immediate(kAlignmentZapValue));
2762 __ Assert(equal, kExpectedAlignmentMarker);
2763 }
2764 __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
2765 } else {
2766 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2767 Register reg = ToRegister(instr->parameter_count());
2768    // The argument count parameter is a smi.
2769 __ SmiUntag(reg);
2770 Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
2771 if (dynamic_frame_alignment && FLAG_debug_code) {
2772 DCHECK(extra_value_count == 2);
2773 __ cmp(Operand(esp, reg, times_pointer_size,
2774 extra_value_count * kPointerSize),
2775 Immediate(kAlignmentZapValue));
2776 __ Assert(equal, kExpectedAlignmentMarker);
2777 }
2778
2779 // emit code to restore stack based on instr->parameter_count()
2780 __ pop(return_addr_reg); // save return address
2781 if (dynamic_frame_alignment) {
2782 __ inc(reg); // 1 more for alignment
2783 }
2784 __ shl(reg, kPointerSizeLog2);
2785 __ add(esp, reg);
2786 __ jmp(return_addr_reg);
2787 }
2788}
2789
2790
2791void LCodeGen::DoReturn(LReturn* instr) {
2792 if (FLAG_trace && info()->IsOptimizing()) {
2793 // Preserve the return value on the stack and rely on the runtime call
2794 // to return the value in the same register. We're leaving the code
2795 // managed by the register allocator and tearing down the frame, it's
2796 // safe to write to the context register.
2797 __ push(eax);
2798 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2799 __ CallRuntime(Runtime::kTraceExit);
2800 }
2801 if (dynamic_frame_alignment_) {
2802 // Fetch the state of the dynamic frame alignment.
2803 __ mov(edx, Operand(ebp,
2804 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2805 }
2806 if (NeedsEagerFrame()) {
2807 __ mov(esp, ebp);
2808 __ pop(ebp);
2809 }
2810 if (dynamic_frame_alignment_) {
2811 Label no_padding;
2812 __ cmp(edx, Immediate(kNoAlignmentPadding));
2813 __ j(equal, &no_padding, Label::kNear);
2814
2815 EmitReturn(instr, true);
2816 __ bind(&no_padding);
2817 }
2818
2819 EmitReturn(instr, false);
2820}
2821
2822
2823template <class T>
2824void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2825 Register vector_register = ToRegister(instr->temp_vector());
2826 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2827 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2828 DCHECK(slot_register.is(eax));
2829
2830 AllowDeferredHandleDereference vector_structure_check;
2831 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2832 __ mov(vector_register, vector);
2833 // No need to allocate this register.
2834 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2835 int index = vector->GetIndex(slot);
2836 __ mov(slot_register, Immediate(Smi::FromInt(index)));
2837}
2838
2839
2840template <class T>
2841void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2842 Register vector_register = ToRegister(instr->temp_vector());
2843 Register slot_register = ToRegister(instr->temp_slot());
2844
2845 AllowDeferredHandleDereference vector_structure_check;
2846 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2847 __ mov(vector_register, vector);
2848 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2849 int index = vector->GetIndex(slot);
2850 __ mov(slot_register, Immediate(Smi::FromInt(index)));
2851}
2852
2853
2854void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2855 DCHECK(ToRegister(instr->context()).is(esi));
2856 DCHECK(ToRegister(instr->global_object())
2857 .is(LoadDescriptor::ReceiverRegister()));
2858 DCHECK(ToRegister(instr->result()).is(eax));
2859
2860 __ mov(LoadDescriptor::NameRegister(), instr->name());
2861 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2862  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2863 isolate(), instr->typeof_mode(), PREMONOMORPHIC)
2864 .code();
2865  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2866}
2867
2868
2869void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2870 Register context = ToRegister(instr->context());
2871 Register result = ToRegister(instr->result());
2872 __ mov(result, ContextOperand(context, instr->slot_index()));
2873
2874 if (instr->hydrogen()->RequiresHoleCheck()) {
2875 __ cmp(result, factory()->the_hole_value());
2876 if (instr->hydrogen()->DeoptimizesOnHole()) {
2877 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2878 } else {
2879 Label is_not_hole;
2880 __ j(not_equal, &is_not_hole, Label::kNear);
2881 __ mov(result, factory()->undefined_value());
2882 __ bind(&is_not_hole);
2883 }
2884 }
2885}
2886
2887
2888void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2889 Register context = ToRegister(instr->context());
2890 Register value = ToRegister(instr->value());
2891
2892 Label skip_assignment;
2893
2894 Operand target = ContextOperand(context, instr->slot_index());
2895 if (instr->hydrogen()->RequiresHoleCheck()) {
2896 __ cmp(target, factory()->the_hole_value());
2897 if (instr->hydrogen()->DeoptimizesOnHole()) {
2898 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2899 } else {
2900 __ j(not_equal, &skip_assignment, Label::kNear);
2901 }
2902 }
2903
2904 __ mov(target, value);
2905 if (instr->hydrogen()->NeedsWriteBarrier()) {
2906 SmiCheck check_needed =
2907 instr->hydrogen()->value()->type().IsHeapObject()
2908 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2909 Register temp = ToRegister(instr->temp());
2910 int offset = Context::SlotOffset(instr->slot_index());
2911 __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
2912 EMIT_REMEMBERED_SET, check_needed);
2913 }
2914
2915 __ bind(&skip_assignment);
2916}
2917
2918
2919void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2920 HObjectAccess access = instr->hydrogen()->access();
2921 int offset = access.offset();
2922
2923 if (access.IsExternalMemory()) {
2924 Register result = ToRegister(instr->result());
2925 MemOperand operand = instr->object()->IsConstantOperand()
2926 ? MemOperand::StaticVariable(ToExternalReference(
2927 LConstantOperand::cast(instr->object())))
2928 : MemOperand(ToRegister(instr->object()), offset);
2929 __ Load(result, operand, access.representation());
2930 return;
2931 }
2932
2933 Register object = ToRegister(instr->object());
2934 if (instr->hydrogen()->representation().IsDouble()) {
2935 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
2936 return;
2937 }
2938
2939 Register result = ToRegister(instr->result());
2940 if (!access.IsInobject()) {
2941 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2942 object = result;
2943 }
2944 __ Load(result, FieldOperand(object, offset), access.representation());
2945}
2946
2947
2948void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
2949 DCHECK(!operand->IsDoubleRegister());
2950 if (operand->IsConstantOperand()) {
2951 Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
2952 AllowDeferredHandleDereference smi_check;
2953 if (object->IsSmi()) {
2954 __ Push(Handle<Smi>::cast(object));
2955 } else {
2956 __ PushHeapObject(Handle<HeapObject>::cast(object));
2957 }
2958 } else if (operand->IsRegister()) {
2959 __ push(ToRegister(operand));
2960 } else {
2961 __ push(ToOperand(operand));
2962 }
2963}
2964
2965
2966void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2967 DCHECK(ToRegister(instr->context()).is(esi));
2968 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2969 DCHECK(ToRegister(instr->result()).is(eax));
2970
2971 __ mov(LoadDescriptor::NameRegister(), instr->name());
2972 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2973  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2974 isolate(), NOT_INSIDE_TYPEOF,
2975 instr->hydrogen()->initialization_state())
2976 .code();
2977  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2978}
2979
2980
2981void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2982 Register function = ToRegister(instr->function());
2983 Register temp = ToRegister(instr->temp());
2984 Register result = ToRegister(instr->result());
2985
2986 // Get the prototype or initial map from the function.
2987 __ mov(result,
2988 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2989
2990 // Check that the function has a prototype or an initial map.
2991 __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
2992 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2993
2994 // If the function does not have an initial map, we're done.
2995 Label done;
2996 __ CmpObjectType(result, MAP_TYPE, temp);
2997 __ j(not_equal, &done, Label::kNear);
2998
2999 // Get the prototype from the initial map.
3000 __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3001
3002 // All done.
3003 __ bind(&done);
3004}
3005
3006
3007void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3008 Register result = ToRegister(instr->result());
3009 __ LoadRoot(result, instr->index());
3010}
3011
3012
3013void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3014 Register arguments = ToRegister(instr->arguments());
3015 Register result = ToRegister(instr->result());
3016 if (instr->length()->IsConstantOperand() &&
3017 instr->index()->IsConstantOperand()) {
3018 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3019 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3020 int index = (const_length - const_index) + 1;
3021 __ mov(result, Operand(arguments, index * kPointerSize));
3022 } else {
3023 Register length = ToRegister(instr->length());
3024 Operand index = ToOperand(instr->index());
3025 // There are two words between the frame pointer and the last argument.
3026    // Subtracting the index from the length accounts for one of them; add one more.
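    // A sketch of the layout being indexed (with `arguments` pointing at the
    // frame that owns the parameters):
    //   [arguments + 0 * kPointerSize]  caller's frame pointer
    //   [arguments + 1 * kPointerSize]  return address
    //   [arguments + 2 * kPointerSize]  argument (length - 1)
    // so argument `index` is read from arguments + (length - index + 1) * 4.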
3027 __ sub(length, index);
3028 __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3029 }
3030}
3031
3032
3033void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3034 ElementsKind elements_kind = instr->elements_kind();
3035 LOperand* key = instr->key();
3036 if (!key->IsConstantOperand() &&
3037 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3038 elements_kind)) {
3039 __ SmiUntag(ToRegister(key));
3040 }
3041 Operand operand(BuildFastArrayOperand(
3042 instr->elements(),
3043 key,
3044 instr->hydrogen()->key()->representation(),
3045 elements_kind,
3046 instr->base_offset()));
3047 if (elements_kind == FLOAT32_ELEMENTS) {
3048 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3049 } else if (elements_kind == FLOAT64_ELEMENTS) {
3050 X87Mov(ToX87Register(instr->result()), operand);
3051 } else {
3052 Register result(ToRegister(instr->result()));
3053 switch (elements_kind) {
3054 case INT8_ELEMENTS:
3055 __ movsx_b(result, operand);
3056 break;
3057 case UINT8_ELEMENTS:
3058 case UINT8_CLAMPED_ELEMENTS:
3059 __ movzx_b(result, operand);
3060 break;
3061 case INT16_ELEMENTS:
3062 __ movsx_w(result, operand);
3063 break;
3064 case UINT16_ELEMENTS:
3065 __ movzx_w(result, operand);
3066 break;
3067 case INT32_ELEMENTS:
3068 __ mov(result, operand);
3069 break;
3070 case UINT32_ELEMENTS:
3071 __ mov(result, operand);
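        // A uint32 element with its top bit set does not fit in an int32; if
        // the consumers do not treat the value as uint32, deoptimize when the
        // loaded value looks negative (i.e. is >= 2^31).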
3072 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3073 __ test(result, Operand(result));
3074 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
3075 }
3076 break;
3077 case FLOAT32_ELEMENTS:
3078 case FLOAT64_ELEMENTS:
3079 case FAST_SMI_ELEMENTS:
3080 case FAST_ELEMENTS:
3081 case FAST_DOUBLE_ELEMENTS:
3082 case FAST_HOLEY_SMI_ELEMENTS:
3083 case FAST_HOLEY_ELEMENTS:
3084 case FAST_HOLEY_DOUBLE_ELEMENTS:
3085 case DICTIONARY_ELEMENTS:
3086 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3087 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3088      case FAST_STRING_WRAPPER_ELEMENTS:
3089 case SLOW_STRING_WRAPPER_ELEMENTS:
3090 case NO_ELEMENTS:
3091        UNREACHABLE();
3092 break;
3093 }
3094 }
3095}
3096
3097
3098void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3099 if (instr->hydrogen()->RequiresHoleCheck()) {
3100 Operand hole_check_operand = BuildFastArrayOperand(
3101 instr->elements(), instr->key(),
3102 instr->hydrogen()->key()->representation(),
3103 FAST_DOUBLE_ELEMENTS,
3104 instr->base_offset() + sizeof(kHoleNanLower32));
3105 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3106 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3107 }
3108
3109 Operand double_load_operand = BuildFastArrayOperand(
3110 instr->elements(),
3111 instr->key(),
3112 instr->hydrogen()->key()->representation(),
3113 FAST_DOUBLE_ELEMENTS,
3114 instr->base_offset());
3115 X87Mov(ToX87Register(instr->result()), double_load_operand);
3116}
3117
3118
3119void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3120 Register result = ToRegister(instr->result());
3121
3122 // Load the result.
3123 __ mov(result,
3124 BuildFastArrayOperand(instr->elements(), instr->key(),
3125 instr->hydrogen()->key()->representation(),
3126 FAST_ELEMENTS, instr->base_offset()));
3127
3128 // Check for the hole value.
3129 if (instr->hydrogen()->RequiresHoleCheck()) {
3130 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3131 __ test(result, Immediate(kSmiTagMask));
3132 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
3133 } else {
3134 __ cmp(result, factory()->the_hole_value());
3135 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3136 }
3137 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3138 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3139 Label done;
3140 __ cmp(result, factory()->the_hole_value());
3141 __ j(not_equal, &done);
3142 if (info()->IsStub()) {
3143 // A stub can safely convert the hole to undefined only if the array
3144 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3145 // it needs to bail out.
3146 __ mov(result, isolate()->factory()->array_protector());
3147 __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
3148 Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
3149 DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
3150 }
3151 __ mov(result, isolate()->factory()->undefined_value());
3152 __ bind(&done);
3153 }
3154}
3155
3156
3157void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3158 if (instr->is_fixed_typed_array()) {
3159 DoLoadKeyedExternalArray(instr);
3160 } else if (instr->hydrogen()->representation().IsDouble()) {
3161 DoLoadKeyedFixedDoubleArray(instr);
3162 } else {
3163 DoLoadKeyedFixedArray(instr);
3164 }
3165}
3166
3167
3168Operand LCodeGen::BuildFastArrayOperand(
3169 LOperand* elements_pointer,
3170 LOperand* key,
3171 Representation key_representation,
3172 ElementsKind elements_kind,
3173 uint32_t base_offset) {
3174 Register elements_pointer_reg = ToRegister(elements_pointer);
3175 int element_shift_size = ElementsKindToShiftSize(elements_kind);
3176 int shift_size = element_shift_size;
3177 if (key->IsConstantOperand()) {
3178 int constant_value = ToInteger32(LConstantOperand::cast(key));
3179 if (constant_value & 0xF0000000) {
3180 Abort(kArrayIndexConstantValueTooBig);
3181 }
3182 return Operand(elements_pointer_reg,
3183 ((constant_value) << shift_size)
3184 + base_offset);
3185 } else {
3186 // Take the tag bit into account while computing the shift size.
3187 if (key_representation.IsSmi() && (shift_size >= 1)) {
3188 shift_size -= kSmiTagSize;
3189 }
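    // (Sketch, assuming kSmiTagSize == 1: a smi key is the index shifted left
    // by one, so dropping one from the element shift lets the tagged key be
    // used directly as the scaled index register, e.g. FAST_ELEMENTS goes
    // from times_4 to times_2.)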
3190 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3191 return Operand(elements_pointer_reg,
3192 ToRegister(key),
3193 scale_factor,
3194 base_offset);
3195 }
3196}
3197
3198
3199void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3200 DCHECK(ToRegister(instr->context()).is(esi));
3201 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3202 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3203
3204 if (instr->hydrogen()->HasVectorAndSlot()) {
3205 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3206 }
3207
3208 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3209                    isolate(), instr->hydrogen()->initialization_state())
3210 .code();
3211  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3212}
3213
3214
3215void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3216 Register result = ToRegister(instr->result());
3217
3218 if (instr->hydrogen()->from_inlined()) {
3219 __ lea(result, Operand(esp, -2 * kPointerSize));
3220 } else {
3221 // Check for arguments adapter frame.
3222 Label done, adapted;
3223 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3224 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3225 __ cmp(Operand(result),
3226 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3227 __ j(equal, &adapted, Label::kNear);
3228
3229 // No arguments adaptor frame.
3230 __ mov(result, Operand(ebp));
3231 __ jmp(&done, Label::kNear);
3232
3233 // Arguments adaptor frame present.
3234 __ bind(&adapted);
3235 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3236
3237 // Result is the frame pointer for the frame if not adapted and for the real
3238 // frame below the adaptor frame if adapted.
3239 __ bind(&done);
3240 }
3241}
3242
3243
3244void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3245 Operand elem = ToOperand(instr->elements());
3246 Register result = ToRegister(instr->result());
3247
3248 Label done;
3249
3250 // If no arguments adaptor frame the number of arguments is fixed.
3251 __ cmp(ebp, elem);
3252 __ mov(result, Immediate(scope()->num_parameters()));
3253 __ j(equal, &done, Label::kNear);
3254
3255 // Arguments adaptor frame present. Get argument length from there.
3256 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3257 __ mov(result, Operand(result,
3258 ArgumentsAdaptorFrameConstants::kLengthOffset));
3259 __ SmiUntag(result);
3260
3261 // Argument length is in result register.
3262 __ bind(&done);
3263}
3264
3265
3266void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3267 Register receiver = ToRegister(instr->receiver());
3268 Register function = ToRegister(instr->function());
3269
3270 // If the receiver is null or undefined, we have to pass the global
3271 // object as a receiver to normal functions. Values have to be
3272 // passed unchanged to builtins and strict-mode functions.
3273 Label receiver_ok, global_object;
3274 Register scratch = ToRegister(instr->temp());
3275
3276 if (!instr->hydrogen()->known_function()) {
3277 // Do not transform the receiver to object for strict mode
3278 // functions.
3279 __ mov(scratch,
3280 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3281 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3282 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3283 __ j(not_equal, &receiver_ok);
3284
3285 // Do not transform the receiver to object for builtins.
3286 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3287 1 << SharedFunctionInfo::kNativeBitWithinByte);
3288 __ j(not_equal, &receiver_ok);
3289 }
3290
3291 // Normal function. Replace undefined or null with global receiver.
3292 __ cmp(receiver, factory()->null_value());
3293 __ j(equal, &global_object);
3294 __ cmp(receiver, factory()->undefined_value());
3295 __ j(equal, &global_object);
3296
3297 // The receiver should be a JS object.
3298 __ test(receiver, Immediate(kSmiTagMask));
3299 DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
3300 __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
3301 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
3302
3303 __ jmp(&receiver_ok, Label::kNear);
3304 __ bind(&global_object);
3305 __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3306 __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
3307 __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
3308 __ bind(&receiver_ok);
3309}
3310
3311
3312void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3313 Register receiver = ToRegister(instr->receiver());
3314 Register function = ToRegister(instr->function());
3315 Register length = ToRegister(instr->length());
3316 Register elements = ToRegister(instr->elements());
3317 DCHECK(receiver.is(eax)); // Used for parameter count.
3318 DCHECK(function.is(edi)); // Required by InvokeFunction.
3319 DCHECK(ToRegister(instr->result()).is(eax));
3320
3321 // Copy the arguments to this function possibly from the
3322 // adaptor frame below it.
3323 const uint32_t kArgumentsLimit = 1 * KB;
3324 __ cmp(length, kArgumentsLimit);
3325 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3326
3327 __ push(receiver);
3328 __ mov(receiver, length);
3329
3330 // Loop through the arguments pushing them onto the execution
3331 // stack.
3332 Label invoke, loop;
3333 // length is a small non-negative integer, due to the test above.
3334 __ test(length, Operand(length));
3335 __ j(zero, &invoke, Label::kNear);
3336 __ bind(&loop);
3337 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3338 __ dec(length);
3339 __ j(not_zero, &loop);
3340
3341 // Invoke the function.
3342 __ bind(&invoke);
3343 DCHECK(instr->HasPointerMap());
3344 LPointerMap* pointers = instr->pointer_map();
3345 SafepointGenerator safepoint_generator(
3346 this, pointers, Safepoint::kLazyDeopt);
3347 ParameterCount actual(eax);
3348 __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
3349 safepoint_generator);
3350}
3351
3352
3353void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3354 __ int3();
3355}
3356
3357
3358void LCodeGen::DoPushArgument(LPushArgument* instr) {
3359 LOperand* argument = instr->value();
3360 EmitPushTaggedOperand(argument);
3361}
3362
3363
3364void LCodeGen::DoDrop(LDrop* instr) {
3365 __ Drop(instr->count());
3366}
3367
3368
3369void LCodeGen::DoThisFunction(LThisFunction* instr) {
3370 Register result = ToRegister(instr->result());
3371 __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3372}
3373
3374
3375void LCodeGen::DoContext(LContext* instr) {
3376 Register result = ToRegister(instr->result());
3377 if (info()->IsOptimizing()) {
3378 __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3379 } else {
3380 // If there is no frame, the context must be in esi.
3381 DCHECK(result.is(esi));
3382 }
3383}
3384
3385
3386void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3387 DCHECK(ToRegister(instr->context()).is(esi));
3388 __ push(Immediate(instr->hydrogen()->pairs()));
3389 __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3390 CallRuntime(Runtime::kDeclareGlobals, instr);
3391}
3392
3393
3394void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3395 int formal_parameter_count, int arity,
3396 LInstruction* instr) {
3397 bool dont_adapt_arguments =
3398 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3399 bool can_invoke_directly =
3400 dont_adapt_arguments || formal_parameter_count == arity;
3401
3402 Register function_reg = edi;
3403
3404 if (can_invoke_directly) {
3405 // Change context.
3406 __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
3407
3408 // Always initialize new target and number of actual arguments.
3409 __ mov(edx, factory()->undefined_value());
3410 __ mov(eax, arity);
3411
3412 // Invoke function directly.
3413 if (function.is_identical_to(info()->closure())) {
3414 __ CallSelf();
3415 } else {
3416 __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
3417 }
3418 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3419 } else {
3420 // We need to adapt arguments.
3421 LPointerMap* pointers = instr->pointer_map();
3422 SafepointGenerator generator(
3423 this, pointers, Safepoint::kLazyDeopt);
3424 ParameterCount count(arity);
3425 ParameterCount expected(formal_parameter_count);
3426 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3427 }
3428}
3429
3430
3431void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3432 DCHECK(ToRegister(instr->result()).is(eax));
3433
3434 if (instr->hydrogen()->IsTailCall()) {
3435 if (NeedsEagerFrame()) __ leave();
3436
3437 if (instr->target()->IsConstantOperand()) {
3438 LConstantOperand* target = LConstantOperand::cast(instr->target());
3439 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3440 __ jmp(code, RelocInfo::CODE_TARGET);
3441 } else {
3442 DCHECK(instr->target()->IsRegister());
3443 Register target = ToRegister(instr->target());
3444 __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3445 __ jmp(target);
3446 }
3447 } else {
3448 LPointerMap* pointers = instr->pointer_map();
3449 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3450
3451 if (instr->target()->IsConstantOperand()) {
3452 LConstantOperand* target = LConstantOperand::cast(instr->target());
3453 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3454 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3455 __ call(code, RelocInfo::CODE_TARGET);
3456 } else {
3457 DCHECK(instr->target()->IsRegister());
3458 Register target = ToRegister(instr->target());
3459 generator.BeforeCall(__ CallSize(Operand(target)));
3460 __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3461 __ call(target);
3462 }
3463 generator.AfterCall();
3464 }
3465}
3466
3467
3468void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3469 DCHECK(ToRegister(instr->function()).is(edi));
3470 DCHECK(ToRegister(instr->result()).is(eax));
3471
3472 // Change context.
3473 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3474
3475 // Always initialize new target and number of actual arguments.
3476 __ mov(edx, factory()->undefined_value());
3477 __ mov(eax, instr->arity());
3478
3479 bool is_self_call = false;
3480 if (instr->hydrogen()->function()->IsConstant()) {
3481 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3482 Handle<JSFunction> jsfun =
3483 Handle<JSFunction>::cast(fun_const->handle(isolate()));
3484 is_self_call = jsfun.is_identical_to(info()->closure());
3485 }
3486
3487 if (is_self_call) {
3488 __ CallSelf();
3489 } else {
3490 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3491 }
3492
3493 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3494}
3495
3496
3497void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3498 Register input_reg = ToRegister(instr->value());
3499 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3500 factory()->heap_number_map());
3501 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3502
3503 Label slow, allocated, done;
3504 Register tmp = input_reg.is(eax) ? ecx : eax;
3505 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3506
3507 // Preserve the value of all registers.
3508 PushSafepointRegistersScope scope(this);
3509
3510 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3511 // Check the sign of the argument. If the argument is positive, just
3512 // return it. We do not need to patch the stack since |input| and
3513 // |result| are the same register and |input| will be restored
3514 // unchanged by popping safepoint registers.
3515 __ test(tmp, Immediate(HeapNumber::kSignMask));
3516 __ j(zero, &done, Label::kNear);
3517
3518 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3519 __ jmp(&allocated, Label::kNear);
3520
3521 // Slow case: Call the runtime system to do the number allocation.
3522 __ bind(&slow);
3523 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
3524 instr, instr->context());
3525 // Set the pointer to the new heap number in tmp.
3526 if (!tmp.is(eax)) __ mov(tmp, eax);
3527 // Restore input_reg after call to runtime.
3528 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3529
3530 __ bind(&allocated);
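  // Sketch of what follows: abs() of a boxed double is the same bit pattern
  // with the sign bit (the top bit of the exponent word) cleared, so copy both
  // 32-bit halves of |input_reg| into the new heap number, clearing the sign.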
3531 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3532 __ and_(tmp2, ~HeapNumber::kSignMask);
3533 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3534 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3535 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3536 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3537
3538 __ bind(&done);
3539}
3540
3541
3542void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3543 Register input_reg = ToRegister(instr->value());
3544 __ test(input_reg, Operand(input_reg));
3545 Label is_positive;
3546 __ j(not_sign, &is_positive, Label::kNear);
3547 __ neg(input_reg); // Sets flags.
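  // neg leaves kMinInt (0x80000000) unchanged and still negative, so a
  // negative result here means abs() is not representable as an int32.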
3548 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3549 __ bind(&is_positive);
3550}
3551
3552
3553void LCodeGen::DoMathAbs(LMathAbs* instr) {
3554 // Class for deferred case.
3555 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3556 public:
3557 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3558 LMathAbs* instr,
3559 const X87Stack& x87_stack)
3560 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
3561 void Generate() override {
3562 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3563 }
3564 LInstruction* instr() override { return instr_; }
3565
3566 private:
3567 LMathAbs* instr_;
3568 };
3569
3570 DCHECK(instr->value()->Equals(instr->result()));
3571 Representation r = instr->hydrogen()->value()->representation();
3572
3573 if (r.IsDouble()) {
3574 X87Register value = ToX87Register(instr->value());
3575 X87Fxch(value);
3576 __ fabs();
3577 } else if (r.IsSmiOrInteger32()) {
3578 EmitIntegerMathAbs(instr);
3579 } else { // Tagged case.
3580 DeferredMathAbsTaggedHeapNumber* deferred =
3581 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
3582 Register input_reg = ToRegister(instr->value());
3583 // Smi check.
3584 __ JumpIfNotSmi(input_reg, deferred->entry());
3585 EmitIntegerMathAbs(instr);
3586 __ bind(deferred->exit());
3587 }
3588}
3589
3590
3591void LCodeGen::DoMathFloor(LMathFloor* instr) {
3592 Register output_reg = ToRegister(instr->result());
3593 X87Register input_reg = ToX87Register(instr->value());
3594 X87Fxch(input_reg);
3595
3596 Label not_minus_zero, done;
3597 // Deoptimize on unordered.
3598 __ fldz();
3599 __ fld(1);
3600 __ FCmp();
3601 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
3602 __ j(below, &not_minus_zero, Label::kNear);
3603
3604 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3605 // Check for negative zero.
3606 __ j(not_equal, &not_minus_zero, Label::kNear);
3607 // +- 0.0.
3608 __ fld(0);
3609 __ FXamSign();
3610 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3611 __ Move(output_reg, Immediate(0));
3612 __ jmp(&done, Label::kFar);
3613 }
3614
3615 // Positive input.
3616 // rc=01B, round down.
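  // (The RC field is bits 11:10 of the x87 control word: 00 = round to
  // nearest, 01 = round down, 10 = round up, 11 = truncate. X87SetRC(0x0400)
  // below selects RC=01; X87SetRC(0x0000) restores round-to-nearest.)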
3617 __ bind(&not_minus_zero);
3618 __ fnclex();
3619 __ X87SetRC(0x0400);
3620 __ sub(esp, Immediate(kPointerSize));
3621 __ fist_s(Operand(esp, 0));
3622 __ pop(output_reg);
3623 __ X87CheckIA();
3624 DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
3625 __ fnclex();
3626 __ X87SetRC(0x0000);
3627 __ bind(&done);
3628}
3629
3630
3631void LCodeGen::DoMathRound(LMathRound* instr) {
3632 X87Register input_reg = ToX87Register(instr->value());
3633 Register result = ToRegister(instr->result());
3634 X87Fxch(input_reg);
3635 Label below_one_half, below_minus_one_half, done;
3636
3637 ExternalReference one_half = ExternalReference::address_of_one_half();
3638 ExternalReference minus_one_half =
3639 ExternalReference::address_of_minus_one_half();
3640
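  // Rough sketch of the strategy: for x >= 0.5 compute trunc(x + 0.5); for
  // -0.5 <= x < 0.5 the result is 0 (deoptimizing if a -0 result must be
  // preserved); for x < -0.5 compute floor(x + 0.5). This matches
  // Math.round(x) == floor(x + 0.5), modulo the -0 cases handled explicitly.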
3641 __ fld_d(Operand::StaticVariable(one_half));
3642 __ fld(1);
3643 __ FCmp();
3644 __ j(carry, &below_one_half);
3645
3646  // Use round-towards-zero: since 0.5 <= x, truncating x + 0.5 is the same
  // as floor(x + 0.5).
3647 __ fld(0);
3648 __ fadd_d(Operand::StaticVariable(one_half));
3649 // rc=11B, round toward zero.
3650 __ X87SetRC(0x0c00);
3651 __ sub(esp, Immediate(kPointerSize));
3652 // Clear exception bits.
3653 __ fnclex();
3654 __ fistp_s(MemOperand(esp, 0));
3655 // Check overflow.
3656 __ X87CheckIA();
3657 __ pop(result);
3658 DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
3659 __ fnclex();
3660 // Restore round mode.
3661 __ X87SetRC(0x0000);
3662 __ jmp(&done);
3663
3664 __ bind(&below_one_half);
3665 __ fld_d(Operand::StaticVariable(minus_one_half));
3666 __ fld(1);
3667 __ FCmp();
3668 __ j(carry, &below_minus_one_half);
3669 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3670 // we can ignore the difference between a result of -0 and +0.
3671 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3672 // If the sign is positive, we return +0.
3673 __ fld(0);
3674 __ FXamSign();
3675 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3676 }
3677 __ Move(result, Immediate(0));
3678 __ jmp(&done);
3679
3680 __ bind(&below_minus_one_half);
3681 __ fld(0);
3682 __ fadd_d(Operand::StaticVariable(one_half));
3683 // rc=01B, round down.
3684 __ X87SetRC(0x0400);
3685 __ sub(esp, Immediate(kPointerSize));
3686 // Clear exception bits.
3687 __ fnclex();
3688 __ fistp_s(MemOperand(esp, 0));
3689 // Check overflow.
3690 __ X87CheckIA();
3691 __ pop(result);
3692 DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
3693 __ fnclex();
3694 // Restore round mode.
3695 __ X87SetRC(0x0000);
3696
3697 __ bind(&done);
3698}
3699
3700
3701void LCodeGen::DoMathFround(LMathFround* instr) {
3702 X87Register input_reg = ToX87Register(instr->value());
3703 X87Fxch(input_reg);
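  // Round to float32 precision by storing the value as a 32-bit float and
  // loading it back; the store performs the rounding.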
3704 __ sub(esp, Immediate(kPointerSize));
3705 __ fstp_s(MemOperand(esp, 0));
3706 X87Fld(MemOperand(esp, 0), kX87FloatOperand);
3707 __ add(esp, Immediate(kPointerSize));
3708}
3709
3710
3711void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3712 X87Register input_reg = ToX87Register(instr->value());
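  // 0x027F sets the FPU precision control to double (53-bit mantissa) so that
  // fsqrt rounds like a double-precision sqrt; the 0x037F written afterwards
  // restores extended precision with all exceptions masked.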
3713 __ X87SetFPUCW(0x027F);
3714 X87Fxch(input_reg);
3715 __ fsqrt();
3716 __ X87SetFPUCW(0x037F);
3717}
3718
3719
3720void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3721 X87Register input_reg = ToX87Register(instr->value());
3722 DCHECK(ToX87Register(instr->result()).is(input_reg));
3723 X87Fxch(input_reg);
3724 // Note that according to ECMA-262 15.8.2.13:
3725 // Math.pow(-Infinity, 0.5) == Infinity
3726 // Math.sqrt(-Infinity) == NaN
3727 Label done, sqrt;
3728 // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
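  // (fxam reports C3/C2/C1/C0 in status-word bits 14/10/9/8; the 0x4700 mask
  // below keeps exactly those bits, and 0x0700 is the pattern for a negative
  // infinity.)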
3729 __ fxam();
3730 __ push(eax);
3731 __ fnstsw_ax();
3732 __ and_(eax, Immediate(0x4700));
3733 __ cmp(eax, Immediate(0x0700));
3734 __ j(not_equal, &sqrt, Label::kNear);
3735 // If input is -Infinity, return Infinity.
3736 __ fchs();
3737 __ jmp(&done, Label::kNear);
3738
3739 // Square root.
3740 __ bind(&sqrt);
3741 __ fldz();
3742 __ faddp(); // Convert -0 to +0.
3743 __ fsqrt();
3744 __ bind(&done);
3745 __ pop(eax);
3746}
3747
3748
3749void LCodeGen::DoPower(LPower* instr) {
3750 Representation exponent_type = instr->hydrogen()->right()->representation();
3751 X87Register result = ToX87Register(instr->result());
3752 // Having marked this as a call, we can use any registers.
3753 X87Register base = ToX87Register(instr->left());
3754 ExternalReference one_half = ExternalReference::address_of_one_half();
3755
3756 if (exponent_type.IsSmi()) {
3757 Register exponent = ToRegister(instr->right());
3758 X87LoadForUsage(base);
3759 __ SmiUntag(exponent);
3760 __ push(exponent);
3761 __ fild_s(MemOperand(esp, 0));
3762 __ pop(exponent);
3763 } else if (exponent_type.IsTagged()) {
3764 Register exponent = ToRegister(instr->right());
3765 Register temp = exponent.is(ecx) ? eax : ecx;
3766 Label no_deopt, done;
3767 X87LoadForUsage(base);
3768 __ JumpIfSmi(exponent, &no_deopt);
3769 __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
3770 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3771    // Heap number (double).
3772 __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
3773 __ jmp(&done);
3774 // SMI
3775 __ bind(&no_deopt);
3776 __ SmiUntag(exponent);
3777 __ push(exponent);
3778 __ fild_s(MemOperand(esp, 0));
3779 __ pop(exponent);
3780 __ bind(&done);
3781 } else if (exponent_type.IsInteger32()) {
3782 Register exponent = ToRegister(instr->right());
3783 X87LoadForUsage(base);
3784 __ push(exponent);
3785 __ fild_s(MemOperand(esp, 0));
3786 __ pop(exponent);
3787 } else {
3788 DCHECK(exponent_type.IsDouble());
3789 X87Register exponent_double = ToX87Register(instr->right());
3790 X87LoadForUsage(base, exponent_double);
3791 }
3792
3793 // FP data stack {base, exponent(TOS)}.
3794 // Handle (exponent==+-0.5 && base == -0).
3795 Label not_plus_0;
3796 __ fld(0);
3797 __ fabs();
3798 X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
3799 __ FCmp();
3800 __ j(parity_even, &not_plus_0, Label::kNear); // NaN.
3801 __ j(not_equal, &not_plus_0, Label::kNear);
3802 __ fldz();
3803 // FP data stack {base, exponent(TOS), zero}.
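  // Adding +0.0 into the base turns a -0.0 base into +0.0 (in IEEE 754,
  // -0 + +0 == +0 under round-to-nearest), so the pow() helper below never
  // sees a negative-zero base when the exponent is +-0.5.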
3804 __ faddp(2);
3805 __ bind(&not_plus_0);
3806
3807 {
3808 __ PrepareCallCFunction(4, eax);
3809 __ fstp_d(MemOperand(esp, kDoubleSize)); // Exponent value.
3810 __ fstp_d(MemOperand(esp, 0)); // Base value.
3811 X87PrepareToWrite(result);
3812 __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
3813 4);
3814 // Return value is in st(0) on ia32.
3815 X87CommitWrite(result);
3816 }
3817}
3818
3819
3820void LCodeGen::DoMathLog(LMathLog* instr) {
3821 DCHECK(instr->value()->Equals(instr->result()));
3822 X87Register input_reg = ToX87Register(instr->value());
3823 X87Fxch(input_reg);
3824
3825 Label positive, done, zero, nan_result;
3826 __ fldz();
3827 __ fld(1);
3828 __ FCmp();
3829 __ j(below, &nan_result, Label::kNear);
3830 __ j(equal, &zero, Label::kNear);
3831 // Positive input.
3832 // {input, ln2}.
3833 __ fldln2();
3834 // {ln2, input}.
3835 __ fxch();
3836 // {result}.
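  // fyl2x computes ST(1) * log2(ST(0)) and pops, so with ln(2) in ST(1) this
  // leaves ln(input) on top of the stack.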
3837 __ fyl2x();
3838 __ jmp(&done, Label::kNear);
3839
3840 __ bind(&nan_result);
3841 X87PrepareToWrite(input_reg);
3842 __ push(Immediate(0xffffffff));
3843 __ push(Immediate(0x7fffffff));
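  // The two pushes lay down the 64-bit pattern 0xFFFFFFFF7FFFFFFF (a quiet
  // NaN), which the fld_d below loads as the result.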
3844 __ fld_d(MemOperand(esp, 0));
3845 __ lea(esp, Operand(esp, kDoubleSize));
3846 X87CommitWrite(input_reg);
3847 __ jmp(&done, Label::kNear);
3848
3849 __ bind(&zero);
3850 ExternalReference ninf = ExternalReference::address_of_negative_infinity();
3851 X87PrepareToWrite(input_reg);
3852 __ fld_d(Operand::StaticVariable(ninf));
3853 X87CommitWrite(input_reg);
3854
3855 __ bind(&done);
3856}
3857
3858
3859void LCodeGen::DoMathClz32(LMathClz32* instr) {
3860 Register input = ToRegister(instr->value());
3861 Register result = ToRegister(instr->result());
3862
3863 __ Lzcnt(result, input);
3864}
3865
3866
3867void LCodeGen::DoMathExp(LMathExp* instr) {
3868 X87Register input = ToX87Register(instr->value());
3869 X87Register result_reg = ToX87Register(instr->result());
3870 Register temp_result = ToRegister(instr->temp1());
3871 Register temp = ToRegister(instr->temp2());
3872 Label slow, done, smi, finish;
3873 DCHECK(result_reg.is(input));
3874
3875  // Store the input into a heap number and call the runtime function
  // kMathExpRT.
3876 if (FLAG_inline_new) {
3877 __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
3878 __ jmp(&done, Label::kNear);
3879 }
3880
3881 // Slow case: Call the runtime system to do the number allocation.
3882 __ bind(&slow);
3883 {
3884 // TODO(3095996): Put a valid pointer value in the stack slot where the
3885 // result register is stored, as this register is in the pointer map, but
3886 // contains an integer value.
3887 __ Move(temp_result, Immediate(0));
3888
3889 // Preserve the value of all registers.
3890 PushSafepointRegistersScope scope(this);
3891
3892 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3893 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
3894 RecordSafepointWithRegisters(instr->pointer_map(), 0,
3895 Safepoint::kNoLazyDeopt);
3896 __ StoreToSafepointRegisterSlot(temp_result, eax);
3897 }
3898 __ bind(&done);
3899 X87LoadForUsage(input);
3900 __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
3901
3902 {
3903 // Preserve the value of all registers.
3904 PushSafepointRegistersScope scope(this);
3905
3906 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3907 __ push(temp_result);
3908 __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
3909 RecordSafepointWithRegisters(instr->pointer_map(), 1,
3910 Safepoint::kNoLazyDeopt);
3911 __ StoreToSafepointRegisterSlot(temp_result, eax);
3912 }
3913 X87PrepareToWrite(result_reg);
3914  // The return value of kMathExpRT is either a Smi or a heap number.
3915 __ JumpIfSmi(temp_result, &smi);
3916  // Heap number (double).
3917 __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
3918 __ jmp(&finish);
3919 // SMI
3920 __ bind(&smi);
3921 __ SmiUntag(temp_result);
3922 __ push(temp_result);
3923 __ fild_s(MemOperand(esp, 0));
3924 __ pop(temp_result);
3925 __ bind(&finish);
3926 X87CommitWrite(result_reg);
3927}
3928
3929
3930void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3931 DCHECK(ToRegister(instr->context()).is(esi));
3932 DCHECK(ToRegister(instr->function()).is(edi));
3933 DCHECK(instr->HasPointerMap());
3934
3935 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3936 if (known_function.is_null()) {
3937 LPointerMap* pointers = instr->pointer_map();
3938 SafepointGenerator generator(
3939 this, pointers, Safepoint::kLazyDeopt);
3940 ParameterCount count(instr->arity());
3941 __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
3942 } else {
3943 CallKnownFunction(known_function,
3944 instr->hydrogen()->formal_parameter_count(),
3945 instr->arity(), instr);
3946 }
3947}
3948
3949
3950void LCodeGen::DoCallFunction(LCallFunction* instr) {
3951  HCallFunction* hinstr = instr->hydrogen();
3952  DCHECK(ToRegister(instr->context()).is(esi));
3953 DCHECK(ToRegister(instr->function()).is(edi));
3954 DCHECK(ToRegister(instr->result()).is(eax));
3955
3956 int arity = instr->arity();
3957  ConvertReceiverMode mode = hinstr->convert_mode();
3958 if (hinstr->HasVectorAndSlot()) {
3959    Register slot_register = ToRegister(instr->temp_slot());
3960 Register vector_register = ToRegister(instr->temp_vector());
3961 DCHECK(slot_register.is(edx));
3962 DCHECK(vector_register.is(ebx));
3963
3964 AllowDeferredHandleDereference vector_structure_check;
3965    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
3966 int index = vector->GetIndex(hinstr->slot());
3967
3968 __ mov(vector_register, vector);
3969 __ mov(slot_register, Immediate(Smi::FromInt(index)));
3970
3971 Handle<Code> ic =
3972 CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
3973 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3974 } else {
3975 __ Set(eax, arity);
3976 CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
3977 }
3978}
3979
3980
3981void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3982 DCHECK(ToRegister(instr->context()).is(esi));
3983 DCHECK(ToRegister(instr->constructor()).is(edi));
3984 DCHECK(ToRegister(instr->result()).is(eax));
3985
3986 __ Move(eax, Immediate(instr->arity()));
3987 if (instr->arity() == 1) {
3988    // We only need the allocation site for the case where we have a length
3989    // argument. That case may bail out to the runtime, which will determine
3990    // the correct elements kind with the site.
3991 __ mov(ebx, instr->hydrogen()->site());
3992 } else {
3993 __ mov(ebx, isolate()->factory()->undefined_value());
3994 }
3995
3996 ElementsKind kind = instr->hydrogen()->elements_kind();
3997 AllocationSiteOverrideMode override_mode =
3998 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3999 ? DISABLE_ALLOCATION_SITES
4000 : DONT_OVERRIDE;
4001
4002 if (instr->arity() == 0) {
4003 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4004 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4005 } else if (instr->arity() == 1) {
4006 Label done;
4007 if (IsFastPackedElementsKind(kind)) {
4008 Label packed_case;
4009      // We might need the holey variant of this elements kind: look at the
4010      // first argument; a non-zero length produces an array with holes.
4011 __ mov(ecx, Operand(esp, 0));
4012 __ test(ecx, ecx);
4013 __ j(zero, &packed_case, Label::kNear);
4014
4015 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4016 ArraySingleArgumentConstructorStub stub(isolate(),
4017 holey_kind,
4018 override_mode);
4019 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4020 __ jmp(&done, Label::kNear);
4021 __ bind(&packed_case);
4022 }
4023
4024 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4025 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4026 __ bind(&done);
4027 } else {
4028 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4029 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4030 }
4031}
4032
4033
4034void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4035 DCHECK(ToRegister(instr->context()).is(esi));
4036 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4037}
4038
4039
4040void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4041 Register function = ToRegister(instr->function());
4042 Register code_object = ToRegister(instr->code_object());
4043 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
4044 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4045}
4046
4047
4048void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4049 Register result = ToRegister(instr->result());
4050 Register base = ToRegister(instr->base_object());
4051 if (instr->offset()->IsConstantOperand()) {
4052 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4053 __ lea(result, Operand(base, ToInteger32(offset)));
4054 } else {
4055 Register offset = ToRegister(instr->offset());
4056 __ lea(result, Operand(base, offset, times_1, 0));
4057 }
4058}
4059
4060
4061void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4062 Representation representation = instr->hydrogen()->field_representation();
4063
4064 HObjectAccess access = instr->hydrogen()->access();
4065 int offset = access.offset();
4066
4067 if (access.IsExternalMemory()) {
4068 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4069 MemOperand operand = instr->object()->IsConstantOperand()
4070 ? MemOperand::StaticVariable(
4071 ToExternalReference(LConstantOperand::cast(instr->object())))
4072 : MemOperand(ToRegister(instr->object()), offset);
4073 if (instr->value()->IsConstantOperand()) {
4074 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4075 __ mov(operand, Immediate(ToInteger32(operand_value)));
4076 } else {
4077 Register value = ToRegister(instr->value());
4078 __ Store(value, operand, representation);
4079 }
4080 return;
4081 }
4082
4083 Register object = ToRegister(instr->object());
4084 __ AssertNotSmi(object);
4085 DCHECK(!representation.IsSmi() ||
4086 !instr->value()->IsConstantOperand() ||
4087 IsSmi(LConstantOperand::cast(instr->value())));
4088 if (representation.IsDouble()) {
4089 DCHECK(access.IsInobject());
4090 DCHECK(!instr->hydrogen()->has_transition());
4091 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4092 X87Register value = ToX87Register(instr->value());
4093 X87Mov(FieldOperand(object, offset), value);
4094 return;
4095 }
4096
4097 if (instr->hydrogen()->has_transition()) {
4098 Handle<Map> transition = instr->hydrogen()->transition_map();
4099 AddDeprecationDependency(transition);
4100 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
4101 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4102 Register temp = ToRegister(instr->temp());
4103 Register temp_map = ToRegister(instr->temp_map());
4104 __ mov(temp_map, transition);
4105 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
4106 // Update the write barrier for the map field.
4107 __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
4108 }
4109 }
4110
4111 // Do the store.
4112 Register write_register = object;
4113 if (!access.IsInobject()) {
4114 write_register = ToRegister(instr->temp());
4115 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4116 }
4117
4118 MemOperand operand = FieldOperand(write_register, offset);
4119 if (instr->value()->IsConstantOperand()) {
4120 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4121 if (operand_value->IsRegister()) {
4122 Register value = ToRegister(operand_value);
4123 __ Store(value, operand, representation);
4124 } else if (representation.IsInteger32() || representation.IsExternal()) {
4125 Immediate immediate = ToImmediate(operand_value, representation);
4126 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4127 __ mov(operand, immediate);
4128 } else {
4129 Handle<Object> handle_value = ToHandle(operand_value);
4130 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4131 __ mov(operand, handle_value);
4132 }
4133 } else {
4134 Register value = ToRegister(instr->value());
4135 __ Store(value, operand, representation);
4136 }
4137
4138 if (instr->hydrogen()->NeedsWriteBarrier()) {
4139 Register value = ToRegister(instr->value());
4140 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4141 // Update the write barrier for the object for in-object properties.
4142 __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
4143 EMIT_REMEMBERED_SET,
4144 instr->hydrogen()->SmiCheckForWriteBarrier(),
4145 instr->hydrogen()->PointersToHereCheckForValue());
4146 }
4147}
4148
4149
4150void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4151 DCHECK(ToRegister(instr->context()).is(esi));
4152 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4153 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4154
4155 if (instr->hydrogen()->HasVectorAndSlot()) {
4156 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4157 }
4158
4159 __ mov(StoreDescriptor::NameRegister(), instr->name());
4160 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4161 isolate(), instr->language_mode(),
4162 instr->hydrogen()->initialization_state()).code();
4163 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4164}
4165
4166
4167void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4168 Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
4169 if (instr->index()->IsConstantOperand()) {
4170 __ cmp(ToOperand(instr->length()),
4171 ToImmediate(LConstantOperand::cast(instr->index()),
4172 instr->hydrogen()->length()->representation()));
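    // The cmp above has length on the left and the index constant on the
    // right, so the condition must be commuted before it is used.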
4173 cc = CommuteCondition(cc);
4174 } else if (instr->length()->IsConstantOperand()) {
4175 __ cmp(ToOperand(instr->index()),
4176 ToImmediate(LConstantOperand::cast(instr->length()),
4177 instr->hydrogen()->index()->representation()));
4178 } else {
4179 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4180 }
4181 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4182 Label done;
4183 __ j(NegateCondition(cc), &done, Label::kNear);
4184 __ int3();
4185 __ bind(&done);
4186 } else {
4187 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
4188 }
4189}
4190
4191
4192void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4193 ElementsKind elements_kind = instr->elements_kind();
4194 LOperand* key = instr->key();
4195 if (!key->IsConstantOperand() &&
4196 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4197 elements_kind)) {
4198 __ SmiUntag(ToRegister(key));
4199 }
4200 Operand operand(BuildFastArrayOperand(
4201 instr->elements(),
4202 key,
4203 instr->hydrogen()->key()->representation(),
4204 elements_kind,
4205 instr->base_offset()));
4206 if (elements_kind == FLOAT32_ELEMENTS) {
4207 X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
4208 } else if (elements_kind == FLOAT64_ELEMENTS) {
4209 uint64_t int_val = kHoleNanInt64;
4210 int32_t lower = static_cast<int32_t>(int_val);
4211 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4212 Operand operand2 = BuildFastArrayOperand(
4213 instr->elements(), instr->key(),
4214 instr->hydrogen()->key()->representation(), elements_kind,
4215 instr->base_offset() + kPointerSize);
4216
4217 Label no_special_nan_handling, done;
4218 X87Register value = ToX87Register(instr->value());
4219 X87Fxch(value);
4220 __ lea(esp, Operand(esp, -kDoubleSize));
4221 __ fst_d(MemOperand(esp, 0));
4222 __ lea(esp, Operand(esp, kDoubleSize));
4223 int offset = sizeof(kHoleNanUpper32);
4224 // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
4225 // so we check the upper with 0xffffffff for hole as a temporary fix.
4226 __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
4227 __ j(not_equal, &no_special_nan_handling, Label::kNear);
4228 __ mov(operand, Immediate(lower));
4229 __ mov(operand2, Immediate(upper));
4230 __ jmp(&done, Label::kNear);
4231
4232 __ bind(&no_special_nan_handling);
4233 __ fst_d(operand);
4234 __ bind(&done);
4235 } else {
4236 Register value = ToRegister(instr->value());
4237 switch (elements_kind) {
4238 case UINT8_ELEMENTS:
4239 case INT8_ELEMENTS:
4240 case UINT8_CLAMPED_ELEMENTS:
4241 __ mov_b(operand, value);
4242 break;
4243 case UINT16_ELEMENTS:
4244 case INT16_ELEMENTS:
4245 __ mov_w(operand, value);
4246 break;
4247 case UINT32_ELEMENTS:
4248 case INT32_ELEMENTS:
4249 __ mov(operand, value);
4250 break;
4251 case FLOAT32_ELEMENTS:
4252 case FLOAT64_ELEMENTS:
4253 case FAST_SMI_ELEMENTS:
4254 case FAST_ELEMENTS:
4255 case FAST_DOUBLE_ELEMENTS:
4256 case FAST_HOLEY_SMI_ELEMENTS:
4257 case FAST_HOLEY_ELEMENTS:
4258 case FAST_HOLEY_DOUBLE_ELEMENTS:
4259 case DICTIONARY_ELEMENTS:
4260 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4261 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4262      case FAST_STRING_WRAPPER_ELEMENTS:
4263 case SLOW_STRING_WRAPPER_ELEMENTS:
4264 case NO_ELEMENTS:
4265        UNREACHABLE();
4266 break;
4267 }
4268 }
4269}
4270
4271
4272void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4273 Operand double_store_operand = BuildFastArrayOperand(
4274 instr->elements(),
4275 instr->key(),
4276 instr->hydrogen()->key()->representation(),
4277 FAST_DOUBLE_ELEMENTS,
4278 instr->base_offset());
4279
4280 uint64_t int_val = kHoleNanInt64;
4281 int32_t lower = static_cast<int32_t>(int_val);
4282 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4283 Operand double_store_operand2 = BuildFastArrayOperand(
4284 instr->elements(), instr->key(),
4285 instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS,
4286 instr->base_offset() + kPointerSize);
4287
4288 if (instr->hydrogen()->IsConstantHoleStore()) {
4289 // This means we should store the (double) hole. No floating point
4290 // registers required.
4291 __ mov(double_store_operand, Immediate(lower));
4292 __ mov(double_store_operand2, Immediate(upper));
4293 } else {
4294 Label no_special_nan_handling, done;
4295 X87Register value = ToX87Register(instr->value());
4296 X87Fxch(value);
4297
4298 if (instr->NeedsCanonicalization()) {
4299 __ fld(0);
4300 __ fld(0);
4301 __ FCmp();
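      // Comparing the value against itself is unordered only for NaN;
      // parity_odd (ordered) therefore skips the canonicalization below.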
4302 __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4303      // All NaNs are canonicalized to 0x7fffffffffffffff.
4304 __ mov(double_store_operand, Immediate(0xffffffff));
4305 __ mov(double_store_operand2, Immediate(0x7fffffff));
4306 __ jmp(&done, Label::kNear);
4307 } else {
4308 __ lea(esp, Operand(esp, -kDoubleSize));
4309 __ fst_d(MemOperand(esp, 0));
4310 __ lea(esp, Operand(esp, kDoubleSize));
4311 int offset = sizeof(kHoleNanUpper32);
4312 // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
4313 // so we check the upper with 0xffffffff for hole as a temporary fix.
4314 __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
4315 __ j(not_equal, &no_special_nan_handling, Label::kNear);
4316 __ mov(double_store_operand, Immediate(lower));
4317 __ mov(double_store_operand2, Immediate(upper));
4318 __ jmp(&done, Label::kNear);
4319 }
4320 __ bind(&no_special_nan_handling);
4321 __ fst_d(double_store_operand);
4322 __ bind(&done);
4323 }
4324}
4325
4326
4327void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4328 Register elements = ToRegister(instr->elements());
4329 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4330
4331 Operand operand = BuildFastArrayOperand(
4332 instr->elements(),
4333 instr->key(),
4334 instr->hydrogen()->key()->representation(),
4335 FAST_ELEMENTS,
4336 instr->base_offset());
4337 if (instr->value()->IsRegister()) {
4338 __ mov(operand, ToRegister(instr->value()));
4339 } else {
4340 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4341 if (IsSmi(operand_value)) {
4342 Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4343 __ mov(operand, immediate);
4344 } else {
4345 DCHECK(!IsInteger32(operand_value));
4346 Handle<Object> handle_value = ToHandle(operand_value);
4347 __ mov(operand, handle_value);
4348 }
4349 }
4350
4351 if (instr->hydrogen()->NeedsWriteBarrier()) {
4352 DCHECK(instr->value()->IsRegister());
4353 Register value = ToRegister(instr->value());
4354 DCHECK(!instr->key()->IsConstantOperand());
4355 SmiCheck check_needed =
4356 instr->hydrogen()->value()->type().IsHeapObject()
4357 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4358 // Compute address of modified element and store it into key register.
4359 __ lea(key, operand);
4360 __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
4361 check_needed,
4362 instr->hydrogen()->PointersToHereCheckForValue());
4363 }
4364}
4365
4366
4367void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4368  // Dispatch by case: typed array (external), fast double, or fast elements.
4369 if (instr->is_fixed_typed_array()) {
4370 DoStoreKeyedExternalArray(instr);
4371 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4372 DoStoreKeyedFixedDoubleArray(instr);
4373 } else {
4374 DoStoreKeyedFixedArray(instr);
4375 }
4376}
4377
4378
4379void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4380 DCHECK(ToRegister(instr->context()).is(esi));
4381 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4382 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4383 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4384
4385 if (instr->hydrogen()->HasVectorAndSlot()) {
4386 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4387 }
4388
4389 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4390 isolate(), instr->language_mode(),
4391 instr->hydrogen()->initialization_state()).code();
4392 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4393}
4394
4395
4396void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4397 Register object = ToRegister(instr->object());
4398 Register temp = ToRegister(instr->temp());
4399 Label no_memento_found;
4400 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4401 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
4402 __ bind(&no_memento_found);
4403}
4404
4405
4406void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4407 class DeferredMaybeGrowElements final : public LDeferredCode {
4408 public:
4409 DeferredMaybeGrowElements(LCodeGen* codegen,
4410 LMaybeGrowElements* instr,
4411 const X87Stack& x87_stack)
4412 : LDeferredCode(codegen, x87_stack), instr_(instr) {}
4413 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4414 LInstruction* instr() override { return instr_; }
4415
4416 private:
4417 LMaybeGrowElements* instr_;
4418 };
4419
4420 Register result = eax;
4421 DeferredMaybeGrowElements* deferred =
4422 new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_);
4423 LOperand* key = instr->key();
4424 LOperand* current_capacity = instr->current_capacity();
4425
4426 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4427 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4428 DCHECK(key->IsConstantOperand() || key->IsRegister());
4429 DCHECK(current_capacity->IsConstantOperand() ||
4430 current_capacity->IsRegister());
4431
4432 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4433 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4434 int32_t constant_capacity =
4435 ToInteger32(LConstantOperand::cast(current_capacity));
4436 if (constant_key >= constant_capacity) {
4437 // Deferred case.
4438 __ jmp(deferred->entry());
4439 }
4440 } else if (key->IsConstantOperand()) {
4441 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4442 __ cmp(ToOperand(current_capacity), Immediate(constant_key));
4443 __ j(less_equal, deferred->entry());
4444 } else if (current_capacity->IsConstantOperand()) {
4445 int32_t constant_capacity =
4446 ToInteger32(LConstantOperand::cast(current_capacity));
4447 __ cmp(ToRegister(key), Immediate(constant_capacity));
4448 __ j(greater_equal, deferred->entry());
4449 } else {
4450 __ cmp(ToRegister(key), ToRegister(current_capacity));
4451 __ j(greater_equal, deferred->entry());
4452 }
4453
4454 __ mov(result, ToOperand(instr->elements()));
4455 __ bind(deferred->exit());
4456}
4457
4458
4459void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4460 // TODO(3095996): Get rid of this. For now, we need to make the
4461 // result register contain a valid pointer because it is already
4462 // contained in the register pointer map.
4463 Register result = eax;
4464 __ Move(result, Immediate(0));
4465
4466 // We have to call a stub.
4467 {
4468 PushSafepointRegistersScope scope(this);
4469 if (instr->object()->IsRegister()) {
4470 __ Move(result, ToRegister(instr->object()));
4471 } else {
4472 __ mov(result, ToOperand(instr->object()));
4473 }
4474
4475 LOperand* key = instr->key();
4476 if (key->IsConstantOperand()) {
4477 __ mov(ebx, ToImmediate(key, Representation::Smi()));
4478 } else {
4479 __ Move(ebx, ToRegister(key));
4480 __ SmiTag(ebx);
4481 }
4482
4483 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4484 instr->hydrogen()->kind());
4485 __ CallStub(&stub);
4486 RecordSafepointWithLazyDeopt(
4487 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4488 __ StoreToSafepointRegisterSlot(result, result);
4489 }
4490
4491 // Deopt on smi, which means the elements array changed to dictionary mode.
4492 __ test(result, Immediate(kSmiTagMask));
4493 DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
4494}
4495
4496
4497void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4498 Register object_reg = ToRegister(instr->object());
4499
4500 Handle<Map> from_map = instr->original_map();
4501 Handle<Map> to_map = instr->transitioned_map();
4502 ElementsKind from_kind = instr->from_kind();
4503 ElementsKind to_kind = instr->to_kind();
4504
4505 Label not_applicable;
4506 bool is_simple_map_transition =
4507 IsSimpleMapChangeTransition(from_kind, to_kind);
4508 Label::Distance branch_distance =
4509 is_simple_map_transition ? Label::kNear : Label::kFar;
4510 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4511 __ j(not_equal, &not_applicable, branch_distance);
4512 if (is_simple_map_transition) {
4513 Register new_map_reg = ToRegister(instr->new_map_temp());
4514 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4515 Immediate(to_map));
4516 // Write barrier.
4517 DCHECK_NOT_NULL(instr->temp());
4518 __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4519 ToRegister(instr->temp()), kDontSaveFPRegs);
4520 } else {
4521 DCHECK(ToRegister(instr->context()).is(esi));
4522 DCHECK(object_reg.is(eax));
4523 PushSafepointRegistersScope scope(this);
4524 __ mov(ebx, to_map);
4525 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4526 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4527 __ CallStub(&stub);
4528 RecordSafepointWithLazyDeopt(instr,
4529 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4530 }
4531 __ bind(&not_applicable);
4532}
4533
4534
4535void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4536 class DeferredStringCharCodeAt final : public LDeferredCode {
4537 public:
4538 DeferredStringCharCodeAt(LCodeGen* codegen,
4539 LStringCharCodeAt* instr,
4540 const X87Stack& x87_stack)
4541 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4542 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4543 LInstruction* instr() override { return instr_; }
4544
4545 private:
4546 LStringCharCodeAt* instr_;
4547 };
4548
4549 DeferredStringCharCodeAt* deferred =
4550 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4551
4552 StringCharLoadGenerator::Generate(masm(),
4553 factory(),
4554 ToRegister(instr->string()),
4555 ToRegister(instr->index()),
4556 ToRegister(instr->result()),
4557 deferred->entry());
4558 __ bind(deferred->exit());
4559}
4560
4561
4562void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4563 Register string = ToRegister(instr->string());
4564 Register result = ToRegister(instr->result());
4565
4566 // TODO(3095996): Get rid of this. For now, we need to make the
4567 // result register contain a valid pointer because it is already
4568 // contained in the register pointer map.
4569 __ Move(result, Immediate(0));
4570
4571 PushSafepointRegistersScope scope(this);
4572 __ push(string);
4573 // Push the index as a smi. This is safe because of the checks in
4574 // DoStringCharCodeAt above.
4575 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4576 if (instr->index()->IsConstantOperand()) {
4577 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4578 Representation::Smi());
4579 __ push(immediate);
4580 } else {
4581 Register index = ToRegister(instr->index());
4582 __ SmiTag(index);
4583 __ push(index);
4584 }
4585 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
4586 instr, instr->context());
4587 __ AssertSmi(eax);
4588 __ SmiUntag(eax);
4589 __ StoreToSafepointRegisterSlot(result, eax);
4590}
4591
4592
4593void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4594 class DeferredStringCharFromCode final : public LDeferredCode {
4595 public:
4596 DeferredStringCharFromCode(LCodeGen* codegen,
4597 LStringCharFromCode* instr,
4598 const X87Stack& x87_stack)
4599 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4600 void Generate() override {
4601 codegen()->DoDeferredStringCharFromCode(instr_);
4602 }
4603 LInstruction* instr() override { return instr_; }
4604
4605 private:
4606 LStringCharFromCode* instr_;
4607 };
4608
4609 DeferredStringCharFromCode* deferred =
4610 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
4611
4612 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4613 Register char_code = ToRegister(instr->char_code());
4614 Register result = ToRegister(instr->result());
4615 DCHECK(!char_code.is(result));
4616
4617 __ cmp(char_code, String::kMaxOneByteCharCode);
4618 __ j(above, deferred->entry());
4619 __ Move(result, Immediate(factory()->single_character_string_cache()));
4620 __ mov(result, FieldOperand(result,
4621 char_code, times_pointer_size,
4622 FixedArray::kHeaderSize));
4623 __ cmp(result, factory()->undefined_value());
4624 __ j(equal, deferred->entry());
4625 __ bind(deferred->exit());
4626}
4627
4628
4629void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4630 Register char_code = ToRegister(instr->char_code());
4631 Register result = ToRegister(instr->result());
4632
4633 // TODO(3095996): Get rid of this. For now, we need to make the
4634 // result register contain a valid pointer because it is already
4635 // contained in the register pointer map.
4636 __ Move(result, Immediate(0));
4637
4638 PushSafepointRegistersScope scope(this);
4639 __ SmiTag(char_code);
4640 __ push(char_code);
4641 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4642 instr->context());
4643 __ StoreToSafepointRegisterSlot(result, eax);
4644}
4645
4646
4647void LCodeGen::DoStringAdd(LStringAdd* instr) {
4648 DCHECK(ToRegister(instr->context()).is(esi));
4649 DCHECK(ToRegister(instr->left()).is(edx));
4650 DCHECK(ToRegister(instr->right()).is(eax));
4651 StringAddStub stub(isolate(),
4652 instr->hydrogen()->flags(),
4653 instr->hydrogen()->pretenure_flag());
4654 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4655}
4656
4657
4658void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4659 LOperand* input = instr->value();
4660 LOperand* output = instr->result();
4661 DCHECK(input->IsRegister() || input->IsStackSlot());
4662 DCHECK(output->IsDoubleRegister());
4663 if (input->IsRegister()) {
4664 Register input_reg = ToRegister(input);
4665 __ push(input_reg);
4666 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
4667 __ pop(input_reg);
4668 } else {
4669 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
4670 }
4671}
4672
4673
4674void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4675 LOperand* input = instr->value();
4676 LOperand* output = instr->result();
4677 X87Register res = ToX87Register(output);
4678 X87PrepareToWrite(res);
4679 __ LoadUint32NoSSE2(ToRegister(input));
4680 X87CommitWrite(res);
4681}
4682
4683
4684void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4685 class DeferredNumberTagI final : public LDeferredCode {
4686 public:
4687 DeferredNumberTagI(LCodeGen* codegen,
4688 LNumberTagI* instr,
4689 const X87Stack& x87_stack)
4690 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4691 void Generate() override {
4692 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4693 SIGNED_INT32);
4694 }
4695 LInstruction* instr() override { return instr_; }
4696
4697 private:
4698 LNumberTagI* instr_;
4699 };
4700
4701 LOperand* input = instr->value();
4702 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4703 Register reg = ToRegister(input);
4704
4705 DeferredNumberTagI* deferred =
4706 new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
4707 __ SmiTag(reg);
4708 __ j(overflow, deferred->entry());
4709 __ bind(deferred->exit());
4710}
4711
4712
4713void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4714 class DeferredNumberTagU final : public LDeferredCode {
4715 public:
4716 DeferredNumberTagU(LCodeGen* codegen,
4717 LNumberTagU* instr,
4718 const X87Stack& x87_stack)
4719 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4720 void Generate() override {
4721 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4722 UNSIGNED_INT32);
4723 }
4724 LInstruction* instr() override { return instr_; }
4725
4726 private:
4727 LNumberTagU* instr_;
4728 };
4729
4730 LOperand* input = instr->value();
4731 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4732 Register reg = ToRegister(input);
4733
4734 DeferredNumberTagU* deferred =
4735 new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
4736 __ cmp(reg, Immediate(Smi::kMaxValue));
4737 __ j(above, deferred->entry());
4738 __ SmiTag(reg);
4739 __ bind(deferred->exit());
4740}
4741
4742
4743void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4744 LOperand* value,
4745 LOperand* temp,
4746 IntegerSignedness signedness) {
4747 Label done, slow;
4748 Register reg = ToRegister(value);
4749 Register tmp = ToRegister(temp);
4750
4751 if (signedness == SIGNED_INT32) {
4752 // There was overflow, so bits 30 and 31 of the original integer
4753 // disagree. Try to allocate a heap number in new space and store
4754 // the value in there. If that fails, call the runtime system.
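    // Recover the original value: SmiTag shifted it left by one and
    // overflowed, so SmiUntag (arithmetic shift right) yields the value with
    // its top bit flipped, and the xor flips it back. E.g. 0x40000000 tags to
    // 0x80000000, untags to 0xC0000000, and the xor restores 0x40000000.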
4755 __ SmiUntag(reg);
4756 __ xor_(reg, 0x80000000);
4757 __ push(reg);
4758 __ fild_s(Operand(esp, 0));
4759 __ pop(reg);
4760 } else {
4761 // There's no fild variant for unsigned values, so zero-extend to a 64-bit
4762 // int manually.
4763 __ push(Immediate(0));
4764 __ push(reg);
4765 __ fild_d(Operand(esp, 0));
4766 __ pop(reg);
4767 __ pop(reg);
4768 }
4769
4770 if (FLAG_inline_new) {
4771 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4772 __ jmp(&done, Label::kNear);
4773 }
4774
4775 // Slow case: Call the runtime system to do the number allocation.
4776 __ bind(&slow);
4777 {
4778 // TODO(3095996): Put a valid pointer value in the stack slot where the
4779 // result register is stored, as this register is in the pointer map, but
4780 // contains an integer value.
4781 __ Move(reg, Immediate(0));
4782
4783 // Preserve the value of all registers.
4784 PushSafepointRegistersScope scope(this);
4785
4786 // NumberTagI and NumberTagD use the context from the frame, rather than
4787 // the environment's HContext or HInlinedContext value.
4788 // They only call Runtime::kAllocateHeapNumber.
4789 // The corresponding HChange instructions are added in a phase that does
4790 // not have easy access to the local context.
4791 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4792 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4793 RecordSafepointWithRegisters(
4794 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4795 __ StoreToSafepointRegisterSlot(reg, eax);
4796 }
4797
4798 __ bind(&done);
4799 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4800}
4801
4802
4803void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4804 class DeferredNumberTagD final : public LDeferredCode {
4805 public:
4806 DeferredNumberTagD(LCodeGen* codegen,
4807 LNumberTagD* instr,
4808 const X87Stack& x87_stack)
4809 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4810 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4811 LInstruction* instr() override { return instr_; }
4812
4813 private:
4814 LNumberTagD* instr_;
4815 };
4816
4817 Register reg = ToRegister(instr->result());
4818
4819  // Put the value on top of the FPU stack.
4820 X87Register src = ToX87Register(instr->value());
4821 // Don't use X87LoadForUsage here, which is only used by Instruction which
4822 // clobbers fp registers.
4823 x87_stack_.Fxch(src);
4824
4825 DeferredNumberTagD* deferred =
4826 new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
4827 if (FLAG_inline_new) {
4828 Register tmp = ToRegister(instr->temp());
4829 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4830 } else {
4831 __ jmp(deferred->entry());
4832 }
4833 __ bind(deferred->exit());
4834 __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
4835}
4836
4837
4838void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4839 // TODO(3095996): Get rid of this. For now, we need to make the
4840 // result register contain a valid pointer because it is already
4841 // contained in the register pointer map.
4842 Register reg = ToRegister(instr->result());
4843 __ Move(reg, Immediate(0));
4844
4845 PushSafepointRegistersScope scope(this);
4846 // NumberTagI and NumberTagD use the context from the frame, rather than
4847 // the environment's HContext or HInlinedContext value.
4848 // They only call Runtime::kAllocateHeapNumber.
4849 // The corresponding HChange instructions are added in a phase that does
4850 // not have easy access to the local context.
4851 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4852 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4853 RecordSafepointWithRegisters(
4854 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4855 __ StoreToSafepointRegisterSlot(reg, eax);
4856}
4857
4858
4859void LCodeGen::DoSmiTag(LSmiTag* instr) {
4860 HChange* hchange = instr->hydrogen();
4861 Register input = ToRegister(instr->value());
4862 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4863 hchange->value()->CheckFlag(HValue::kUint32)) {
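    // A uint32 value fits in a (31-bit) Smi only if it is below 2^30, i.e. if
    // bits 30 and 31 are both clear; otherwise deoptimize.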
4864 __ test(input, Immediate(0xc0000000));
4865 DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
4866 }
4867 __ SmiTag(input);
4868 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4869 !hchange->value()->CheckFlag(HValue::kUint32)) {
4870 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
4871 }
4872}
4873
4874
4875void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4876 LOperand* input = instr->value();
4877 Register result = ToRegister(input);
4878 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4879 if (instr->needs_check()) {
4880 __ test(result, Immediate(kSmiTagMask));
4881 DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
4882 } else {
4883 __ AssertSmi(result);
4884 }
4885 __ SmiUntag(result);
4886}
4887
4888
4889void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
4890 Register temp_reg, X87Register res_reg,
4891 NumberUntagDMode mode) {
4892 bool can_convert_undefined_to_nan =
4893 instr->hydrogen()->can_convert_undefined_to_nan();
4894 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4895
4896 Label load_smi, done;
4897
4898 X87PrepareToWrite(res_reg);
4899 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4900 // Smi check.
4901 __ JumpIfSmi(input_reg, &load_smi);
4902
4903 // Heap number map check.
4904 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4905 factory()->heap_number_map());
4906 if (!can_convert_undefined_to_nan) {
4907 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
4908 } else {
4909 Label heap_number, convert;
4910 __ j(equal, &heap_number);
4911
4912 // Convert undefined (or hole) to NaN.
4913 __ cmp(input_reg, factory()->undefined_value());
4914 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
4915
4916 __ bind(&convert);
4917 __ push(Immediate(0xffffffff));
4918 __ push(Immediate(0x7fffffff));
4919 __ fld_d(MemOperand(esp, 0));
4920 __ lea(esp, Operand(esp, kDoubleSize));
4921 __ jmp(&done, Label::kNear);
4922
4923 __ bind(&heap_number);
4924 }
4925 // Heap number to x87 conversion.
4926 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4927 if (deoptimize_on_minus_zero) {
4928 __ fldz();
4929 __ FCmp();
4930 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4931 __ j(not_zero, &done, Label::kNear);
4932
4933 // Use general purpose registers to check if we have -0.0
4934 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
4935 __ test(temp_reg, Immediate(HeapNumber::kSignMask));
4936 __ j(zero, &done, Label::kNear);
4937
4938 // Pop FPU stack before deoptimizing.
4939 __ fstp(0);
4940 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
4941 }
4942 __ jmp(&done, Label::kNear);
4943 } else {
4944 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4945 }
4946
4947 __ bind(&load_smi);
4948 // Clobbering a temp is faster than re-tagging the
4949 // input register since we avoid dependencies.
4950 __ mov(temp_reg, input_reg);
4951 __ SmiUntag(temp_reg); // Untag smi before converting to float.
4952 __ push(temp_reg);
4953 __ fild_s(Operand(esp, 0));
4954 __ add(esp, Immediate(kPointerSize));
4955 __ bind(&done);
4956 X87CommitWrite(res_reg);
4957}
4958
4959
4960void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4961 Register input_reg = ToRegister(instr->value());
4962
4963 // The input was optimistically untagged; revert it.
4964 STATIC_ASSERT(kSmiTagSize == 1);
4965 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
4966
4967 if (instr->truncating()) {
4968 Label no_heap_number, check_bools, check_false;
4969
4970 // Heap number map check.
4971 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4972 factory()->heap_number_map());
4973 __ j(not_equal, &no_heap_number, Label::kNear);
4974 __ TruncateHeapNumberToI(input_reg, input_reg);
4975 __ jmp(done);
4976
4977 __ bind(&no_heap_number);
4978 // Check for Oddballs. Undefined/False is converted to zero and True to one
4979 // for truncating conversions.
4980 __ cmp(input_reg, factory()->undefined_value());
4981 __ j(not_equal, &check_bools, Label::kNear);
4982 __ Move(input_reg, Immediate(0));
4983 __ jmp(done);
4984
4985 __ bind(&check_bools);
4986 __ cmp(input_reg, factory()->true_value());
4987 __ j(not_equal, &check_false, Label::kNear);
4988 __ Move(input_reg, Immediate(1));
4989 __ jmp(done);
4990
4991 __ bind(&check_false);
4992 __ cmp(input_reg, factory()->false_value());
4993 DeoptimizeIf(not_equal, instr,
4994 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
4995 __ Move(input_reg, Immediate(0));
4996 } else {
4997    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
4998    // should first try a fast conversion and then bail out to this slow case.
4999 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5000 isolate()->factory()->heap_number_map());
5001 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
5002
5003 __ sub(esp, Immediate(kPointerSize));
5004 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5005
5006 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
5007 Label no_precision_lost, not_nan, zero_check;
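      // Round-trip the value through a 32-bit integer (fist_s/fild_s) and
      // compare it with the original to detect lost precision or NaN.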
5008 __ fld(0);
5009
5010 __ fist_s(MemOperand(esp, 0));
5011 __ fild_s(MemOperand(esp, 0));
5012 __ FCmp();
5013 __ pop(input_reg);
5014
5015 __ j(equal, &no_precision_lost, Label::kNear);
5016 __ fstp(0);
5017 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
5018 __ bind(&no_precision_lost);
5019
5020 __ j(parity_odd, &not_nan);
5021 __ fstp(0);
5022 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5023 __ bind(&not_nan);
5024
5025 __ test(input_reg, Operand(input_reg));
5026 __ j(zero, &zero_check, Label::kNear);
5027 __ fstp(0);
5028 __ jmp(done);
5029
5030 __ bind(&zero_check);
5031      // To check for minus zero, store the value as a single-precision float:
5032      // +0.0 has an all-zero bit pattern, while -0.0 has the sign bit set.
5033 __ sub(esp, Immediate(kPointerSize));
5034 __ fstp_s(Operand(esp, 0));
5035 __ pop(input_reg);
5036 __ test(input_reg, Operand(input_reg));
5037 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
5038 } else {
5039 __ fist_s(MemOperand(esp, 0));
5040 __ fild_s(MemOperand(esp, 0));
5041 __ FCmp();
5042 __ pop(input_reg);
5043 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
5044 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
5045 }
5046 }
5047}
5048
5049
5050void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5051 class DeferredTaggedToI final : public LDeferredCode {
5052 public:
5053 DeferredTaggedToI(LCodeGen* codegen,
5054 LTaggedToI* instr,
5055 const X87Stack& x87_stack)
5056 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5057 void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
5058 LInstruction* instr() override { return instr_; }
5059
5060 private:
5061 LTaggedToI* instr_;
5062 };
5063
5064 LOperand* input = instr->value();
5065 DCHECK(input->IsRegister());
5066 Register input_reg = ToRegister(input);
5067 DCHECK(input_reg.is(ToRegister(instr->result())));
5068
5069 if (instr->hydrogen()->value()->representation().IsSmi()) {
5070 __ SmiUntag(input_reg);
5071 } else {
5072 DeferredTaggedToI* deferred =
5073 new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5074 // Optimistically untag the input.
5075 // If the input is a HeapObject, SmiUntag will set the carry flag.
5076 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5077 __ SmiUntag(input_reg);
5078 // Branch to deferred code if the input was tagged.
5079 // The deferred code will take care of restoring the tag.
5080 __ j(carry, deferred->entry());
5081 __ bind(deferred->exit());
5082 }
5083}
5084
5085
5086void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5087 LOperand* input = instr->value();
5088 DCHECK(input->IsRegister());
5089 LOperand* temp = instr->temp();
5090 DCHECK(temp->IsRegister());
5091 LOperand* result = instr->result();
5092 DCHECK(result->IsDoubleRegister());
5093
5094 Register input_reg = ToRegister(input);
5095 Register temp_reg = ToRegister(temp);
5096
5097 HValue* value = instr->hydrogen()->value();
5098 NumberUntagDMode mode = value->representation().IsSmi()
5099 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5100
5101 EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
5102 mode);
5103}
5104
5105
5106void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5107 LOperand* input = instr->value();
5108 DCHECK(input->IsDoubleRegister());
5109 LOperand* result = instr->result();
5110 DCHECK(result->IsRegister());
5111 Register result_reg = ToRegister(result);
5112
5113 if (instr->truncating()) {
5114 X87Register input_reg = ToX87Register(input);
5115 X87Fxch(input_reg);
5116 __ TruncateX87TOSToI(result_reg);
5117 } else {
5118 Label lost_precision, is_nan, minus_zero, done;
5119 X87Register input_reg = ToX87Register(input);
5120 X87Fxch(input_reg);
5121 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5122 &lost_precision, &is_nan, &minus_zero);
5123 __ jmp(&done);
5124 __ bind(&lost_precision);
5125 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
5126 __ bind(&is_nan);
5127 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5128 __ bind(&minus_zero);
5129 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
5130 __ bind(&done);
5131 }
5132}
5133
5134
5135void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5136 LOperand* input = instr->value();
5137 DCHECK(input->IsDoubleRegister());
5138 LOperand* result = instr->result();
5139 DCHECK(result->IsRegister());
5140 Register result_reg = ToRegister(result);
5141
5142 Label lost_precision, is_nan, minus_zero, done;
5143 X87Register input_reg = ToX87Register(input);
5144 X87Fxch(input_reg);
5145 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5146 &lost_precision, &is_nan, &minus_zero);
5147 __ jmp(&done);
5148 __ bind(&lost_precision);
5149 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
5150 __ bind(&is_nan);
5151 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5152 __ bind(&minus_zero);
5153 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
5154 __ bind(&done);
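  // Smi-tagging shifts the value left by one; the overflow flag signals a
  // result that does not fit in a Smi.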
5155 __ SmiTag(result_reg);
5156 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
5157}
5158
5159
5160void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5161 LOperand* input = instr->value();
5162 __ test(ToOperand(input), Immediate(kSmiTagMask));
5163 DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
5164}
5165
5166
5167void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5168 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5169 LOperand* input = instr->value();
5170 __ test(ToOperand(input), Immediate(kSmiTagMask));
5171 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
5172 }
5173}
5174
5175
5176void LCodeGen::DoCheckArrayBufferNotNeutered(
5177 LCheckArrayBufferNotNeutered* instr) {
5178 Register view = ToRegister(instr->view());
5179 Register scratch = ToRegister(instr->scratch());
5180
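  // Load the backing JSArrayBuffer and deoptimize if it has been neutered
  // (detached); any access would then be out of bounds.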
5181 __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
5182 __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
5183 1 << JSArrayBuffer::WasNeutered::kShift);
5184 DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
5185}
5186
5187
5188void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5189 Register input = ToRegister(instr->value());
5190 Register temp = ToRegister(instr->temp());
5191
5192 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
5193
5194 if (instr->hydrogen()->is_interval_check()) {
5195 InstanceType first;
5196 InstanceType last;
5197 instr->hydrogen()->GetCheckInterval(&first, &last);
5198
5199 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5200 static_cast<int8_t>(first));
5201
5202    // If there is only one type in the interval, check for equality.
5203 if (first == last) {
5204 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5205 } else {
5206 DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
5207 // Omit check for the last type.
5208 if (last != LAST_TYPE) {
5209 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5210 static_cast<int8_t>(last));
5211 DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
5212 }
5213 }
5214 } else {
5215 uint8_t mask;
5216 uint8_t tag;
5217 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5218
5219 if (base::bits::IsPowerOfTwo32(mask)) {
5220 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
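      // With a single-bit mask the tag is either 0 or the mask itself, so a
      // single bit test decides the check.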
5221 __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
5222 DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
5223 Deoptimizer::kWrongInstanceType);
5224 } else {
5225 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
5226 __ and_(temp, mask);
5227 __ cmp(temp, tag);
5228 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5229 }
5230 }
5231}
5232
5233
5234void LCodeGen::DoCheckValue(LCheckValue* instr) {
5235 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5236 if (instr->hydrogen()->object_in_new_space()) {
5237 Register reg = ToRegister(instr->value());
5238 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5239 __ cmp(reg, Operand::ForCell(cell));
5240 } else {
5241 Operand operand = ToOperand(instr->value());
5242 __ cmp(operand, object);
5243 }
5244 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
5245}
5246
5247
5248void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5249 {
5250 PushSafepointRegistersScope scope(this);
5251 __ push(object);
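    // The runtime call below does not need a context; clear esi, the context
    // register.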
5252 __ xor_(esi, esi);
5253 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5254 RecordSafepointWithRegisters(
5255 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5256
5257 __ test(eax, Immediate(kSmiTagMask));
5258 }
5259 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
5260}
5261
5262
5263void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5264 class DeferredCheckMaps final : public LDeferredCode {
5265 public:
5266 DeferredCheckMaps(LCodeGen* codegen,
5267 LCheckMaps* instr,
5268 Register object,
5269 const X87Stack& x87_stack)
5270 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
5271 SetExit(check_maps());
5272 }
5273 void Generate() override {
5274 codegen()->DoDeferredInstanceMigration(instr_, object_);
5275 }
5276 Label* check_maps() { return &check_maps_; }
5277 LInstruction* instr() override { return instr_; }
5278
5279 private:
5280 LCheckMaps* instr_;
5281 Label check_maps_;
5282 Register object_;
5283 };
5284
5285 if (instr->hydrogen()->IsStabilityCheck()) {
5286 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5287 for (int i = 0; i < maps->size(); ++i) {
5288 AddStabilityDependency(maps->at(i).handle());
5289 }
5290 return;
5291 }
5292
5293 LOperand* input = instr->value();
5294 DCHECK(input->IsRegister());
5295 Register reg = ToRegister(input);
5296
5297 DeferredCheckMaps* deferred = NULL;
5298 if (instr->hydrogen()->HasMigrationTarget()) {
5299 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
5300 __ bind(deferred->check_maps());
5301 }
5302
5303 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5304 Label success;
5305 for (int i = 0; i < maps->size() - 1; i++) {
5306 Handle<Map> map = maps->at(i).handle();
5307 __ CompareMap(reg, map);
5308 __ j(equal, &success, Label::kNear);
5309 }
5310
5311 Handle<Map> map = maps->at(maps->size() - 1).handle();
5312 __ CompareMap(reg, map);
5313 if (instr->hydrogen()->HasMigrationTarget()) {
5314 __ j(not_equal, deferred->entry());
5315 } else {
5316 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5317 }
5318
5319 __ bind(&success);
5320}
5321
5322
5323void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5324 X87Register value_reg = ToX87Register(instr->unclamped());
5325 Register result_reg = ToRegister(instr->result());
5326 X87Fxch(value_reg);
5327 __ ClampTOSToUint8(result_reg);
5328}
5329
5330
5331void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5332 DCHECK(instr->unclamped()->Equals(instr->result()));
5333 Register value_reg = ToRegister(instr->result());
5334 __ ClampUint8(value_reg);
5335}
5336
5337
5338void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5339 Register input_reg = ToRegister(instr->unclamped());
5340 Register result_reg = ToRegister(instr->result());
5341 Register scratch = ToRegister(instr->scratch());
5342 Register scratch2 = ToRegister(instr->scratch2());
5343 Register scratch3 = ToRegister(instr->scratch3());
5344 Label is_smi, done, heap_number, valid_exponent,
5345 largest_value, zero_result, maybe_nan_or_infinity;
5346
5347 __ JumpIfSmi(input_reg, &is_smi);
5348
5349 // Check for heap number
5350 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5351 factory()->heap_number_map());
5352 __ j(equal, &heap_number, Label::kNear);
5353
5354 // Check for undefined. Undefined is converted to zero for clamping
5355 // conversions.
5356 __ cmp(input_reg, factory()->undefined_value());
5357 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
5358 __ jmp(&zero_result, Label::kNear);
5359
5360 // Heap number
5361 __ bind(&heap_number);
5362
5363  // Surprisingly, all of the hand-crafted bit manipulations below are much
5364  // faster than the x86 FPU's built-in instruction, especially since "banker's
5365  // rounding" would add considerable extra cost.
5366
5367 // Get exponent word.
5368 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5369 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5370
5371 // Test for negative values --> clamp to zero
5372 __ test(scratch, scratch);
5373 __ j(negative, &zero_result, Label::kNear);
5374
5375 // Get exponent alone in scratch2.
5376 __ mov(scratch2, scratch);
5377 __ and_(scratch2, HeapNumber::kExponentMask);
5378 __ shr(scratch2, HeapNumber::kExponentShift);
5379 __ j(zero, &zero_result, Label::kNear);
5380 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5381 __ j(negative, &zero_result, Label::kNear);
5382
5383 const uint32_t non_int8_exponent = 7;
5384 __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5385 // If the exponent is too big, check for special values.
5386 __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5387
5388 __ bind(&valid_exponent);
5389  // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5390  // < 7. The shift bias is the number of bits to shift the mantissa so that,
5391  // with an exponent of 7, the top-most one ends up in bit 30, which allows
5392  // detecting the rounding overflow of 255.5 to 256 (bit 31 goes from 0 to
5393  // 1).
5394 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5395 __ lea(result_reg, MemOperand(scratch2, shift_bias));
5396 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
5397 // top bits of the mantissa.
5398 __ and_(scratch, HeapNumber::kMantissaMask);
5399 // Put back the implicit 1 of the mantissa
5400 __ or_(scratch, 1 << HeapNumber::kExponentShift);
5401 // Shift up to round
5402 __ shl_cl(scratch);
5403  // Use "banker's rounding" as the spec requires: if the fractional part of the
5404  // number is exactly 0.5, add the bit in the "ones" place to the "halves"
5405  // place, which has the effect of rounding to even.
5406 __ mov(scratch2, scratch);
5407 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5408 const uint32_t one_bit_shift = one_half_bit_shift + 1;
5409 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5410 __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5411 Label no_round;
5412 __ j(less, &no_round, Label::kNear);
5413 Label round_up;
5414 __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5415 __ j(greater, &round_up, Label::kNear);
5416 __ test(scratch3, scratch3);
5417 __ j(not_zero, &round_up, Label::kNear);
5418 __ mov(scratch2, scratch);
5419 __ and_(scratch2, Immediate(1 << one_bit_shift));
5420 __ shr(scratch2, 1);
5421 __ bind(&round_up);
5422 __ add(scratch, scratch2);
5423 __ j(overflow, &largest_value, Label::kNear);
5424 __ bind(&no_round);
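  // The rounded integer now occupies bits 30..23 of scratch; shift it down to
  // obtain the 8-bit result.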
5425 __ shr(scratch, 23);
5426 __ mov(result_reg, scratch);
5427 __ jmp(&done, Label::kNear);
5428
5429 __ bind(&maybe_nan_or_infinity);
5430  // Check for NaN/Infinity; all other values map to 255.
5431 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5432 __ j(not_equal, &largest_value, Label::kNear);
5433
5434 // Check for NaN, which differs from Infinity in that at least one mantissa
5435 // bit is set.
5436 __ and_(scratch, HeapNumber::kMantissaMask);
5437 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5438 __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
5439 // Infinity -> Fall through to map to 255.
5440
5441 __ bind(&largest_value);
5442 __ mov(result_reg, Immediate(255));
5443 __ jmp(&done, Label::kNear);
5444
5445 __ bind(&zero_result);
5446 __ xor_(result_reg, result_reg);
5447 __ jmp(&done, Label::kNear);
5448
5449 // smi
5450 __ bind(&is_smi);
5451 if (!input_reg.is(result_reg)) {
5452 __ mov(result_reg, input_reg);
5453 }
5454 __ SmiUntag(result_reg);
5455 __ ClampUint8(result_reg);
5456 __ bind(&done);
5457}
5458
5459
5460void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5461 X87Register value_reg = ToX87Register(instr->value());
5462 Register result_reg = ToRegister(instr->result());
5463 X87Fxch(value_reg);
5464 __ sub(esp, Immediate(kDoubleSize));
5465 __ fst_d(Operand(esp, 0));
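  // The double is stored little-endian: the low 32 bits are at esp and the
  // high 32 bits at esp + kPointerSize.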
5466 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5467 __ mov(result_reg, Operand(esp, kPointerSize));
5468 } else {
5469 __ mov(result_reg, Operand(esp, 0));
5470 }
5471 __ add(esp, Immediate(kDoubleSize));
5472}
5473
5474
5475void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5476 Register hi_reg = ToRegister(instr->hi());
5477 Register lo_reg = ToRegister(instr->lo());
5478 X87Register result_reg = ToX87Register(instr->result());
5479  // Follow the pattern below to write an x87 FP register.
5480 X87PrepareToWrite(result_reg);
5481 __ sub(esp, Immediate(kDoubleSize));
5482 __ mov(Operand(esp, 0), lo_reg);
5483 __ mov(Operand(esp, kPointerSize), hi_reg);
5484 __ fld_d(Operand(esp, 0));
5485 __ add(esp, Immediate(kDoubleSize));
5486 X87CommitWrite(result_reg);
5487}
5488
5489
5490void LCodeGen::DoAllocate(LAllocate* instr) {
5491 class DeferredAllocate final : public LDeferredCode {
5492 public:
5493 DeferredAllocate(LCodeGen* codegen,
5494 LAllocate* instr,
5495 const X87Stack& x87_stack)
5496 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5497 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5498 LInstruction* instr() override { return instr_; }
5499
5500 private:
5501 LAllocate* instr_;
5502 };
5503
5504 DeferredAllocate* deferred =
5505 new(zone()) DeferredAllocate(this, instr, x87_stack_);
5506
5507 Register result = ToRegister(instr->result());
5508 Register temp = ToRegister(instr->temp());
5509
5510 // Allocate memory for the object.
5511 AllocationFlags flags = TAG_OBJECT;
5512 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5513 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5514 }
5515 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5516 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5517 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5518 }
5519
5520 if (instr->size()->IsConstantOperand()) {
5521 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5522 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5523 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5524 } else {
5525 Register size = ToRegister(instr->size());
5526 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5527 }
5528
5529 __ bind(deferred->exit());
5530
5531 if (instr->hydrogen()->MustPrefillWithFiller()) {
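    // Fill the new object with one-word filler maps, walking from the last
    // word down to the first, so the heap stays iterable until the real
    // contents are stored.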
5532 if (instr->size()->IsConstantOperand()) {
5533 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5534 __ mov(temp, (size / kPointerSize) - 1);
5535 } else {
5536 temp = ToRegister(instr->size());
5537 __ shr(temp, kPointerSizeLog2);
5538 __ dec(temp);
5539 }
5540 Label loop;
5541 __ bind(&loop);
5542 __ mov(FieldOperand(result, temp, times_pointer_size, 0),
5543 isolate()->factory()->one_pointer_filler_map());
5544 __ dec(temp);
5545 __ j(not_zero, &loop);
5546 }
5547}
5548
5549
5550void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5551 Register result = ToRegister(instr->result());
5552
5553 // TODO(3095996): Get rid of this. For now, we need to make the
5554 // result register contain a valid pointer because it is already
5555 // contained in the register pointer map.
5556 __ Move(result, Immediate(Smi::FromInt(0)));
5557
5558 PushSafepointRegistersScope scope(this);
5559 if (instr->size()->IsRegister()) {
5560 Register size = ToRegister(instr->size());
5561 DCHECK(!size.is(result));
5562 __ SmiTag(ToRegister(instr->size()));
5563 __ push(size);
5564 } else {
5565 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5566 if (size >= 0 && size <= Smi::kMaxValue) {
5567 __ push(Immediate(Smi::FromInt(size)));
5568 } else {
5569 // We should never get here at runtime => abort
5570 __ int3();
5571 return;
5572 }
5573 }
5574
5575 int flags = AllocateDoubleAlignFlag::encode(
5576 instr->hydrogen()->MustAllocateDoubleAligned());
5577 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5578 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5579 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5580 } else {
5581 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5582 }
5583 __ push(Immediate(Smi::FromInt(flags)));
5584
5585 CallRuntimeFromDeferred(
5586 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5587 __ StoreToSafepointRegisterSlot(result, eax);
5588}
5589
5590
5591void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5592 DCHECK(ToRegister(instr->value()).is(eax));
5593 __ push(eax);
5594 CallRuntime(Runtime::kToFastProperties, 1, instr);
5595}
5596
5597
5598void LCodeGen::DoTypeof(LTypeof* instr) {
5599 DCHECK(ToRegister(instr->context()).is(esi));
5600 DCHECK(ToRegister(instr->value()).is(ebx));
5601 Label end, do_call;
5602 Register value_register = ToRegister(instr->value());
5603 __ JumpIfNotSmi(value_register, &do_call);
5604 __ mov(eax, Immediate(isolate()->factory()->number_string()));
5605 __ jmp(&end);
5606 __ bind(&do_call);
5607 TypeofStub stub(isolate());
5608 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5609 __ bind(&end);
5610}
5611
5612
5613void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5614 Register input = ToRegister(instr->value());
5615 Condition final_branch_condition = EmitTypeofIs(instr, input);
5616 if (final_branch_condition != no_condition) {
5617 EmitBranch(instr, final_branch_condition);
5618 }
5619}
5620
5621
5622Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5623 Label* true_label = instr->TrueLabel(chunk_);
5624 Label* false_label = instr->FalseLabel(chunk_);
5625 Handle<String> type_name = instr->type_literal();
5626 int left_block = instr->TrueDestination(chunk_);
5627 int right_block = instr->FalseDestination(chunk_);
5628 int next_block = GetNextEmittedBlock();
5629
5630 Label::Distance true_distance = left_block == next_block ? Label::kNear
5631 : Label::kFar;
5632 Label::Distance false_distance = right_block == next_block ? Label::kNear
5633 : Label::kFar;
5634 Condition final_branch_condition = no_condition;
5635 if (String::Equals(type_name, factory()->number_string())) {
5636 __ JumpIfSmi(input, true_label, true_distance);
5637 __ cmp(FieldOperand(input, HeapObject::kMapOffset),
5638 factory()->heap_number_map());
5639 final_branch_condition = equal;
5640
5641 } else if (String::Equals(type_name, factory()->string_string())) {
5642 __ JumpIfSmi(input, false_label, false_distance);
5643 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5644 final_branch_condition = below;
5645
5646 } else if (String::Equals(type_name, factory()->symbol_string())) {
5647 __ JumpIfSmi(input, false_label, false_distance);
5648 __ CmpObjectType(input, SYMBOL_TYPE, input);
5649 final_branch_condition = equal;
5650
5651 } else if (String::Equals(type_name, factory()->boolean_string())) {
5652 __ cmp(input, factory()->true_value());
5653 __ j(equal, true_label, true_distance);
5654 __ cmp(input, factory()->false_value());
5655 final_branch_condition = equal;
5656
5657 } else if (String::Equals(type_name, factory()->undefined_string())) {
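    // typeof null is "object", so null must not fall through to the
    // undetectable-map check below.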
5658    __ cmp(input, factory()->null_value());
5659 __ j(equal, false_label, false_distance);
5660    __ JumpIfSmi(input, false_label, false_distance);
5661 // Check for undetectable objects => true.
5662 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5663 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5664 1 << Map::kIsUndetectable);
5665 final_branch_condition = not_zero;
5666
5667 } else if (String::Equals(type_name, factory()->function_string())) {
5668 __ JumpIfSmi(input, false_label, false_distance);
5669 // Check for callable and not undetectable objects => true.
5670 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5671 __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
5672 __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
5673 __ cmp(input, 1 << Map::kIsCallable);
5674 final_branch_condition = equal;
5675
5676 } else if (String::Equals(type_name, factory()->object_string())) {
5677 __ JumpIfSmi(input, false_label, false_distance);
5678 __ cmp(input, factory()->null_value());
5679 __ j(equal, true_label, true_distance);
5680 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5681 __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
5682 __ j(below, false_label, false_distance);
5683 // Check for callable or undetectable objects => false.
5684 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5685 (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
5686 final_branch_condition = zero;
5687
5688// clang-format off
5689#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5690 } else if (String::Equals(type_name, factory()->type##_string())) { \
5691 __ JumpIfSmi(input, false_label, false_distance); \
5692 __ cmp(FieldOperand(input, HeapObject::kMapOffset), \
5693 factory()->type##_map()); \
5694 final_branch_condition = equal;
5695 SIMD128_TYPES(SIMD128_TYPE)
5696#undef SIMD128_TYPE
5697 // clang-format on
5698
5699 } else {
5700 __ jmp(false_label, false_distance);
5701 }
5702 return final_branch_condition;
5703}
5704
5705
5706void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5707 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5708 // Ensure that we have enough space after the previous lazy-bailout
5709 // instruction for patching the code here.
5710 int current_pc = masm()->pc_offset();
5711 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5712 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5713 __ Nop(padding_size);
5714 }
5715 }
5716 last_lazy_deopt_pc_ = masm()->pc_offset();
5717}
5718
5719
5720void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5721 last_lazy_deopt_pc_ = masm()->pc_offset();
5722 DCHECK(instr->HasEnvironment());
5723 LEnvironment* env = instr->environment();
5724 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5725 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5726}
5727
5728
5729void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5730 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5731 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5732 // needed return address), even though the implementation of LAZY and EAGER is
5733 // now identical. When LAZY is eventually completely folded into EAGER, remove
5734 // the special case below.
5735 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5736 type = Deoptimizer::LAZY;
5737 }
5738 DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
5739}
5740
5741
5742void LCodeGen::DoDummy(LDummy* instr) {
5743 // Nothing to see here, move on!
5744}
5745
5746
5747void LCodeGen::DoDummyUse(LDummyUse* instr) {
5748 // Nothing to see here, move on!
5749}
5750
5751
5752void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5753 PushSafepointRegistersScope scope(this);
5754 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5755 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5756 RecordSafepointWithLazyDeopt(
5757 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5758 DCHECK(instr->HasEnvironment());
5759 LEnvironment* env = instr->environment();
5760 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5761}
5762
5763
5764void LCodeGen::DoStackCheck(LStackCheck* instr) {
5765 class DeferredStackCheck final : public LDeferredCode {
5766 public:
5767 DeferredStackCheck(LCodeGen* codegen,
5768 LStackCheck* instr,
5769 const X87Stack& x87_stack)
5770 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5771 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5772 LInstruction* instr() override { return instr_; }
5773
5774 private:
5775 LStackCheck* instr_;
5776 };
5777
5778 DCHECK(instr->HasEnvironment());
5779 LEnvironment* env = instr->environment();
5780 // There is no LLazyBailout instruction for stack-checks. We have to
5781 // prepare for lazy deoptimization explicitly here.
5782 if (instr->hydrogen()->is_function_entry()) {
5783 // Perform stack overflow check.
5784 Label done;
5785 ExternalReference stack_limit =
5786 ExternalReference::address_of_stack_limit(isolate());
5787 __ cmp(esp, Operand::StaticVariable(stack_limit));
5788 __ j(above_equal, &done, Label::kNear);
5789
5790 DCHECK(instr->context()->IsRegister());
5791 DCHECK(ToRegister(instr->context()).is(esi));
5792 CallCode(isolate()->builtins()->StackCheck(),
5793 RelocInfo::CODE_TARGET,
5794 instr);
5795 __ bind(&done);
5796 } else {
5797 DCHECK(instr->hydrogen()->is_backwards_branch());
5798 // Perform stack overflow check if this goto needs it before jumping.
5799 DeferredStackCheck* deferred_stack_check =
5800 new(zone()) DeferredStackCheck(this, instr, x87_stack_);
5801 ExternalReference stack_limit =
5802 ExternalReference::address_of_stack_limit(isolate());
5803 __ cmp(esp, Operand::StaticVariable(stack_limit));
5804 __ j(below, deferred_stack_check->entry());
5805 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5806 __ bind(instr->done_label());
5807 deferred_stack_check->SetExit(instr->done_label());
5808 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5809 // Don't record a deoptimization index for the safepoint here.
5810 // This will be done explicitly when emitting call and the safepoint in
5811 // the deferred code.
5812 }
5813}
5814
5815
5816void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5817 // This is a pseudo-instruction that ensures that the environment here is
5818 // properly registered for deoptimization and records the assembler's PC
5819 // offset.
5820 LEnvironment* environment = instr->environment();
5821
5822 // If the environment were already registered, we would have no way of
5823 // backpatching it with the spill slot operands.
5824 DCHECK(!environment->HasBeenRegistered());
5825 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5826
5827 GenerateOsrPrologue();
5828}
5829
5830
5831void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5832 DCHECK(ToRegister(instr->context()).is(esi));
5833
5834 Label use_cache, call_runtime;
5835 __ CheckEnumCache(&call_runtime);
5836
5837 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
5838 __ jmp(&use_cache, Label::kNear);
5839
5840 // Get the set of properties to enumerate.
5841 __ bind(&call_runtime);
5842 __ push(eax);
5843  CallRuntime(Runtime::kForInEnumerate, instr);
5844  __ bind(&use_cache);
5845}
5846
5847
5848void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5849 Register map = ToRegister(instr->map());
5850 Register result = ToRegister(instr->result());
5851 Label load_cache, done;
5852 __ EnumLength(result, map);
5853 __ cmp(result, Immediate(Smi::FromInt(0)));
5854 __ j(not_equal, &load_cache, Label::kNear);
5855 __ mov(result, isolate()->factory()->empty_fixed_array());
5856 __ jmp(&done, Label::kNear);
5857
5858 __ bind(&load_cache);
5859 __ LoadInstanceDescriptors(map, result);
5860 __ mov(result,
5861 FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5862 __ mov(result,
5863 FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5864 __ bind(&done);
5865 __ test(result, result);
5866 DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
5867}
5868
5869
5870void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5871 Register object = ToRegister(instr->value());
5872 __ cmp(ToRegister(instr->map()),
5873 FieldOperand(object, HeapObject::kMapOffset));
5874 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5875}
5876
5877
5878void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5879 Register object,
5880 Register index) {
5881 PushSafepointRegistersScope scope(this);
5882 __ push(object);
5883 __ push(index);
5884 __ xor_(esi, esi);
5885 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5886 RecordSafepointWithRegisters(
5887 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5888 __ StoreToSafepointRegisterSlot(object, eax);
5889}
5890
5891
5892void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5893 class DeferredLoadMutableDouble final : public LDeferredCode {
5894 public:
5895 DeferredLoadMutableDouble(LCodeGen* codegen,
5896 LLoadFieldByIndex* instr,
5897 Register object,
5898 Register index,
5899 const X87Stack& x87_stack)
5900 : LDeferredCode(codegen, x87_stack),
5901 instr_(instr),
5902 object_(object),
5903 index_(index) {
5904 }
5905 void Generate() override {
5906 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
5907 }
5908 LInstruction* instr() override { return instr_; }
5909
5910 private:
5911 LLoadFieldByIndex* instr_;
5912 Register object_;
5913 Register index_;
5914 };
5915
5916 Register object = ToRegister(instr->object());
5917 Register index = ToRegister(instr->index());
5918
5919 DeferredLoadMutableDouble* deferred;
5920 deferred = new(zone()) DeferredLoadMutableDouble(
5921 this, instr, object, index, x87_stack_);
5922
5923 Label out_of_object, done;
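  // If the low bit of the Smi-encoded index is set, the field holds a mutable
  // double and must be loaded through the deferred runtime call.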
5924 __ test(index, Immediate(Smi::FromInt(1)));
5925 __ j(not_zero, deferred->entry());
5926
5927 __ sar(index, 1);
5928
5929 __ cmp(index, Immediate(0));
5930 __ j(less, &out_of_object, Label::kNear);
5931 __ mov(object, FieldOperand(object,
5932 index,
5933 times_half_pointer_size,
5934 JSObject::kHeaderSize));
5935 __ jmp(&done, Label::kNear);
5936
5937 __ bind(&out_of_object);
5938 __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
5939 __ neg(index);
5940  // Index is now equal to the out-of-object property index plus 1.
5941 __ mov(object, FieldOperand(object,
5942 index,
5943 times_half_pointer_size,
5944 FixedArray::kHeaderSize - kPointerSize));
5945 __ bind(deferred->exit());
5946 __ bind(&done);
5947}
5948
5949
5950void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5951 Register context = ToRegister(instr->context());
5952 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
5953}
5954
5955
5956#undef __
5957
5958} // namespace internal
5959} // namespace v8
5960
5961#endif // V8_TARGET_ARCH_X87