1// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "bootstrapper.h"
31#include "codegen-inl.h"
32#include "debug.h"
33#include "ic-inl.h"
34#include "parser.h"
35#include "register-allocator-inl.h"
36#include "scopes.h"
37
38namespace v8 {
39namespace internal {
40
41#define __ ACCESS_MASM(masm_)
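// Shorthand used throughout this file: '__' routes instruction emission
// through the code generator's MacroAssembler (masm_). When generated-code
// coverage is being collected the macro expands to something that cannot be
// used as an expression, which is why a few sites below call masm_->
// directly (see the patch-site comments).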
42
43// -------------------------------------------------------------------------
44// Platform-specific DeferredCode functions.
45
46void DeferredCode::SaveRegisters() {
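  // registers_[i] describes what to do with allocatable register i when
  // entering the deferred code: kPush means push it on the stack, kIgnore
  // means it does not need to be saved, and any other value is an
  // rbp-relative frame offset at which to store it (the kSyncedFlag bit
  // marks values that are already in memory, so the store is skipped).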
47 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
48 int action = registers_[i];
49 if (action == kPush) {
50 __ push(RegisterAllocator::ToRegister(i));
51 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
52 __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
53 }
54 }
55}
56
57void DeferredCode::RestoreRegisters() {
58 // Restore registers in reverse order due to the stack.
59 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
60 int action = registers_[i];
61 if (action == kPush) {
62 __ pop(RegisterAllocator::ToRegister(i));
63 } else if (action != kIgnore) {
64 action &= ~kSyncedFlag;
65 __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
66 }
67 }
68}
69
70
71// -------------------------------------------------------------------------
72// CodeGenState implementation.
73
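// CodeGenState objects form a stack linked through previous_: each
// constructor installs the new state as the owner's current state, and the
// destructor restores the previous one.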
74CodeGenState::CodeGenState(CodeGenerator* owner)
75 : owner_(owner),
76 typeof_state_(NOT_INSIDE_TYPEOF),
77 destination_(NULL),
78 previous_(NULL) {
79 owner_->set_state(this);
80}
81
82
83CodeGenState::CodeGenState(CodeGenerator* owner,
84 TypeofState typeof_state,
85 ControlDestination* destination)
86 : owner_(owner),
87 typeof_state_(typeof_state),
88 destination_(destination),
89 previous_(owner->state()) {
90 owner_->set_state(this);
91}
92
93
94CodeGenState::~CodeGenState() {
95 ASSERT(owner_->state() == this);
96 owner_->set_state(previous_);
97}
98
99
100// -------------------------------------------------------------------------
101// Deferred code objects
102//
103// These subclasses of DeferredCode add pieces of code to the end of the
104// generated code. They are branched to from the generated code and keep
105// the slower code out of its main body.
106// Many of them call a code stub or a runtime function.
107
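// Deferred code for an inlined smi addition with a constant right operand.
// As with DeferredInlineSmiAddReversed below, the speculative addition
// either overflowed or the operand was not smi tagged; the deferred code
// calls the appropriate stub for add and leaves the result in dst.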
108class DeferredInlineSmiAdd: public DeferredCode {
109 public:
110 DeferredInlineSmiAdd(Register dst,
111 Smi* value,
112 OverwriteMode overwrite_mode)
113 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
114 set_comment("[ DeferredInlineSmiAdd");
115 }
116
117 virtual void Generate();
118
119 private:
120 Register dst_;
121 Smi* value_;
122 OverwriteMode overwrite_mode_;
123};
124
125
126// The result of value + src is in dst. It either overflowed or was not
127// smi tagged. Undo the speculative addition and call the appropriate
128// specialized stub for add. The result is left in dst.
129class DeferredInlineSmiAddReversed: public DeferredCode {
130 public:
131 DeferredInlineSmiAddReversed(Register dst,
132 Smi* value,
133 OverwriteMode overwrite_mode)
134 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
135 set_comment("[ DeferredInlineSmiAddReversed");
136 }
137
138 virtual void Generate();
139
140 private:
141 Register dst_;
142 Smi* value_;
143 OverwriteMode overwrite_mode_;
144};
145
146
147class DeferredInlineSmiSub: public DeferredCode {
148 public:
149 DeferredInlineSmiSub(Register dst,
150 Smi* value,
151 OverwriteMode overwrite_mode)
152 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
153 set_comment("[ DeferredInlineSmiSub");
154 }
155
156 virtual void Generate();
157
158 private:
159 Register dst_;
160 Smi* value_;
161 OverwriteMode overwrite_mode_;
162};
163
164
165// Call the appropriate binary operation stub to compute src op value
166// and leave the result in dst.
167class DeferredInlineSmiOperation: public DeferredCode {
168 public:
169 DeferredInlineSmiOperation(Token::Value op,
170 Register dst,
171 Register src,
172 Smi* value,
173 OverwriteMode overwrite_mode)
174 : op_(op),
175 dst_(dst),
176 src_(src),
177 value_(value),
178 overwrite_mode_(overwrite_mode) {
179 set_comment("[ DeferredInlineSmiOperation");
180 }
181
182 virtual void Generate();
183
184 private:
185 Token::Value op_;
186 Register dst_;
187 Register src_;
188 Smi* value_;
189 OverwriteMode overwrite_mode_;
190};
191
192
193class FloatingPointHelper : public AllStatic {
194 public:
195 // Code pattern for loading a floating point value. Input value must
196 // be either a smi or a heap number object (fp value). Requirements:
197 // operand on TOS+1. Returns operand as floating point number on FPU
198 // stack.
199 static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
200
201 // Code pattern for loading a floating point value. Input value must
202 // be either a smi or a heap number object (fp value). Requirements:
203 // operand in src register. Returns operand as floating point number
204 // in XMM register
205 static void LoadFloatOperand(MacroAssembler* masm,
206 Register src,
207 XMMRegister dst);
208
209 // Code pattern for loading floating point values. Input values must
210 // be either smi or heap number objects (fp values). Requirements:
211  // operand_1 on TOS+1, operand_2 on TOS+2. Returns operands as
212 // floating point numbers in XMM registers.
213 static void LoadFloatOperands(MacroAssembler* masm,
214 XMMRegister dst1,
215 XMMRegister dst2);
216
217 // Code pattern for loading floating point values onto the fp stack.
218 // Input values must be either smi or heap number objects (fp values).
219 // Requirements:
220 // Register version: operands in registers lhs and rhs.
221 // Stack version: operands on TOS+1 and TOS+2.
222 // Returns operands as floating point numbers on fp stack.
223 static void LoadFloatOperands(MacroAssembler* masm);
224 static void LoadFloatOperands(MacroAssembler* masm,
225 Register lhs,
226 Register rhs);
227
228 // Code pattern for loading a floating point value and converting it
229  // to a 32-bit integer. Input value must be either a smi or a heap
230  // number object.
231  // Returns the operand as a 32-bit sign-extended integer in a
232  // general-purpose register.
233 static void LoadInt32Operand(MacroAssembler* masm,
234 const Operand& src,
235 Register dst);
236
237 // Test if operands are smi or number objects (fp). Requirements:
238 // operand_1 in rax, operand_2 in rdx; falls through on float or smi
239 // operands, jumps to the non_float label otherwise.
240 static void CheckFloatOperands(MacroAssembler* masm,
241 Label* non_float);
242
243 // Allocate a heap number in new space with undefined value.
244 // Returns tagged pointer in result, or jumps to need_gc if new space is full.
245 static void AllocateHeapNumber(MacroAssembler* masm,
246 Label* need_gc,
247 Register scratch,
248 Register result);
249};
250
251
252// -----------------------------------------------------------------------------
253// CodeGenerator implementation.
254
255CodeGenerator::CodeGenerator(int buffer_size,
256 Handle<Script> script,
257 bool is_eval)
258 : is_eval_(is_eval),
259 script_(script),
260 deferred_(8),
261 masm_(new MacroAssembler(NULL, buffer_size)),
262 scope_(NULL),
263 frame_(NULL),
264 allocator_(NULL),
265 state_(NULL),
266 loop_nesting_(0),
267 function_return_is_shadowed_(false),
268 in_spilled_code_(false) {
269}
270
271
272void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
273 // Call the runtime to declare the globals. The inevitable call
274 // will sync frame elements to memory anyway, so we do it eagerly to
275 // allow us to push the arguments directly into place.
276 frame_->SyncRange(0, frame_->element_count() - 1);
277
278 __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
279 frame_->EmitPush(kScratchRegister);
280 frame_->EmitPush(rsi); // The context is the second argument.
281 frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
282 Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
283 // Return value is ignored.
284}
285
286
287void CodeGenerator::GenCode(FunctionLiteral* function) {
288 // Record the position for debugging purposes.
289 CodeForFunctionPosition(function);
290 ZoneList<Statement*>* body = function->body();
291
292 // Initialize state.
293 ASSERT(scope_ == NULL);
294 scope_ = function->scope();
295 ASSERT(allocator_ == NULL);
296 RegisterAllocator register_allocator(this);
297 allocator_ = &register_allocator;
298 ASSERT(frame_ == NULL);
299 frame_ = new VirtualFrame();
300 set_in_spilled_code(false);
301
302 // Adjust for function-level loop nesting.
303 loop_nesting_ += function->loop_nesting();
304
305 JumpTarget::set_compiling_deferred_code(false);
306
307#ifdef DEBUG
308 if (strlen(FLAG_stop_at) > 0 &&
309 function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
310 frame_->SpillAll();
311 __ int3();
312 }
313#endif
314
315 // New scope to get automatic timing calculation.
316 { // NOLINT
317 HistogramTimerScope codegen_timer(&Counters::code_generation);
318 CodeGenState state(this);
319
320 // Entry:
321 // Stack: receiver, arguments, return address.
322 // rbp: caller's frame pointer
323 // rsp: stack pointer
324 // rdi: called JS function
325 // rsi: callee's context
326 allocator_->Initialize();
327 frame_->Enter();
328
329 // Allocate space for locals and initialize them.
330 frame_->AllocateStackSlots();
331 // Initialize the function return target after the locals are set
332 // up, because it needs the expected frame height from the frame.
333 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
334 function_return_is_shadowed_ = false;
335
336 // Allocate the local context if needed.
337 if (scope_->num_heap_slots() > 0) {
338 Comment cmnt(masm_, "[ allocate local context");
339 // Allocate local context.
340 // Get outer context and create a new context based on it.
341 frame_->PushFunction();
342 Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
343
344 // Update context local.
345 frame_->SaveContextRegister();
346
347 // Verify that the runtime call result and rsi agree.
348 if (FLAG_debug_code) {
349 __ cmpq(context.reg(), rsi);
350 __ Assert(equal, "Runtime::NewContext should end up in rsi");
351 }
352 }
353
354 // TODO(1241774): Improve this code:
355 // 1) only needed if we have a context
356 // 2) no need to recompute context ptr every single time
357 // 3) don't copy parameter operand code from SlotOperand!
358 {
359 Comment cmnt2(masm_, "[ copy context parameters into .context");
360
361      // Note that iteration order is relevant here! If we have the same
362      // parameter twice (e.g., function (x, y, x)) and that parameter
363      // needs to be copied into the context, the value that must end up
364      // in the context is the one bound to the last occurrence of that
365      // parameter. This is a rare case, so we don't check for it; instead
366      // we rely on the copying order: such a parameter is copied
367      // repeatedly into the same context location, and thus the last
368      // value is what is seen inside the function.
369 for (int i = 0; i < scope_->num_parameters(); i++) {
370 Variable* par = scope_->parameter(i);
371 Slot* slot = par->slot();
372 if (slot != NULL && slot->type() == Slot::CONTEXT) {
373 // The use of SlotOperand below is safe in unspilled code
374 // because the slot is guaranteed to be a context slot.
375 //
376 // There are no parameters in the global scope.
377 ASSERT(!scope_->is_global_scope());
378 frame_->PushParameterAt(i);
379 Result value = frame_->Pop();
380 value.ToRegister();
381
382 // SlotOperand loads context.reg() with the context object
383 // stored to, used below in RecordWrite.
384 Result context = allocator_->Allocate();
385 ASSERT(context.is_valid());
386 __ movq(SlotOperand(slot, context.reg()), value.reg());
387 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
388 Result scratch = allocator_->Allocate();
389 ASSERT(scratch.is_valid());
390 frame_->Spill(context.reg());
391 frame_->Spill(value.reg());
392 __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
393 }
394 }
395 }
396
397 // Store the arguments object. This must happen after context
398 // initialization because the arguments object may be stored in
399 // the context.
400 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
401 StoreArgumentsObject(true);
402 }
403
404 // Generate code to 'execute' declarations and initialize functions
405 // (source elements). In case of an illegal redeclaration we need to
406 // handle that instead of processing the declarations.
407 if (scope_->HasIllegalRedeclaration()) {
408 Comment cmnt(masm_, "[ illegal redeclarations");
409 scope_->VisitIllegalRedeclaration(this);
410 } else {
411 Comment cmnt(masm_, "[ declarations");
412 ProcessDeclarations(scope_->declarations());
413 // Bail out if a stack-overflow exception occurred when processing
414 // declarations.
415 if (HasStackOverflow()) return;
416 }
417
418 if (FLAG_trace) {
419 frame_->CallRuntime(Runtime::kTraceEnter, 0);
420 // Ignore the return value.
421 }
422 CheckStack();
423
424 // Compile the body of the function in a vanilla state. Don't
425 // bother compiling all the code if the scope has an illegal
426 // redeclaration.
427 if (!scope_->HasIllegalRedeclaration()) {
428 Comment cmnt(masm_, "[ function body");
429#ifdef DEBUG
430 bool is_builtin = Bootstrapper::IsActive();
431 bool should_trace =
432 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
433 if (should_trace) {
434 frame_->CallRuntime(Runtime::kDebugTrace, 0);
435 // Ignore the return value.
436 }
437#endif
438 VisitStatements(body);
439
440 // Handle the return from the function.
441 if (has_valid_frame()) {
442 // If there is a valid frame, control flow can fall off the end of
443 // the body. In that case there is an implicit return statement.
444 ASSERT(!function_return_is_shadowed_);
445 CodeForReturnPosition(function);
446 frame_->PrepareForReturn();
447 Result undefined(Factory::undefined_value());
448 if (function_return_.is_bound()) {
449 function_return_.Jump(&undefined);
450 } else {
451 function_return_.Bind(&undefined);
452 GenerateReturnSequence(&undefined);
453 }
454 } else if (function_return_.is_linked()) {
455 // If the return target has dangling jumps to it, then we have not
456 // yet generated the return sequence. This can happen when (a)
457 // control does not flow off the end of the body so we did not
458 // compile an artificial return statement just above, and (b) there
459 // are return statements in the body but (c) they are all shadowed.
460 Result return_value;
461 function_return_.Bind(&return_value);
462 GenerateReturnSequence(&return_value);
463 }
464 }
465 }
466
467 // Adjust for function-level loop nesting.
468 loop_nesting_ -= function->loop_nesting();
469
470 // Code generation state must be reset.
471 ASSERT(state_ == NULL);
472 ASSERT(loop_nesting() == 0);
473 ASSERT(!function_return_is_shadowed_);
474 function_return_.Unuse();
475 DeleteFrame();
476
477 // Process any deferred code using the register allocator.
478 if (!HasStackOverflow()) {
479 HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
480 JumpTarget::set_compiling_deferred_code(true);
481 ProcessDeferred();
482 JumpTarget::set_compiling_deferred_code(false);
483 }
484
485 // There is no need to delete the register allocator, it is a
486 // stack-allocated local.
487 allocator_ = NULL;
488 scope_ = NULL;
489}
490
491void CodeGenerator::GenerateReturnSequence(Result* return_value) {
492 // The return value is a live (but not currently reference counted)
493 // reference to rax. This is safe because the current frame does not
494 // contain a reference to rax (it is prepared for the return by spilling
495 // all registers).
496 if (FLAG_trace) {
497 frame_->Push(return_value);
498 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
499 }
500 return_value->ToRegister(rax);
501
502 // Add a label for checking the size of the code used for returning.
503#ifdef DEBUG
504 Label check_exit_codesize;
505 masm_->bind(&check_exit_codesize);
506#endif
507
508 // Leave the frame and return popping the arguments and the
509 // receiver.
510 frame_->Exit();
511 masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
512#ifdef ENABLE_DEBUGGER_SUPPORT
513 // Add padding that will be overwritten by a debugger breakpoint.
514 // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
515 // with length 7 (3 + 1 + 3).
516 const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
517 for (int i = 0; i < kPadding; ++i) {
518 masm_->int3();
519 }
520 // Check that the size of the code used for returning matches what is
521 // expected by the debugger.
522 ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
523 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
524#endif
525 DeleteFrame();
526}
527
528
529#ifdef DEBUG
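// Debug-only sanity check: every allocatable general-purpose register must
// either be unreferenced or be referenced exactly once, by the virtual
// frame. Registers with fixed roles (e.g. rsp, rbp, rsi as the context
// register, and kScratchRegister) are never allocated and are not checked.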
530bool CodeGenerator::HasValidEntryRegisters() {
531 return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
532 && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
533 && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
534 && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
535 && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
536 && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
537 && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
538 && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
539 && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
540 && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
541 && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
542}
543#endif
544
545
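// Deferred slow case for an inlined keyed property load. It calls the
// KeyedLoadIC and leaves the result in dst. patch_site_ marks the inlined
// map-check instruction that the IC patches; see Generate() below.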
546class DeferredReferenceGetKeyedValue: public DeferredCode {
547 public:
548 explicit DeferredReferenceGetKeyedValue(Register dst,
549 Register receiver,
550 Register key,
551 bool is_global)
552 : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
553 set_comment("[ DeferredReferenceGetKeyedValue");
554 }
555
556 virtual void Generate();
557
558 Label* patch_site() { return &patch_site_; }
559
560 private:
561 Label patch_site_;
562 Register dst_;
563 Register receiver_;
564 Register key_;
565 bool is_global_;
566};
567
568
569void DeferredReferenceGetKeyedValue::Generate() {
570 __ push(receiver_); // First IC argument.
571 __ push(key_); // Second IC argument.
572
573 // Calculate the delta from the IC call instruction to the map check
574 // movq instruction in the inlined version. This delta is stored in
575 // a test(rax, delta) instruction after the call so that we can find
576 // it in the IC initialization code and patch the movq instruction.
577 // This means that we cannot allow test instructions after calls to
578 // KeyedLoadIC stubs in other places.
579 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
580 RelocInfo::Mode mode = is_global_
581 ? RelocInfo::CODE_TARGET_CONTEXT
582 : RelocInfo::CODE_TARGET;
583 __ Call(ic, mode);
584 // The delta from the start of the map-compare instruction to the
585 // test instruction. We use masm_-> directly here instead of the __
586 // macro because the macro sometimes uses macro expansion to turn
587 // into something that can't return a value. This is encountered
588 // when doing generated code coverage tests.
589 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
590 // Here we use masm_-> instead of the __ macro because this is the
591 // instruction that gets patched and coverage code gets in the way.
592 // TODO(X64): Consider whether it's worth switching the test to a
593 // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
594 // be generated normally.
595 masm_->testl(rax, Immediate(-delta_to_patch_site));
596 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
597
598 if (!dst_.is(rax)) __ movq(dst_, rax);
599 __ pop(key_);
600 __ pop(receiver_);
601}
602
603
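// Deferred slow case for an inlined keyed property store. It calls the
// KeyedStoreIC with the value in rax and restores the value, key and
// receiver registers afterwards.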
604class DeferredReferenceSetKeyedValue: public DeferredCode {
605 public:
606 DeferredReferenceSetKeyedValue(Register value,
607 Register key,
608 Register receiver)
609 : value_(value), key_(key), receiver_(receiver) {
610 set_comment("[ DeferredReferenceSetKeyedValue");
611 }
612
613 virtual void Generate();
614
615 Label* patch_site() { return &patch_site_; }
616
617 private:
618 Register value_;
619 Register key_;
620 Register receiver_;
621 Label patch_site_;
622};
623
624
625void DeferredReferenceSetKeyedValue::Generate() {
626 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
627 // Push receiver and key arguments on the stack.
628 __ push(receiver_);
629 __ push(key_);
630 // Move value argument to eax as expected by the IC stub.
631 if (!value_.is(rax)) __ movq(rax, value_);
632 // Call the IC stub.
633 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
634 __ Call(ic, RelocInfo::CODE_TARGET);
635 // The delta from the start of the map-compare instructions (initial movq)
636 // to the test instruction. We use masm_-> directly here instead of the
637 // __ macro because the macro sometimes uses macro expansion to turn
638 // into something that can't return a value. This is encountered
639 // when doing generated code coverage tests.
640 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
641 // Here we use masm_-> instead of the __ macro because this is the
642 // instruction that gets patched and coverage code gets in the way.
643 masm_->testl(rax, Immediate(-delta_to_patch_site));
644 // Restore value (returned from store IC), key and receiver
645 // registers.
646 if (!value_.is(rax)) __ movq(value_, rax);
647 __ pop(key_);
648 __ pop(receiver_);
649}
650
651
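// Stub used to call a JavaScript function with argc_ arguments passed on
// the stack. The InLoop flag does not enter the stub's minor key, which
// encodes only the argument count.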
652class CallFunctionStub: public CodeStub {
653 public:
654 CallFunctionStub(int argc, InLoopFlag in_loop)
655 : argc_(argc), in_loop_(in_loop) { }
656
657 void Generate(MacroAssembler* masm);
658
659 private:
660 int argc_;
661 InLoopFlag in_loop_;
662
663#ifdef DEBUG
664 void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
665#endif
666
667 Major MajorKey() { return CallFunction; }
668 int MinorKey() { return argc_; }
669 InLoopFlag InLoop() { return in_loop_; }
670};
671
672
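// Emit an optimized call for the pattern fn.apply(receiver, arguments) when
// the arguments object has not been materialized: the actual parameters are
// copied directly from the caller's (possibly adapted) frame instead of
// allocating an arguments object. If the arguments object already exists,
// or any of the checks below fail, the code falls back to an ordinary
// Function.prototype.apply call.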
673void CodeGenerator::CallApplyLazy(Property* apply,
674 Expression* receiver,
675 VariableProxy* arguments,
676 int position) {
677 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
678 ASSERT(arguments->IsArguments());
679
680 JumpTarget slow, done;
681
682 // Load the apply function onto the stack. This will usually
683 // give us a megamorphic load site. Not super, but it works.
684 Reference ref(this, apply);
685 ref.GetValue(NOT_INSIDE_TYPEOF);
686 ASSERT(ref.type() == Reference::NAMED);
687
688 // Load the receiver and the existing arguments object onto the
689 // expression stack. Avoid allocating the arguments object here.
690 Load(receiver);
691 LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
692
693 // Emit the source position information after having loaded the
694 // receiver and the arguments.
695 CodeForSourcePosition(position);
696
697 // Check if the arguments object has been lazily allocated
698 // already. If so, just use that instead of copying the arguments
699 // from the stack. This also deals with cases where a local variable
700 // named 'arguments' has been introduced.
701 frame_->Dup();
702 Result probe = frame_->Pop();
703 bool try_lazy = true;
704 if (probe.is_constant()) {
705 try_lazy = probe.handle()->IsTheHole();
706 } else {
707 __ Cmp(probe.reg(), Factory::the_hole_value());
708 probe.Unuse();
709 slow.Branch(not_equal);
710 }
711
712 if (try_lazy) {
713 JumpTarget build_args;
714
715 // Get rid of the arguments object probe.
716 frame_->Drop();
717
718 // Before messing with the execution stack, we sync all
719 // elements. This is bound to happen anyway because we're
720 // about to call a function.
721 frame_->SyncRange(0, frame_->element_count() - 1);
722
723 // Check that the receiver really is a JavaScript object.
724 {
725 frame_->PushElementAt(0);
726 Result receiver = frame_->Pop();
727 receiver.ToRegister();
728 Condition is_smi = masm_->CheckSmi(receiver.reg());
729 build_args.Branch(is_smi);
730 // We allow all JSObjects including JSFunctions. As long as
731 // JS_FUNCTION_TYPE is the last instance type and it is right
732 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
733 // bound.
734 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
735 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
736 __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
737 build_args.Branch(below);
738 }
739
740 // Verify that we're invoking Function.prototype.apply.
741 {
742 frame_->PushElementAt(1);
743 Result apply = frame_->Pop();
744 apply.ToRegister();
745 Condition is_smi = masm_->CheckSmi(apply.reg());
746 build_args.Branch(is_smi);
747 Result tmp = allocator_->Allocate();
748 __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
749 build_args.Branch(not_equal);
750 __ movq(tmp.reg(),
751 FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
752 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
753 __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
754 apply_code);
755 build_args.Branch(not_equal);
756 }
757
758 // Get the function receiver from the stack. Check that it
759 // really is a function.
760 __ movq(rdi, Operand(rsp, 2 * kPointerSize));
761 Condition is_smi = masm_->CheckSmi(rdi);
762 build_args.Branch(is_smi);
763 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
764 build_args.Branch(not_equal);
765
766 // Copy the arguments to this function possibly from the
767 // adaptor frame below it.
768 Label invoke, adapted;
769 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
770 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
771 __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
772 __ j(equal, &adapted);
773
774 // No arguments adaptor frame. Copy fixed number of arguments.
775 __ movq(rax, Immediate(scope_->num_parameters()));
776 for (int i = 0; i < scope_->num_parameters(); i++) {
777 __ push(frame_->ParameterAt(i));
778 }
779 __ jmp(&invoke);
780
781 // Arguments adaptor frame present. Copy arguments from there, but
782 // avoid copying too many arguments to avoid stack overflows.
783 __ bind(&adapted);
784 static const uint32_t kArgumentsLimit = 1 * KB;
785 __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
786 __ SmiToInteger32(rax, rax);
787 __ movq(rcx, rax);
788 __ cmpq(rax, Immediate(kArgumentsLimit));
789 build_args.Branch(above);
790
791 // Loop through the arguments pushing them onto the execution
792 // stack. We don't inform the virtual frame of the push, so we don't
793 // have to worry about getting rid of the elements from the virtual
794 // frame.
795 Label loop;
796 __ bind(&loop);
797 __ testl(rcx, rcx);
798 __ j(zero, &invoke);
799 __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
800 __ decl(rcx);
801 __ jmp(&loop);
802
803 // Invoke the function. The virtual frame knows about the receiver
804 // so make sure to forget that explicitly.
805 __ bind(&invoke);
806 ParameterCount actual(rax);
807 __ InvokeFunction(rdi, actual, CALL_FUNCTION);
808 frame_->Forget(1);
809 Result result = allocator()->Allocate(rax);
810 frame_->SetElementAt(0, &result);
811 done.Jump();
812
813    // Slow case: allocate the arguments object since we know it isn't
814    // there, and fall through to the generic code that calls
815    // Function.prototype.apply.
816 build_args.Bind();
817 Result arguments_object = StoreArgumentsObject(false);
818 frame_->Push(&arguments_object);
819 slow.Bind();
820 }
821
822 // Flip the apply function and the function to call on the stack, so
823 // the function looks like the receiver of the apply call. This way,
824 // the generic Function.prototype.apply implementation can deal with
825 // the call like it usually does.
826 Result a2 = frame_->Pop();
827 Result a1 = frame_->Pop();
828 Result ap = frame_->Pop();
829 Result fn = frame_->Pop();
830 frame_->Push(&ap);
831 frame_->Push(&fn);
832 frame_->Push(&a1);
833 frame_->Push(&a2);
834 CallFunctionStub call_function(2, NOT_IN_LOOP);
835 Result res = frame_->CallStub(&call_function, 3);
836 frame_->Push(&res);
837
838 // All done. Restore context register after call.
839 if (try_lazy) done.Bind();
840 frame_->RestoreContextRegister();
841}
842
843
844class DeferredStackCheck: public DeferredCode {
845 public:
846 DeferredStackCheck() {
847 set_comment("[ DeferredStackCheck");
848 }
849
850 virtual void Generate();
851};
852
853
854void DeferredStackCheck::Generate() {
855 StackCheckStub stub;
856 __ CallStub(&stub);
857}
858
859
860void CodeGenerator::CheckStack() {
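  // Inline stack check: compare rsp against the stack limit and branch to
  // deferred code (which calls the StackCheckStub) only on overflow, keeping
  // the common case to a compare and a conditional jump.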
861 if (FLAG_check_stack) {
862 DeferredStackCheck* deferred = new DeferredStackCheck;
863 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
864 deferred->Branch(below);
865 deferred->BindExit();
866 }
867}
868
869
870void CodeGenerator::VisitAndSpill(Statement* statement) {
871 // TODO(X64): No architecture specific code. Move to shared location.
872 ASSERT(in_spilled_code());
873 set_in_spilled_code(false);
874 Visit(statement);
875 if (frame_ != NULL) {
876 frame_->SpillAll();
877 }
878 set_in_spilled_code(true);
879}
880
881
882void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
883 ASSERT(in_spilled_code());
884 set_in_spilled_code(false);
885 VisitStatements(statements);
886 if (frame_ != NULL) {
887 frame_->SpillAll();
888 }
889 set_in_spilled_code(true);
890}
891
892
893void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
894 ASSERT(!in_spilled_code());
895 for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
896 Visit(statements->at(i));
897 }
898}
899
900
901void CodeGenerator::VisitBlock(Block* node) {
902 ASSERT(!in_spilled_code());
903 Comment cmnt(masm_, "[ Block");
904 CodeForStatementPosition(node);
905 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
906 VisitStatements(node->statements());
907 if (node->break_target()->is_linked()) {
908 node->break_target()->Bind();
909 }
910 node->break_target()->Unuse();
911}
912
913
914void CodeGenerator::VisitDeclaration(Declaration* node) {
915 Comment cmnt(masm_, "[ Declaration");
916 Variable* var = node->proxy()->var();
917 ASSERT(var != NULL); // must have been resolved
918 Slot* slot = var->slot();
919
920 // If it was not possible to allocate the variable at compile time,
921 // we need to "declare" it at runtime to make sure it actually
922 // exists in the local context.
923 if (slot != NULL && slot->type() == Slot::LOOKUP) {
924 // Variables with a "LOOKUP" slot were introduced as non-locals
925 // during variable resolution and must have mode DYNAMIC.
926 ASSERT(var->is_dynamic());
927 // For now, just do a runtime call. Sync the virtual frame eagerly
928 // so we can simply push the arguments into place.
929 frame_->SyncRange(0, frame_->element_count() - 1);
930 frame_->EmitPush(rsi);
931 __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
932 frame_->EmitPush(kScratchRegister);
933 // Declaration nodes are always introduced in one of two modes.
934 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
935 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
936 frame_->EmitPush(Immediate(Smi::FromInt(attr)));
937 // Push initial value, if any.
938 // Note: For variables we must not push an initial value (such as
939 // 'undefined') because we may have a (legal) redeclaration and we
940 // must not destroy the current value.
941 if (node->mode() == Variable::CONST) {
942 frame_->EmitPush(Heap::kTheHoleValueRootIndex);
943 } else if (node->fun() != NULL) {
944 Load(node->fun());
945 } else {
946 frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
947 }
948 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
949 // Ignore the return value (declarations are statements).
950 return;
951 }
952
953 ASSERT(!var->is_global());
954
955 // If we have a function or a constant, we need to initialize the variable.
956 Expression* val = NULL;
957 if (node->mode() == Variable::CONST) {
958 val = new Literal(Factory::the_hole_value());
959 } else {
960 val = node->fun(); // NULL if we don't have a function
961 }
962
963 if (val != NULL) {
964 {
965 // Set the initial value.
966 Reference target(this, node->proxy());
967 Load(val);
968 target.SetValue(NOT_CONST_INIT);
969 // The reference is removed from the stack (preserving TOS) when
970 // it goes out of scope.
971 }
972 // Get rid of the assigned value (declarations are statements).
973 frame_->Drop();
974 }
975}
976
977
978void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
979 ASSERT(!in_spilled_code());
980 Comment cmnt(masm_, "[ ExpressionStatement");
981 CodeForStatementPosition(node);
982 Expression* expression = node->expression();
983 expression->MarkAsStatement();
984 Load(expression);
985 // Remove the lingering expression result from the top of stack.
986 frame_->Drop();
987}
988
989
990void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
991 ASSERT(!in_spilled_code());
992 Comment cmnt(masm_, "// EmptyStatement");
993 CodeForStatementPosition(node);
994 // nothing to do
995}
996
997
998void CodeGenerator::VisitIfStatement(IfStatement* node) {
999 ASSERT(!in_spilled_code());
1000 Comment cmnt(masm_, "[ IfStatement");
1001 // Generate different code depending on which parts of the if statement
1002 // are present or not.
1003 bool has_then_stm = node->HasThenStatement();
1004 bool has_else_stm = node->HasElseStatement();
1005
1006 CodeForStatementPosition(node);
1007 JumpTarget exit;
1008 if (has_then_stm && has_else_stm) {
1009 JumpTarget then;
1010 JumpTarget else_;
1011 ControlDestination dest(&then, &else_, true);
1012 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
1013
1014 if (dest.false_was_fall_through()) {
1015 // The else target was bound, so we compile the else part first.
1016 Visit(node->else_statement());
1017
1018 // We may have dangling jumps to the then part.
1019 if (then.is_linked()) {
1020 if (has_valid_frame()) exit.Jump();
1021 then.Bind();
1022 Visit(node->then_statement());
1023 }
1024 } else {
1025 // The then target was bound, so we compile the then part first.
1026 Visit(node->then_statement());
1027
1028 if (else_.is_linked()) {
1029 if (has_valid_frame()) exit.Jump();
1030 else_.Bind();
1031 Visit(node->else_statement());
1032 }
1033 }
1034
1035 } else if (has_then_stm) {
1036 ASSERT(!has_else_stm);
1037 JumpTarget then;
1038 ControlDestination dest(&then, &exit, true);
1039 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
1040
1041 if (dest.false_was_fall_through()) {
1042 // The exit label was bound. We may have dangling jumps to the
1043 // then part.
1044 if (then.is_linked()) {
1045 exit.Unuse();
1046 exit.Jump();
1047 then.Bind();
1048 Visit(node->then_statement());
1049 }
1050 } else {
1051 // The then label was bound.
1052 Visit(node->then_statement());
1053 }
1054
1055 } else if (has_else_stm) {
1056 ASSERT(!has_then_stm);
1057 JumpTarget else_;
1058 ControlDestination dest(&exit, &else_, false);
1059 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
1060
1061 if (dest.true_was_fall_through()) {
1062 // The exit label was bound. We may have dangling jumps to the
1063 // else part.
1064 if (else_.is_linked()) {
1065 exit.Unuse();
1066 exit.Jump();
1067 else_.Bind();
1068 Visit(node->else_statement());
1069 }
1070 } else {
1071 // The else label was bound.
1072 Visit(node->else_statement());
1073 }
1074
1075 } else {
1076 ASSERT(!has_then_stm && !has_else_stm);
1077 // We only care about the condition's side effects (not its value
1078 // or control flow effect). LoadCondition is called without
1079 // forcing control flow.
1080 ControlDestination dest(&exit, &exit, true);
1081 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
1082 if (!dest.is_used()) {
1083 // We got a value on the frame rather than (or in addition to)
1084 // control flow.
1085 frame_->Drop();
1086 }
1087 }
1088
1089 if (exit.is_linked()) {
1090 exit.Bind();
1091 }
1092}
1093
1094
1095void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
1096 ASSERT(!in_spilled_code());
1097 Comment cmnt(masm_, "[ ContinueStatement");
1098 CodeForStatementPosition(node);
1099 node->target()->continue_target()->Jump();
1100}
1101
1102
1103void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
1104 ASSERT(!in_spilled_code());
1105 Comment cmnt(masm_, "[ BreakStatement");
1106 CodeForStatementPosition(node);
1107 node->target()->break_target()->Jump();
1108}
1109
1110
1111void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
1112 ASSERT(!in_spilled_code());
1113 Comment cmnt(masm_, "[ ReturnStatement");
1114
1115 CodeForStatementPosition(node);
1116 Load(node->expression());
1117 Result return_value = frame_->Pop();
1118 if (function_return_is_shadowed_) {
1119 function_return_.Jump(&return_value);
1120 } else {
1121 frame_->PrepareForReturn();
1122 if (function_return_.is_bound()) {
1123 // If the function return label is already bound we reuse the
1124 // code by jumping to the return site.
1125 function_return_.Jump(&return_value);
1126 } else {
1127 function_return_.Bind(&return_value);
1128 GenerateReturnSequence(&return_value);
1129 }
1130 }
1131}
1132
1133
1134void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1135 ASSERT(!in_spilled_code());
1136 Comment cmnt(masm_, "[ WithEnterStatement");
1137 CodeForStatementPosition(node);
1138 Load(node->expression());
1139 Result context;
1140 if (node->is_catch_block()) {
1141 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1142 } else {
1143 context = frame_->CallRuntime(Runtime::kPushContext, 1);
1144 }
1145
1146 // Update context local.
1147 frame_->SaveContextRegister();
1148
1149 // Verify that the runtime call result and rsi agree.
1150 if (FLAG_debug_code) {
1151 __ cmpq(context.reg(), rsi);
1152    __ Assert(equal, "Context push runtime call should end up in rsi");
1153 }
1154}
1155
1156
1157void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1158 ASSERT(!in_spilled_code());
1159 Comment cmnt(masm_, "[ WithExitStatement");
1160 CodeForStatementPosition(node);
1161 // Pop context.
1162 __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
1163 // Update context local.
1164 frame_->SaveContextRegister();
1165}
1166
1167
1168void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
1169 // TODO(X64): This code is completely generic and should be moved somewhere
1170 // where it can be shared between architectures.
1171 ASSERT(!in_spilled_code());
1172 Comment cmnt(masm_, "[ SwitchStatement");
1173 CodeForStatementPosition(node);
1174 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1175
1176 // Compile the switch value.
1177 Load(node->tag());
1178
1179 ZoneList<CaseClause*>* cases = node->cases();
1180 int length = cases->length();
1181 CaseClause* default_clause = NULL;
1182
1183 JumpTarget next_test;
1184 // Compile the case label expressions and comparisons. Exit early
1185 // if a comparison is unconditionally true. The target next_test is
1186 // bound before the loop in order to indicate control flow to the
1187 // first comparison.
1188 next_test.Bind();
1189 for (int i = 0; i < length && !next_test.is_unused(); i++) {
1190 CaseClause* clause = cases->at(i);
1191 // The default is not a test, but remember it for later.
1192 if (clause->is_default()) {
1193 default_clause = clause;
1194 continue;
1195 }
1196
1197 Comment cmnt(masm_, "[ Case comparison");
1198 // We recycle the same target next_test for each test. Bind it if
1199 // the previous test has not done so and then unuse it for the
1200 // loop.
1201 if (next_test.is_linked()) {
1202 next_test.Bind();
1203 }
1204 next_test.Unuse();
1205
1206 // Duplicate the switch value.
1207 frame_->Dup();
1208
1209 // Compile the label expression.
1210 Load(clause->label());
1211
1212 // Compare and branch to the body if true or the next test if
1213 // false. Prefer the next test as a fall through.
1214 ControlDestination dest(clause->body_target(), &next_test, false);
1215 Comparison(equal, true, &dest);
1216
1217 // If the comparison fell through to the true target, jump to the
1218 // actual body.
1219 if (dest.true_was_fall_through()) {
1220 clause->body_target()->Unuse();
1221 clause->body_target()->Jump();
1222 }
1223 }
1224
1225 // If there was control flow to a next test from the last one
1226 // compiled, compile a jump to the default or break target.
1227 if (!next_test.is_unused()) {
1228 if (next_test.is_linked()) {
1229 next_test.Bind();
1230 }
1231 // Drop the switch value.
1232 frame_->Drop();
1233 if (default_clause != NULL) {
1234 default_clause->body_target()->Jump();
1235 } else {
1236 node->break_target()->Jump();
1237 }
1238 }
1239
1240 // The last instruction emitted was a jump, either to the default
1241 // clause or the break target, or else to a case body from the loop
1242 // that compiles the tests.
1243 ASSERT(!has_valid_frame());
1244 // Compile case bodies as needed.
1245 for (int i = 0; i < length; i++) {
1246 CaseClause* clause = cases->at(i);
1247
1248 // There are two ways to reach the body: from the corresponding
1249 // test or as the fall through of the previous body.
1250 if (clause->body_target()->is_linked() || has_valid_frame()) {
1251 if (clause->body_target()->is_linked()) {
1252 if (has_valid_frame()) {
1253 // If we have both a jump to the test and a fall through, put
1254 // a jump on the fall through path to avoid the dropping of
1255 // the switch value on the test path. The exception is the
1256 // default which has already had the switch value dropped.
1257 if (clause->is_default()) {
1258 clause->body_target()->Bind();
1259 } else {
1260 JumpTarget body;
1261 body.Jump();
1262 clause->body_target()->Bind();
1263 frame_->Drop();
1264 body.Bind();
1265 }
1266 } else {
1267 // No fall through to worry about.
1268 clause->body_target()->Bind();
1269 if (!clause->is_default()) {
1270 frame_->Drop();
1271 }
1272 }
1273 } else {
1274 // Otherwise, we have only fall through.
1275 ASSERT(has_valid_frame());
1276 }
1277
1278 // We are now prepared to compile the body.
1279 Comment cmnt(masm_, "[ Case body");
1280 VisitStatements(clause->statements());
1281 }
1282 clause->body_target()->Unuse();
1283 }
1284
1285 // We may not have a valid frame here so bind the break target only
1286 // if needed.
1287 if (node->break_target()->is_linked()) {
1288 node->break_target()->Bind();
1289 }
1290 node->break_target()->Unuse();
1291}
1292
1293
1294void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
1295 ASSERT(!in_spilled_code());
1296 Comment cmnt(masm_, "[ LoopStatement");
1297 CodeForStatementPosition(node);
1298 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1299
1300 // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
1301 // known result for the test expression, with no side effects.
1302 enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
1303 if (node->cond() == NULL) {
1304 ASSERT(node->type() == LoopStatement::FOR_LOOP);
1305 info = ALWAYS_TRUE;
1306 } else {
1307 Literal* lit = node->cond()->AsLiteral();
1308 if (lit != NULL) {
1309 if (lit->IsTrue()) {
1310 info = ALWAYS_TRUE;
1311 } else if (lit->IsFalse()) {
1312 info = ALWAYS_FALSE;
1313 }
1314 }
1315 }
1316
1317 switch (node->type()) {
1318 case LoopStatement::DO_LOOP: {
1319 JumpTarget body(JumpTarget::BIDIRECTIONAL);
1320 IncrementLoopNesting();
1321
1322 // Label the top of the loop for the backward jump if necessary.
1323 if (info == ALWAYS_TRUE) {
1324 // Use the continue target.
1325 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1326 node->continue_target()->Bind();
1327 } else if (info == ALWAYS_FALSE) {
1328 // No need to label it.
1329 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1330 } else {
1331 // Continue is the test, so use the backward body target.
1332 ASSERT(info == DONT_KNOW);
1333 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1334 body.Bind();
1335 }
1336
1337 CheckStack(); // TODO(1222600): ignore if body contains calls.
1338 Visit(node->body());
1339
1340 // Compile the test.
1341 if (info == ALWAYS_TRUE) {
1342 // If control flow can fall off the end of the body, jump back
1343 // to the top and bind the break target at the exit.
1344 if (has_valid_frame()) {
1345 node->continue_target()->Jump();
1346 }
1347 if (node->break_target()->is_linked()) {
1348 node->break_target()->Bind();
1349 }
1350
1351 } else if (info == ALWAYS_FALSE) {
1352 // We may have had continues or breaks in the body.
1353 if (node->continue_target()->is_linked()) {
1354 node->continue_target()->Bind();
1355 }
1356 if (node->break_target()->is_linked()) {
1357 node->break_target()->Bind();
1358 }
1359
1360 } else {
1361 ASSERT(info == DONT_KNOW);
1362 // We have to compile the test expression if it can be reached by
1363 // control flow falling out of the body or via continue.
1364 if (node->continue_target()->is_linked()) {
1365 node->continue_target()->Bind();
1366 }
1367 if (has_valid_frame()) {
1368 ControlDestination dest(&body, node->break_target(), false);
1369 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1370 }
1371 if (node->break_target()->is_linked()) {
1372 node->break_target()->Bind();
1373 }
1374 }
1375 break;
1376 }
1377
1378 case LoopStatement::WHILE_LOOP: {
1379 // Do not duplicate conditions that may have function literal
1380 // subexpressions. This can cause us to compile the function
1381 // literal twice.
1382 bool test_at_bottom = !node->may_have_function_literal();
1383
1384 IncrementLoopNesting();
1385
1386 // If the condition is always false and has no side effects, we
1387 // do not need to compile anything.
1388 if (info == ALWAYS_FALSE) break;
1389
1390 JumpTarget body;
1391 if (test_at_bottom) {
1392 body.set_direction(JumpTarget::BIDIRECTIONAL);
1393 }
1394
1395 // Based on the condition analysis, compile the test as necessary.
1396 if (info == ALWAYS_TRUE) {
1397 // We will not compile the test expression. Label the top of
1398 // the loop with the continue target.
1399 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1400 node->continue_target()->Bind();
1401 } else {
1402 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
1403 if (test_at_bottom) {
1404 // Continue is the test at the bottom, no need to label the
1405 // test at the top. The body is a backward target.
1406 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1407 } else {
1408 // Label the test at the top as the continue target. The
1409 // body is a forward-only target.
1410 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1411 node->continue_target()->Bind();
1412 }
1413 // Compile the test with the body as the true target and
1414 // preferred fall-through and with the break target as the
1415 // false target.
1416 ControlDestination dest(&body, node->break_target(), true);
1417 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1418
1419 if (dest.false_was_fall_through()) {
1420 // If we got the break target as fall-through, the test may
1421 // have been unconditionally false (if there are no jumps to
1422 // the body).
1423 if (!body.is_linked()) break;
1424
1425 // Otherwise, jump around the body on the fall through and
1426 // then bind the body target.
1427 node->break_target()->Unuse();
1428 node->break_target()->Jump();
1429 body.Bind();
1430 }
1431 }
1432
1433 CheckStack(); // TODO(1222600): ignore if body contains calls.
1434 Visit(node->body());
1435
1436 // Based on the condition analysis, compile the backward jump as
1437 // necessary.
1438 if (info == ALWAYS_TRUE) {
1439 // The loop body has been labeled with the continue target.
1440 if (has_valid_frame()) {
1441 node->continue_target()->Jump();
1442 }
1443 } else {
1444 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
1445 if (test_at_bottom) {
1446 // If we have chosen to recompile the test at the bottom,
1447 // then it is the continue target.
1448 if (node->continue_target()->is_linked()) {
1449 node->continue_target()->Bind();
1450 }
1451 if (has_valid_frame()) {
1452 // The break target is the fall-through (body is a backward
1453 // jump from here and thus an invalid fall-through).
1454 ControlDestination dest(&body, node->break_target(), false);
1455 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1456 }
1457 } else {
1458 // If we have chosen not to recompile the test at the
1459 // bottom, jump back to the one at the top.
1460 if (has_valid_frame()) {
1461 node->continue_target()->Jump();
1462 }
1463 }
1464 }
1465
1466 // The break target may be already bound (by the condition), or
1467 // there may not be a valid frame. Bind it only if needed.
1468 if (node->break_target()->is_linked()) {
1469 node->break_target()->Bind();
1470 }
1471 break;
1472 }
1473
1474 case LoopStatement::FOR_LOOP: {
1475 // Do not duplicate conditions that may have function literal
1476 // subexpressions. This can cause us to compile the function
1477 // literal twice.
1478 bool test_at_bottom = !node->may_have_function_literal();
1479
1480 // Compile the init expression if present.
1481 if (node->init() != NULL) {
1482 Visit(node->init());
1483 }
1484
1485 IncrementLoopNesting();
1486
1487 // If the condition is always false and has no side effects, we
1488 // do not need to compile anything else.
1489 if (info == ALWAYS_FALSE) break;
1490
1491 // Target for backward edge if no test at the bottom, otherwise
1492 // unused.
1493 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1494
1495 // Target for backward edge if there is a test at the bottom,
1496 // otherwise used as target for test at the top.
1497 JumpTarget body;
1498 if (test_at_bottom) {
1499 body.set_direction(JumpTarget::BIDIRECTIONAL);
1500 }
1501
1502 // Based on the condition analysis, compile the test as necessary.
1503 if (info == ALWAYS_TRUE) {
1504 // We will not compile the test expression. Label the top of
1505 // the loop.
1506 if (node->next() == NULL) {
1507 // Use the continue target if there is no update expression.
1508 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1509 node->continue_target()->Bind();
1510 } else {
1511 // Otherwise use the backward loop target.
1512 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1513 loop.Bind();
1514 }
1515 } else {
1516 ASSERT(info == DONT_KNOW);
1517 if (test_at_bottom) {
1518 // Continue is either the update expression or the test at
1519 // the bottom, no need to label the test at the top.
1520 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1521 } else if (node->next() == NULL) {
1522 // We are not recompiling the test at the bottom and there
1523 // is no update expression.
1524 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1525 node->continue_target()->Bind();
1526 } else {
1527 // We are not recompiling the test at the bottom and there
1528 // is an update expression.
1529 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1530 loop.Bind();
1531 }
1532
1533 // Compile the test with the body as the true target and
1534 // preferred fall-through and with the break target as the
1535 // false target.
1536 ControlDestination dest(&body, node->break_target(), true);
1537 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1538
1539 if (dest.false_was_fall_through()) {
1540 // If we got the break target as fall-through, the test may
1541 // have been unconditionally false (if there are no jumps to
1542 // the body).
1543 if (!body.is_linked()) break;
1544
1545 // Otherwise, jump around the body on the fall through and
1546 // then bind the body target.
1547 node->break_target()->Unuse();
1548 node->break_target()->Jump();
1549 body.Bind();
1550 }
1551 }
1552
1553 CheckStack(); // TODO(1222600): ignore if body contains calls.
1554 Visit(node->body());
1555
1556 // If there is an update expression, compile it if necessary.
1557 if (node->next() != NULL) {
1558 if (node->continue_target()->is_linked()) {
1559 node->continue_target()->Bind();
1560 }
1561
1562 // Control can reach the update by falling out of the body or
1563 // by a continue.
1564 if (has_valid_frame()) {
1565          // Record the source position of the statement here: although
1566          // this code comes after the body, it belongs to the loop
1567          // statement itself and not to the body.
1568 CodeForStatementPosition(node);
1569 Visit(node->next());
1570 }
1571 }
1572
1573 // Based on the condition analysis, compile the backward jump as
1574 // necessary.
1575 if (info == ALWAYS_TRUE) {
1576 if (has_valid_frame()) {
1577 if (node->next() == NULL) {
1578 node->continue_target()->Jump();
1579 } else {
1580 loop.Jump();
1581 }
1582 }
1583 } else {
1584 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
1585 if (test_at_bottom) {
1586 if (node->continue_target()->is_linked()) {
1587 // We can have dangling jumps to the continue target if
1588 // there was no update expression.
1589 node->continue_target()->Bind();
1590 }
1591 // Control can reach the test at the bottom by falling out
1592 // of the body, by a continue in the body, or from the
1593 // update expression.
1594 if (has_valid_frame()) {
1595 // The break target is the fall-through (body is a
1596 // backward jump from here).
1597 ControlDestination dest(&body, node->break_target(), false);
1598 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1599 }
1600 } else {
1601 // Otherwise, jump back to the test at the top.
1602 if (has_valid_frame()) {
1603 if (node->next() == NULL) {
1604 node->continue_target()->Jump();
1605 } else {
1606 loop.Jump();
1607 }
1608 }
1609 }
1610 }
1611
1612 // The break target may be already bound (by the condition), or
1613 // there may not be a valid frame. Bind it only if needed.
1614 if (node->break_target()->is_linked()) {
1615 node->break_target()->Bind();
1616 }
1617 break;
1618 }
1619 }
1620
1621 DecrementLoopNesting();
1622 node->continue_target()->Unuse();
1623 node->break_target()->Unuse();
1624}
1625
1626
1627void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1628 ASSERT(!in_spilled_code());
1629 VirtualFrame::SpilledScope spilled_scope;
1630 Comment cmnt(masm_, "[ ForInStatement");
1631 CodeForStatementPosition(node);
1632
1633 JumpTarget primitive;
1634 JumpTarget jsobject;
1635 JumpTarget fixed_array;
1636 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1637 JumpTarget end_del_check;
1638 JumpTarget exit;
1639
1640 // Get the object to enumerate over (converted to JSObject).
1641 LoadAndSpill(node->enumerable());
1642
1643 // Both SpiderMonkey and kjs ignore null and undefined in contrast
1644 // to the specification. 12.6.4 mandates a call to ToObject.
1645 frame_->EmitPop(rax);
1646
1647 // rax: value to be iterated over
1648 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1649 exit.Branch(equal);
1650 __ CompareRoot(rax, Heap::kNullValueRootIndex);
1651 exit.Branch(equal);
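  // JavaScript example: 'for (var p in null) {}' or
  // 'for (var p in undefined) {}' therefore runs the body zero times;
  // the branches above go straight to the exit.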
1652
1653 // Stack layout in body:
1654 // [iteration counter (smi)] <- slot 0
1655 // [length of array] <- slot 1
1656 // [FixedArray] <- slot 2
1657 // [Map or 0] <- slot 3
1658 // [Object] <- slot 4
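  // During the loop, slot 0 (the counter) is compared against slot 1 (the
  // length) on every iteration; slot 3 holds the map used for the fast
  // "shape unchanged" check (a smi zero in the slow case, so keys are
  // always filtered); slot 2 holds the names to enumerate; and slot 4 is
  // the object itself, needed for the map check and for key filtering.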
1659
1660 // Check if enumerable is already a JSObject
1661 // rax: value to be iterated over
1662 Condition is_smi = masm_->CheckSmi(rax);
1663 primitive.Branch(is_smi);
1664 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
1665 jsobject.Branch(above_equal);
1666
1667 primitive.Bind();
1668 frame_->EmitPush(rax);
1669 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
1670  // The function call returns the value in rax, which is where we want it below.
1671
1672 jsobject.Bind();
1673 // Get the set of properties (as a FixedArray or Map).
1674 // rax: value to be iterated over
1675 frame_->EmitPush(rax); // push the object being iterated over (slot 4)
1676
1677 frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
1678 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1679
1680 // If we got a Map, we can do a fast modification check.
1681 // Otherwise, we got a FixedArray, and we have to do a slow check.
1682 // rax: map or fixed array (result from call to
1683 // Runtime::kGetPropertyNamesFast)
1684 __ movq(rdx, rax);
1685 __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
1686 __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
1687 fixed_array.Branch(not_equal);
1688
1689 // Get enum cache
1690 // rax: map (result from call to Runtime::kGetPropertyNamesFast)
1691 __ movq(rcx, rax);
1692 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
1693 // Get the bridge array held in the enumeration index field.
1694 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
1695 // Get the cache from the bridge array.
1696 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
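  // The enum cache holds the enumerable property names already computed
  // for this map, so iteration does not have to rebuild the name list;
  // the per-iteration map check further down guards against the object
  // changing shape while we iterate.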
1697
1698 frame_->EmitPush(rax); // <- slot 3
1699 frame_->EmitPush(rdx); // <- slot 2
1700 __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
1701 __ Integer32ToSmi(rax, rax);
1702 frame_->EmitPush(rax); // <- slot 1
1703 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
1704 entry.Jump();
1705
1706 fixed_array.Bind();
1707 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
1708 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
1709 frame_->EmitPush(rax); // <- slot 2
1710
1711 // Push the length of the array and the initial index onto the stack.
1712 __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
1713 __ Integer32ToSmi(rax, rax);
1714 frame_->EmitPush(rax); // <- slot 1
1715 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
1716
1717 // Condition.
1718 entry.Bind();
1719 // Grab the current frame's height for the break and continue
1720 // targets only after all the state is pushed on the frame.
1721 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1722 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1723
1724 __ movq(rax, frame_->ElementAt(0)); // load the current count
1725 __ cmpl(rax, frame_->ElementAt(1)); // compare to the array length
1726 node->break_target()->Branch(above_equal);
1727
1728 // Get the i'th entry of the array.
1729 __ movq(rdx, frame_->ElementAt(2));
1730 SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
1731 __ movq(rbx,
1732 FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
1733
1734  // Get the expected map from the stack, or a zero map in the
1735  // permanent slow case.
1736  // rax: current iteration count.  rbx: i'th entry of the enum cache.
1737 __ movq(rdx, frame_->ElementAt(3));
1738 // Check if the expected map still matches that of the enumerable.
1739 // If not, we have to filter the key.
1740 // rax: current iteration count
1741 // rbx: i'th entry of the enum cache
1742 // rdx: expected map value
1743 __ movq(rcx, frame_->ElementAt(4));
1744 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
1745 __ cmpq(rcx, rdx);
1746 end_del_check.Branch(equal);
1747
1748 // Convert the entry to a string (or null if it isn't a property anymore).
1749 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
1750 frame_->EmitPush(rbx); // push entry
1751 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
1752 __ movq(rbx, rax);
1753
1754 // If the property has been removed while iterating, we just skip it.
1755 __ CompareRoot(rbx, Heap::kNullValueRootIndex);
1756 node->continue_target()->Branch(equal);
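  // JavaScript example: in 'for (var key in o) { delete o.x; }' a key
  // whose property has been deleted by the body is filtered to null here
  // on a later iteration and skipped instead of being handed to 'each'.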
1757
1758 end_del_check.Bind();
1759 // Store the entry in the 'each' expression and take another spin in the
1760  // loop.  rbx: i'th entry of the enum cache (or string thereof).
1761 frame_->EmitPush(rbx);
1762 { Reference each(this, node->each());
1763 // Loading a reference may leave the frame in an unspilled state.
1764 frame_->SpillAll();
1765 if (!each.is_illegal()) {
1766 if (each.size() > 0) {
1767 frame_->EmitPush(frame_->ElementAt(each.size()));
1768 }
1769 // If the reference was to a slot we rely on the convenient property
1770      // that it doesn't matter whether a value (e.g., rbx pushed above) is
1771 // right on top of or right underneath a zero-sized reference.
1772 each.SetValue(NOT_CONST_INIT);
1773 if (each.size() > 0) {
1774 // It's safe to pop the value lying on top of the reference before
1775 // unloading the reference itself (which preserves the top of stack,
1776 // ie, now the topmost value of the non-zero sized reference), since
1777 // we will discard the top of stack after unloading the reference
1778 // anyway.
1779 frame_->Drop();
1780 }
1781 }
1782 }
1783 // Unloading a reference may leave the frame in an unspilled state.
1784 frame_->SpillAll();
1785
1786 // Discard the i'th entry pushed above or else the remainder of the
1787 // reference, whichever is currently on top of the stack.
1788 frame_->Drop();
1789
1790 // Body.
1791 CheckStack(); // TODO(1222600): ignore if body contains calls.
1792 VisitAndSpill(node->body());
1793
1794 // Next. Reestablish a spilled frame in case we are coming here via
1795 // a continue in the body.
1796 node->continue_target()->Bind();
1797 frame_->SpillAll();
1798 frame_->EmitPop(rax);
1799 __ addq(rax, Immediate(Smi::FromInt(1)));
1800 frame_->EmitPush(rax);
1801 entry.Jump();
1802
1803 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1804 // any frame.
1805 node->break_target()->Bind();
1806 frame_->Drop(5);
1807
1808 // Exit.
1809 exit.Bind();
1810
1811 node->continue_target()->Unuse();
1812 node->break_target()->Unuse();
1813}
1814
1815void CodeGenerator::VisitTryCatch(TryCatch* node) {
1816 ASSERT(!in_spilled_code());
1817 VirtualFrame::SpilledScope spilled_scope;
1818 Comment cmnt(masm_, "[ TryCatch");
1819 CodeForStatementPosition(node);
1820
1821 JumpTarget try_block;
1822 JumpTarget exit;
1823
1824 try_block.Call();
1825 // --- Catch block ---
1826 frame_->EmitPush(rax);
1827
1828 // Store the caught exception in the catch variable.
1829 { Reference ref(this, node->catch_var());
1830 ASSERT(ref.is_slot());
1831 // Load the exception to the top of the stack. Here we make use of the
1832 // convenient property that it doesn't matter whether a value is
1833 // immediately on top of or underneath a zero-sized reference.
1834 ref.SetValue(NOT_CONST_INIT);
1835 }
1836
1837 // Remove the exception from the stack.
1838 frame_->Drop();
1839
1840 VisitStatementsAndSpill(node->catch_block()->statements());
1841 if (has_valid_frame()) {
1842 exit.Jump();
1843 }
1844
1845
1846 // --- Try block ---
1847 try_block.Bind();
1848
1849 frame_->PushTryHandler(TRY_CATCH_HANDLER);
1850 int handler_height = frame_->height();
1851
1852 // Shadow the jump targets for all escapes from the try block, including
1853 // returns. During shadowing, the original target is hidden as the
1854 // ShadowTarget and operations on the original actually affect the
1855 // shadowing target.
1856 //
1857 // We should probably try to unify the escaping targets and the return
1858 // target.
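  //
  // JavaScript example: a 'return', 'break', or 'continue' inside
  // 'try { ... } catch (e) { ... }' escapes the try block and must first
  // run the unlink code emitted below so that the TRY_CATCH handler is
  // removed from the handler chain.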
1859 int nof_escapes = node->escaping_targets()->length();
1860 List<ShadowTarget*> shadows(1 + nof_escapes);
1861
1862 // Add the shadow target for the function return.
1863 static const int kReturnShadowIndex = 0;
1864 shadows.Add(new ShadowTarget(&function_return_));
1865 bool function_return_was_shadowed = function_return_is_shadowed_;
1866 function_return_is_shadowed_ = true;
1867 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
1868
1869 // Add the remaining shadow targets.
1870 for (int i = 0; i < nof_escapes; i++) {
1871 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
1872 }
1873
1874 // Generate code for the statements in the try block.
1875 VisitStatementsAndSpill(node->try_block()->statements());
1876
1877 // Stop the introduced shadowing and count the number of required unlinks.
1878 // After shadowing stops, the original targets are unshadowed and the
1879 // ShadowTargets represent the formerly shadowing targets.
1880 bool has_unlinks = false;
1881 for (int i = 0; i < shadows.length(); i++) {
1882 shadows[i]->StopShadowing();
1883 has_unlinks = has_unlinks || shadows[i]->is_linked();
1884 }
1885 function_return_is_shadowed_ = function_return_was_shadowed;
1886
1887 // Get an external reference to the handler address.
1888 ExternalReference handler_address(Top::k_handler_address);
1889
1890 // Make sure that there's nothing left on the stack above the
1891 // handler structure.
1892 if (FLAG_debug_code) {
1893 __ movq(kScratchRegister, handler_address);
1894 __ cmpq(rsp, Operand(kScratchRegister, 0));
1895 __ Assert(equal, "stack pointer should point to top handler");
1896 }
1897
1898 // If we can fall off the end of the try block, unlink from try chain.
1899 if (has_valid_frame()) {
1900 // The next handler address is on top of the frame. Unlink from
1901 // the handler list and drop the rest of this handler from the
1902 // frame.
1903 ASSERT(StackHandlerConstants::kNextOffset == 0);
1904 __ movq(kScratchRegister, handler_address);
1905 frame_->EmitPop(Operand(kScratchRegister, 0));
1906 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
1907 if (has_unlinks) {
1908 exit.Jump();
1909 }
1910 }
1911
1912 // Generate unlink code for the (formerly) shadowing targets that
1913 // have been jumped to. Deallocate each shadow target.
1914 Result return_value;
1915 for (int i = 0; i < shadows.length(); i++) {
1916 if (shadows[i]->is_linked()) {
1917 // Unlink from try chain; be careful not to destroy the TOS if
1918 // there is one.
1919 if (i == kReturnShadowIndex) {
1920 shadows[i]->Bind(&return_value);
1921 return_value.ToRegister(rax);
1922 } else {
1923 shadows[i]->Bind();
1924 }
1925 // Because we can be jumping here (to spilled code) from
1926 // unspilled code, we need to reestablish a spilled frame at
1927 // this block.
1928 frame_->SpillAll();
1929
1930 // Reload sp from the top handler, because some statements that we
1931      // break from (e.g., for...in) may have left stuff on the stack.
1932 __ movq(kScratchRegister, handler_address);
1933 __ movq(rsp, Operand(kScratchRegister, 0));
1934 frame_->Forget(frame_->height() - handler_height);
1935
1936 ASSERT(StackHandlerConstants::kNextOffset == 0);
1937 __ movq(kScratchRegister, handler_address);
1938 frame_->EmitPop(Operand(kScratchRegister, 0));
1939 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
1940
1941 if (i == kReturnShadowIndex) {
1942 if (!function_return_is_shadowed_) frame_->PrepareForReturn();
1943 shadows[i]->other_target()->Jump(&return_value);
1944 } else {
1945 shadows[i]->other_target()->Jump();
1946 }
1947 }
1948 }
1949
1950 exit.Bind();
1951}
1952
1953
1954void CodeGenerator::VisitTryFinally(TryFinally* node) {
1955 ASSERT(!in_spilled_code());
1956 VirtualFrame::SpilledScope spilled_scope;
1957 Comment cmnt(masm_, "[ TryFinally");
1958 CodeForStatementPosition(node);
1959
1960 // State: Used to keep track of reason for entering the finally
1961 // block. Should probably be extended to hold information for
1962 // break/continue from within the try block.
1963 enum { FALLING, THROWING, JUMPING };
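  // FALLING means the try block completed normally, THROWING means an
  // exception was thrown and the handler brought us here, and JUMPING + i
  // records that the i'th shadowed target (return, break or continue) was
  // taken.  After the finally block runs, rcx is compared against these
  // values to decide where to continue.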
1964
1965 JumpTarget try_block;
1966 JumpTarget finally_block;
1967
1968 try_block.Call();
1969
1970 frame_->EmitPush(rax);
1971 // In case of thrown exceptions, this is where we continue.
1972 __ movq(rcx, Immediate(Smi::FromInt(THROWING)));
1973 finally_block.Jump();
1974
1975 // --- Try block ---
1976 try_block.Bind();
1977
1978 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
1979 int handler_height = frame_->height();
1980
1981 // Shadow the jump targets for all escapes from the try block, including
1982 // returns. During shadowing, the original target is hidden as the
1983 // ShadowTarget and operations on the original actually affect the
1984 // shadowing target.
1985 //
1986 // We should probably try to unify the escaping targets and the return
1987 // target.
1988 int nof_escapes = node->escaping_targets()->length();
1989 List<ShadowTarget*> shadows(1 + nof_escapes);
1990
1991 // Add the shadow target for the function return.
1992 static const int kReturnShadowIndex = 0;
1993 shadows.Add(new ShadowTarget(&function_return_));
1994 bool function_return_was_shadowed = function_return_is_shadowed_;
1995 function_return_is_shadowed_ = true;
1996 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
1997
1998 // Add the remaining shadow targets.
1999 for (int i = 0; i < nof_escapes; i++) {
2000 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2001 }
2002
2003 // Generate code for the statements in the try block.
2004 VisitStatementsAndSpill(node->try_block()->statements());
2005
2006 // Stop the introduced shadowing and count the number of required unlinks.
2007 // After shadowing stops, the original targets are unshadowed and the
2008 // ShadowTargets represent the formerly shadowing targets.
2009 int nof_unlinks = 0;
2010 for (int i = 0; i < shadows.length(); i++) {
2011 shadows[i]->StopShadowing();
2012 if (shadows[i]->is_linked()) nof_unlinks++;
2013 }
2014 function_return_is_shadowed_ = function_return_was_shadowed;
2015
2016 // Get an external reference to the handler address.
2017 ExternalReference handler_address(Top::k_handler_address);
2018
2019 // If we can fall off the end of the try block, unlink from the try
2020 // chain and set the state on the frame to FALLING.
2021 if (has_valid_frame()) {
2022 // The next handler address is on top of the frame.
2023 ASSERT(StackHandlerConstants::kNextOffset == 0);
2024 __ movq(kScratchRegister, handler_address);
2025 frame_->EmitPop(Operand(kScratchRegister, 0));
2026 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2027
2028 // Fake a top of stack value (unneeded when FALLING) and set the
2029    // state in rcx, then jump around the unlink blocks if any.
2030 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
2031 __ movq(rcx, Immediate(Smi::FromInt(FALLING)));
2032 if (nof_unlinks > 0) {
2033 finally_block.Jump();
2034 }
2035 }
2036
2037 // Generate code to unlink and set the state for the (formerly)
2038 // shadowing targets that have been jumped to.
2039 for (int i = 0; i < shadows.length(); i++) {
2040 if (shadows[i]->is_linked()) {
2041 // If we have come from the shadowed return, the return value is
2042 // on the virtual frame. We must preserve it until it is
2043 // pushed.
2044 if (i == kReturnShadowIndex) {
2045 Result return_value;
2046 shadows[i]->Bind(&return_value);
2047 return_value.ToRegister(rax);
2048 } else {
2049 shadows[i]->Bind();
2050 }
2051 // Because we can be jumping here (to spilled code) from
2052 // unspilled code, we need to reestablish a spilled frame at
2053 // this block.
2054 frame_->SpillAll();
2055
2056 // Reload sp from the top handler, because some statements that
2057      // we break from (e.g., for...in) may have left stuff on the
2058 // stack.
2059 __ movq(kScratchRegister, handler_address);
2060 __ movq(rsp, Operand(kScratchRegister, 0));
2061 frame_->Forget(frame_->height() - handler_height);
2062
2063 // Unlink this handler and drop it from the frame.
2064 ASSERT(StackHandlerConstants::kNextOffset == 0);
2065 __ movq(kScratchRegister, handler_address);
2066 frame_->EmitPop(Operand(kScratchRegister, 0));
2067 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2068
2069 if (i == kReturnShadowIndex) {
2070 // If this target shadowed the function return, materialize
2071 // the return value on the stack.
2072 frame_->EmitPush(rax);
2073 } else {
2074 // Fake TOS for targets that shadowed breaks and continues.
2075 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
2076 }
2077 __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
2078 if (--nof_unlinks > 0) {
2079 // If this is not the last unlink block, jump around the next.
2080 finally_block.Jump();
2081 }
2082 }
2083 }
2084
2085 // --- Finally block ---
2086 finally_block.Bind();
2087
2088 // Push the state on the stack.
2089 frame_->EmitPush(rcx);
2090
2091 // We keep two elements on the stack - the (possibly faked) result
2092 // and the state - while evaluating the finally block.
2093 //
2094 // Generate code for the statements in the finally block.
2095 VisitStatementsAndSpill(node->finally_block()->statements());
2096
2097 if (has_valid_frame()) {
2098 // Restore state and return value or faked TOS.
2099 frame_->EmitPop(rcx);
2100 frame_->EmitPop(rax);
2101 }
2102
2103 // Generate code to jump to the right destination for all used
2104 // formerly shadowing targets. Deallocate each shadow target.
2105 for (int i = 0; i < shadows.length(); i++) {
2106 if (has_valid_frame() && shadows[i]->is_bound()) {
2107 BreakTarget* original = shadows[i]->other_target();
2108 __ cmpq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
2109 if (i == kReturnShadowIndex) {
2110 // The return value is (already) in rax.
2111 Result return_value = allocator_->Allocate(rax);
2112 ASSERT(return_value.is_valid());
2113 if (function_return_is_shadowed_) {
2114 original->Branch(equal, &return_value);
2115 } else {
2116 // Branch around the preparation for return which may emit
2117 // code.
2118 JumpTarget skip;
2119 skip.Branch(not_equal);
2120 frame_->PrepareForReturn();
2121 original->Jump(&return_value);
2122 skip.Bind();
2123 }
2124 } else {
2125 original->Branch(equal);
2126 }
2127 }
2128 }
2129
2130 if (has_valid_frame()) {
2131 // Check if we need to rethrow the exception.
2132 JumpTarget exit;
2133 __ cmpq(rcx, Immediate(Smi::FromInt(THROWING)));
2134 exit.Branch(not_equal);
2135
2136 // Rethrow exception.
2137 frame_->EmitPush(rax); // undo pop from above
2138 frame_->CallRuntime(Runtime::kReThrow, 1);
2139
2140 // Done.
2141 exit.Bind();
2142 }
2143}
2144
2145
2146void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2147 ASSERT(!in_spilled_code());
2148 Comment cmnt(masm_, "[ DebuggerStatement");
2149 CodeForStatementPosition(node);
2150#ifdef ENABLE_DEBUGGER_SUPPORT
2151 // Spill everything, even constants, to the frame.
2152 frame_->SpillAll();
2153 frame_->CallRuntime(Runtime::kDebugBreak, 0);
2154 // Ignore the return value.
2155#endif
2156}
2157
2158
2159void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2160 // Call the runtime to instantiate the function boilerplate object.
2161 // The inevitable call will sync frame elements to memory anyway, so
2162 // we do it eagerly to allow us to push the arguments directly into
2163 // place.
2164 ASSERT(boilerplate->IsBoilerplate());
2165 frame_->SyncRange(0, frame_->element_count() - 1);
2166
2167 // Push the boilerplate on the stack.
2168 __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
2169 frame_->EmitPush(kScratchRegister);
2170
2171 // Create a new closure.
2172 frame_->EmitPush(rsi);
2173 Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
2174 frame_->Push(&result);
2175}
2176
2177
2178void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2179 Comment cmnt(masm_, "[ FunctionLiteral");
2180
2181 // Build the function boilerplate and instantiate it.
2182 Handle<JSFunction> boilerplate = BuildBoilerplate(node);
2183 // Check for stack-overflow exception.
2184 if (HasStackOverflow()) return;
2185 InstantiateBoilerplate(boilerplate);
2186}
2187
2188
2189void CodeGenerator::VisitFunctionBoilerplateLiteral(
2190 FunctionBoilerplateLiteral* node) {
2191 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2192 InstantiateBoilerplate(node->boilerplate());
2193}
2194
2195
2196void CodeGenerator::VisitConditional(Conditional* node) {
2197 Comment cmnt(masm_, "[ Conditional");
2198 JumpTarget then;
2199 JumpTarget else_;
2200 JumpTarget exit;
2201 ControlDestination dest(&then, &else_, true);
2202 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
2203
2204 if (dest.false_was_fall_through()) {
2205 // The else target was bound, so we compile the else part first.
2206 Load(node->else_expression(), typeof_state());
2207
2208 if (then.is_linked()) {
2209 exit.Jump();
2210 then.Bind();
2211 Load(node->then_expression(), typeof_state());
2212 }
2213 } else {
2214 // The then target was bound, so we compile the then part first.
2215 Load(node->then_expression(), typeof_state());
2216
2217 if (else_.is_linked()) {
2218 exit.Jump();
2219 else_.Bind();
2220 Load(node->else_expression(), typeof_state());
2221 }
2222 }
2223
2224 exit.Bind();
2225}
2226
2227
2228void CodeGenerator::VisitSlot(Slot* node) {
2229 Comment cmnt(masm_, "[ Slot");
2230 LoadFromSlotCheckForArguments(node, typeof_state());
2231}
2232
2233
2234void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2235 Comment cmnt(masm_, "[ VariableProxy");
2236 Variable* var = node->var();
2237 Expression* expr = var->rewrite();
2238 if (expr != NULL) {
2239 Visit(expr);
2240 } else {
2241 ASSERT(var->is_global());
2242 Reference ref(this, node);
2243 ref.GetValue(typeof_state());
2244 }
2245}
2246
2247
2248void CodeGenerator::VisitLiteral(Literal* node) {
2249 Comment cmnt(masm_, "[ Literal");
2250 frame_->Push(node->handle());
2251}
2252
2253
2254// Materialize the regexp literal 'node' in the literals array
2255// 'literals' of the function. Leave the regexp boilerplate in
2256// 'boilerplate'.
2257class DeferredRegExpLiteral: public DeferredCode {
2258 public:
2259 DeferredRegExpLiteral(Register boilerplate,
2260 Register literals,
2261 RegExpLiteral* node)
2262 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2263 set_comment("[ DeferredRegExpLiteral");
2264 }
2265
2266 void Generate();
2267
2268 private:
2269 Register boilerplate_;
2270 Register literals_;
2271 RegExpLiteral* node_;
2272};
2273
2274
2275void DeferredRegExpLiteral::Generate() {
2276 // Since the entry is undefined we call the runtime system to
2277 // compute the literal.
2278 // Literal array (0).
2279 __ push(literals_);
2280 // Literal index (1).
2281 __ push(Immediate(Smi::FromInt(node_->literal_index())));
2282 // RegExp pattern (2).
2283 __ Push(node_->pattern());
2284 // RegExp flags (3).
2285 __ Push(node_->flags());
2286 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2287 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2288}
2289
2290
2291void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2292 Comment cmnt(masm_, "[ RegExp Literal");
2293
2294 // Retrieve the literals array and check the allocated entry. Begin
2295 // with a writable copy of the function of this activation in a
2296 // register.
2297 frame_->PushFunction();
2298 Result literals = frame_->Pop();
2299 literals.ToRegister();
2300 frame_->Spill(literals.reg());
2301
2302 // Load the literals array of the function.
2303 __ movq(literals.reg(),
2304 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2305
2306 // Load the literal at the ast saved index.
2307 Result boilerplate = allocator_->Allocate();
2308 ASSERT(boilerplate.is_valid());
2309 int literal_offset =
2310 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2311 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
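  // The entry for literal i lives at FixedArray::kHeaderSize +
  // i * kPointerSize in the literals array (FieldOperand compensates for
  // the heap object tag); an undefined entry means the boilerplate has
  // not been materialized yet and the deferred code must create it.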
2312
2313 // Check whether we need to materialize the RegExp object. If so,
2314 // jump to the deferred code passing the literals array.
2315 DeferredRegExpLiteral* deferred =
2316 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
2317 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2318 deferred->Branch(equal);
2319 deferred->BindExit();
2320 literals.Unuse();
2321
2322 // Push the boilerplate object.
2323 frame_->Push(&boilerplate);
2324}
2325
2326
2327// Materialize the object literal 'node' in the literals array
2328// 'literals' of the function. Leave the object boilerplate in
2329// 'boilerplate'.
2330class DeferredObjectLiteral: public DeferredCode {
2331 public:
2332 DeferredObjectLiteral(Register boilerplate,
2333 Register literals,
2334 ObjectLiteral* node)
2335 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2336 set_comment("[ DeferredObjectLiteral");
2337 }
2338
2339 void Generate();
2340
2341 private:
2342 Register boilerplate_;
2343 Register literals_;
2344 ObjectLiteral* node_;
2345};
2346
2347
2348void DeferredObjectLiteral::Generate() {
2349 // Since the entry is undefined we call the runtime system to
2350 // compute the literal.
2351 // Literal array (0).
2352 __ push(literals_);
2353 // Literal index (1).
2354 __ push(Immediate(Smi::FromInt(node_->literal_index())));
2355 // Constant properties (2).
2356 __ Push(node_->constant_properties());
2357 __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
2358 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2359}
2360
2361
2362void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2363 Comment cmnt(masm_, "[ ObjectLiteral");
2364
2365 // Retrieve the literals array and check the allocated entry. Begin
2366 // with a writable copy of the function of this activation in a
2367 // register.
2368 frame_->PushFunction();
2369 Result literals = frame_->Pop();
2370 literals.ToRegister();
2371 frame_->Spill(literals.reg());
2372
2373 // Load the literals array of the function.
2374 __ movq(literals.reg(),
2375 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2376
2377 // Load the literal at the ast saved index.
2378 Result boilerplate = allocator_->Allocate();
2379 ASSERT(boilerplate.is_valid());
2380 int literal_offset =
2381 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2382 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2383
2384 // Check whether we need to materialize the object literal boilerplate.
2385 // If so, jump to the deferred code passing the literals array.
2386 DeferredObjectLiteral* deferred =
2387 new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
2388 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2389 deferred->Branch(equal);
2390 deferred->BindExit();
2391 literals.Unuse();
2392
2393 // Push the boilerplate object.
2394 frame_->Push(&boilerplate);
2395 // Clone the boilerplate object.
2396 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2397 if (node->depth() == 1) {
2398 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2399 }
2400 Result clone = frame_->CallRuntime(clone_function_id, 1);
2401 // Push the newly cloned literal object as the result.
2402 frame_->Push(&clone);
2403
2404 for (int i = 0; i < node->properties()->length(); i++) {
2405 ObjectLiteral::Property* property = node->properties()->at(i);
2406 switch (property->kind()) {
2407 case ObjectLiteral::Property::CONSTANT:
2408 break;
2409 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2410 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2411 // else fall through.
2412 case ObjectLiteral::Property::COMPUTED: {
2413 Handle<Object> key(property->key()->handle());
2414 if (key->IsSymbol()) {
2415 // Duplicate the object as the IC receiver.
2416 frame_->Dup();
2417 Load(property->value());
2418 frame_->Push(key);
2419 Result ignored = frame_->CallStoreIC();
2420 // Drop the duplicated receiver and ignore the result.
2421 frame_->Drop();
2422 break;
2423 }
2424 // Fall through
2425 }
2426 case ObjectLiteral::Property::PROTOTYPE: {
2427 // Duplicate the object as an argument to the runtime call.
2428 frame_->Dup();
2429 Load(property->key());
2430 Load(property->value());
2431 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
2432 // Ignore the result.
2433 break;
2434 }
2435 case ObjectLiteral::Property::SETTER: {
2436 // Duplicate the object as an argument to the runtime call.
2437 frame_->Dup();
2438 Load(property->key());
2439 frame_->Push(Smi::FromInt(1));
2440 Load(property->value());
2441 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2442 // Ignore the result.
2443 break;
2444 }
2445 case ObjectLiteral::Property::GETTER: {
2446 // Duplicate the object as an argument to the runtime call.
2447 frame_->Dup();
2448 Load(property->key());
2449 frame_->Push(Smi::FromInt(0));
2450 Load(property->value());
2451 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2452 // Ignore the result.
2453 break;
2454 }
2455 default: UNREACHABLE();
2456 }
2457 }
2458}
2459
2460
2461// Materialize the array literal 'node' in the literals array 'literals'
2462// of the function. Leave the array boilerplate in 'boilerplate'.
2463class DeferredArrayLiteral: public DeferredCode {
2464 public:
2465 DeferredArrayLiteral(Register boilerplate,
2466 Register literals,
2467 ArrayLiteral* node)
2468 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2469 set_comment("[ DeferredArrayLiteral");
2470 }
2471
2472 void Generate();
2473
2474 private:
2475 Register boilerplate_;
2476 Register literals_;
2477 ArrayLiteral* node_;
2478};
2479
2480
2481void DeferredArrayLiteral::Generate() {
2482 // Since the entry is undefined we call the runtime system to
2483 // compute the literal.
2484 // Literal array (0).
2485 __ push(literals_);
2486 // Literal index (1).
2487 __ push(Immediate(Smi::FromInt(node_->literal_index())));
2488  // Constant elements (2).
2489 __ Push(node_->literals());
2490 __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
2491 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2492}
2493
2494
2495void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2496 Comment cmnt(masm_, "[ ArrayLiteral");
2497
2498 // Retrieve the literals array and check the allocated entry. Begin
2499 // with a writable copy of the function of this activation in a
2500 // register.
2501 frame_->PushFunction();
2502 Result literals = frame_->Pop();
2503 literals.ToRegister();
2504 frame_->Spill(literals.reg());
2505
2506 // Load the literals array of the function.
2507 __ movq(literals.reg(),
2508 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2509
2510 // Load the literal at the ast saved index.
2511 Result boilerplate = allocator_->Allocate();
2512 ASSERT(boilerplate.is_valid());
2513 int literal_offset =
2514 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2515 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2516
2517 // Check whether we need to materialize the object literal boilerplate.
2518 // If so, jump to the deferred code passing the literals array.
2519 DeferredArrayLiteral* deferred =
2520 new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
2521 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2522 deferred->Branch(equal);
2523 deferred->BindExit();
2524 literals.Unuse();
2525
2526 // Push the resulting array literal boilerplate on the stack.
2527 frame_->Push(&boilerplate);
2528 // Clone the boilerplate object.
2529 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2530 if (node->depth() == 1) {
2531 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2532 }
2533 Result clone = frame_->CallRuntime(clone_function_id, 1);
2534 // Push the newly cloned literal object as the result.
2535 frame_->Push(&clone);
2536
2537 // Generate code to set the elements in the array that are not
2538 // literals.
2539 for (int i = 0; i < node->values()->length(); i++) {
2540 Expression* value = node->values()->at(i);
2541
2542 // If value is a literal the property value is already set in the
2543 // boilerplate object.
2544 if (value->AsLiteral() != NULL) continue;
2545 // If value is a materialized literal the property value is already set
2546 // in the boilerplate object if it is simple.
2547 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2548
2549 // The property must be set by generated code.
2550 Load(value);
2551
2552 // Get the property value off the stack.
2553 Result prop_value = frame_->Pop();
2554 prop_value.ToRegister();
2555
2556 // Fetch the array literal while leaving a copy on the stack and
2557 // use it to get the elements array.
2558 frame_->Dup();
2559 Result elements = frame_->Pop();
2560 elements.ToRegister();
2561 frame_->Spill(elements.reg());
2562 // Get the elements FixedArray.
2563 __ movq(elements.reg(),
2564 FieldOperand(elements.reg(), JSObject::kElementsOffset));
2565
2566 // Write to the indexed properties array.
2567 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2568 __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
2569
2570 // Update the write barrier for the array address.
2571 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
2572 Result scratch = allocator_->Allocate();
2573 ASSERT(scratch.is_valid());
2574 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
2575 }
2576}
2577
2578
2579void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2580 ASSERT(!in_spilled_code());
2581 // Call runtime routine to allocate the catch extension object and
2582 // assign the exception value to the catch variable.
2583 Comment cmnt(masm_, "[ CatchExtensionObject");
2584 Load(node->key());
2585 Load(node->value());
2586 Result result =
2587 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2588 frame_->Push(&result);
2589}
2590
2591
2592void CodeGenerator::VisitAssignment(Assignment* node) {
2593 Comment cmnt(masm_, "[ Assignment");
2594
2595 { Reference target(this, node->target());
2596 if (target.is_illegal()) {
2597 // Fool the virtual frame into thinking that we left the assignment's
2598 // value on the frame.
2599 frame_->Push(Smi::FromInt(0));
2600 return;
2601 }
2602 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2603
2604 if (node->starts_initialization_block()) {
2605 ASSERT(target.type() == Reference::NAMED ||
2606 target.type() == Reference::KEYED);
2607 // Change to slow case in the beginning of an initialization
2608 // block to avoid the quadratic behavior of repeatedly adding
2609 // fast properties.
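      // JavaScript example (illustrative): a run of assignments such as
      //   this.a = 1; this.b = 2; this.c = 3;
      // in a constructor can form such a block, so the receiver is
      // switched to slow properties here and back to fast properties when
      // the block ends (see ends_initialization_block() below).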
2610
2611 // The receiver is the argument to the runtime call. It is the
2612 // first value pushed when the reference was loaded to the
2613 // frame.
2614 frame_->PushElementAt(target.size() - 1);
2615 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
2616 }
2617 if (node->op() == Token::ASSIGN ||
2618 node->op() == Token::INIT_VAR ||
2619 node->op() == Token::INIT_CONST) {
2620 Load(node->value());
2621
2622 } else {
2623 Literal* literal = node->value()->AsLiteral();
2624 bool overwrite_value =
2625 (node->value()->AsBinaryOperation() != NULL &&
2626 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2627 Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
2628      // There are two cases, easy to test for, where the target is not read
2629      // in the right hand side: the right hand side is a literal, or the
2630      // right hand side is a different variable.  TakeValue invalidates
2631 // the target, with an implicit promise that it will be written to again
2632 // before it is read.
2633 if (literal != NULL || (right_var != NULL && right_var != var)) {
2634 target.TakeValue(NOT_INSIDE_TYPEOF);
2635 } else {
2636 target.GetValue(NOT_INSIDE_TYPEOF);
2637 }
2638 Load(node->value());
2639 GenericBinaryOperation(node->binary_op(),
2640 node->type(),
2641 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
2642 }
2643
2644 if (var != NULL &&
2645 var->mode() == Variable::CONST &&
2646 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2647 // Assignment ignored - leave the value on the stack.
2648 } else {
2649 CodeForSourcePosition(node->position());
2650 if (node->op() == Token::INIT_CONST) {
2651 // Dynamic constant initializations must use the function context
2652 // and initialize the actual constant declared. Dynamic variable
2653 // initializations are simply assignments and use SetValue.
2654 target.SetValue(CONST_INIT);
2655 } else {
2656 target.SetValue(NOT_CONST_INIT);
2657 }
2658 if (node->ends_initialization_block()) {
2659 ASSERT(target.type() == Reference::NAMED ||
2660 target.type() == Reference::KEYED);
2661 // End of initialization block. Revert to fast case. The
2662 // argument to the runtime call is the receiver, which is the
2663 // first value pushed as part of the reference, which is below
2664 // the lhs value.
2665 frame_->PushElementAt(target.size());
2666 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
2667 }
2668 }
2669 }
2670}
2671
2672
2673void CodeGenerator::VisitThrow(Throw* node) {
2674 Comment cmnt(masm_, "[ Throw");
2675 Load(node->exception());
2676 Result result = frame_->CallRuntime(Runtime::kThrow, 1);
2677 frame_->Push(&result);
2678}
2679
2680
2681void CodeGenerator::VisitProperty(Property* node) {
2682 Comment cmnt(masm_, "[ Property");
2683 Reference property(this, node);
2684 property.GetValue(typeof_state());
2685}
2686
2687
2688void CodeGenerator::VisitCall(Call* node) {
2689 Comment cmnt(masm_, "[ Call");
2690
2691 ZoneList<Expression*>* args = node->arguments();
2692
2693 // Check if the function is a variable or a property.
2694 Expression* function = node->expression();
2695 Variable* var = function->AsVariableProxy()->AsVariable();
2696 Property* property = function->AsProperty();
2697
2698 // ------------------------------------------------------------------------
2699 // Fast-case: Use inline caching.
2700 // ---
2701 // According to ECMA-262, section 11.2.3, page 44, the function to call
2702 // must be resolved after the arguments have been evaluated. The IC code
2703 // automatically handles this by loading the arguments before the function
2704 // is resolved in cache misses (this also holds for megamorphic calls).
2705 // ------------------------------------------------------------------------
2706
2707 if (var != NULL && var->is_possibly_eval()) {
2708 // ----------------------------------
2709 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2710 // ----------------------------------
2711
2712 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2713 // resolve the function we need to call and the receiver of the
2714 // call. Then we call the resolved function using the given
2715 // arguments.
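    // The runtime call decides whether this is a direct call to the
    // global eval, which must run with the caller's context and receiver,
    // or an ordinary call to whatever 'eval' resolved to, and returns the
    // function and the receiver to use as a pair.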
2716
2717 // Prepare the stack for the call to the resolved function.
2718 Load(function);
2719
2720 // Allocate a frame slot for the receiver.
2721 frame_->Push(Factory::undefined_value());
2722 int arg_count = args->length();
2723 for (int i = 0; i < arg_count; i++) {
2724 Load(args->at(i));
2725 }
2726
2727 // Prepare the stack for the call to ResolvePossiblyDirectEval.
2728 frame_->PushElementAt(arg_count + 1);
2729 if (arg_count > 0) {
2730 frame_->PushElementAt(arg_count);
2731 } else {
2732 frame_->Push(Factory::undefined_value());
2733 }
2734
2735 // Resolve the call.
2736 Result result =
2737 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
2738
2739 // Touch up the stack with the right values for the function and the
2740 // receiver. Use a scratch register to avoid destroying the result.
2741 Result scratch = allocator_->Allocate();
2742 ASSERT(scratch.is_valid());
2743 __ movq(scratch.reg(),
2744 FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
2745 frame_->SetElementAt(arg_count + 1, &scratch);
2746
2747 // We can reuse the result register now.
2748 frame_->Spill(result.reg());
2749 __ movq(result.reg(),
2750 FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
2751 frame_->SetElementAt(arg_count, &result);
2752
2753 // Call the function.
2754 CodeForSourcePosition(node->position());
2755 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
2756 CallFunctionStub call_function(arg_count, in_loop);
2757 result = frame_->CallStub(&call_function, arg_count + 1);
2758
2759 // Restore the context and overwrite the function on the stack with
2760 // the result.
2761 frame_->RestoreContextRegister();
2762 frame_->SetElementAt(0, &result);
2763
2764 } else if (var != NULL && !var->is_this() && var->is_global()) {
2765 // ----------------------------------
2766 // JavaScript example: 'foo(1, 2, 3)' // foo is global
2767 // ----------------------------------
2768
2769 // Push the name of the function and the receiver onto the stack.
2770 frame_->Push(var->name());
2771
2772 // Pass the global object as the receiver and let the IC stub
2773 // patch the stack to use the global proxy as 'this' in the
2774 // invoked function.
2775 LoadGlobal();
2776
2777 // Load the arguments.
2778 int arg_count = args->length();
2779 for (int i = 0; i < arg_count; i++) {
2780 Load(args->at(i));
2781 }
2782
2783 // Call the IC initialization code.
2784 CodeForSourcePosition(node->position());
2785 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
2786 arg_count,
2787 loop_nesting());
2788 frame_->RestoreContextRegister();
2789 // Replace the function on the stack with the result.
2790 frame_->SetElementAt(0, &result);
2791
2792 } else if (var != NULL && var->slot() != NULL &&
2793 var->slot()->type() == Slot::LOOKUP) {
2794 // ----------------------------------
2795 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
2796 // ----------------------------------
2797
2798 // Load the function from the context. Sync the frame so we can
2799 // push the arguments directly into place.
2800 frame_->SyncRange(0, frame_->element_count() - 1);
2801 frame_->EmitPush(rsi);
2802 frame_->EmitPush(var->name());
2803 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2804 // The runtime call returns a pair of values in rax and rdx. The
2805 // looked-up function is in rax and the receiver is in rdx. These
2806 // register references are not ref counted here. We spill them
2807 // eagerly since they are arguments to an inevitable call (and are
2808 // not sharable by the arguments).
2809 ASSERT(!allocator()->is_used(rax));
2810 frame_->EmitPush(rax);
2811
2812 // Load the receiver.
2813 ASSERT(!allocator()->is_used(rdx));
2814 frame_->EmitPush(rdx);
2815
2816 // Call the function.
2817 CallWithArguments(args, node->position());
2818
2819 } else if (property != NULL) {
2820 // Check if the key is a literal string.
2821 Literal* literal = property->key()->AsLiteral();
2822
2823 if (literal != NULL && literal->handle()->IsSymbol()) {
2824 // ------------------------------------------------------------------
2825 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
2826 // ------------------------------------------------------------------
2827
2828 Handle<String> name = Handle<String>::cast(literal->handle());
2829
2830 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
2831 name->IsEqualTo(CStrVector("apply")) &&
2832 args->length() == 2 &&
2833 args->at(1)->AsVariableProxy() != NULL &&
2834 args->at(1)->AsVariableProxy()->IsArguments()) {
2835 // Use the optimized Function.prototype.apply that avoids
2836 // allocating lazily allocated arguments objects.
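        // JavaScript example:
        //   'function f() { return g.apply(this, arguments); }'
        // matches this pattern, so no arguments object has to be created.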
2837 CallApplyLazy(property,
2838 args->at(0),
2839 args->at(1)->AsVariableProxy(),
2840 node->position());
2841
2842 } else {
2843 // Push the name of the function and the receiver onto the stack.
2844 frame_->Push(name);
2845 Load(property->obj());
2846
2847 // Load the arguments.
2848 int arg_count = args->length();
2849 for (int i = 0; i < arg_count; i++) {
2850 Load(args->at(i));
2851 }
2852
2853 // Call the IC initialization code.
2854 CodeForSourcePosition(node->position());
2855 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
2856 arg_count,
2857 loop_nesting());
2858 frame_->RestoreContextRegister();
2859 // Replace the function on the stack with the result.
2860 frame_->SetElementAt(0, &result);
2861 }
2862
2863 } else {
2864 // -------------------------------------------
2865 // JavaScript example: 'array[index](1, 2, 3)'
2866 // -------------------------------------------
2867
2868 // Load the function to call from the property through a reference.
2869 Reference ref(this, property);
2870 ref.GetValue(NOT_INSIDE_TYPEOF);
2871
2872 // Pass receiver to called function.
2873 if (property->is_synthetic()) {
2874 // Use global object as receiver.
2875 LoadGlobalReceiver();
2876 } else {
2877 // The reference's size is non-negative.
2878 frame_->PushElementAt(ref.size());
2879 }
2880
2881 // Call the function.
2882 CallWithArguments(args, node->position());
2883 }
2884
2885 } else {
2886 // ----------------------------------
2887 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
2888 // ----------------------------------
2889
2890 // Load the function.
2891 Load(function);
2892
2893 // Pass the global proxy as the receiver.
2894 LoadGlobalReceiver();
2895
2896 // Call the function.
2897 CallWithArguments(args, node->position());
2898 }
2899}
2900
2901
2902void CodeGenerator::VisitCallNew(CallNew* node) {
2903 Comment cmnt(masm_, "[ CallNew");
2904
2905 // According to ECMA-262, section 11.2.2, page 44, the function
2906 // expression in new calls must be evaluated before the
2907 // arguments. This is different from ordinary calls, where the
2908 // actual function to call is resolved after the arguments have been
2909 // evaluated.
2910
2911 // Compute function to call and use the global object as the
2912 // receiver. There is no need to use the global proxy here because
2913 // it will always be replaced with a newly allocated object.
2914 Load(node->expression());
2915 LoadGlobal();
2916
2917 // Push the arguments ("left-to-right") on the stack.
2918 ZoneList<Expression*>* args = node->arguments();
2919 int arg_count = args->length();
2920 for (int i = 0; i < arg_count; i++) {
2921 Load(args->at(i));
2922 }
2923
2924 // Call the construct call builtin that handles allocation and
2925 // constructor invocation.
2926 CodeForSourcePosition(node->position());
2927 Result result = frame_->CallConstructor(arg_count);
2928 // Replace the function on the stack with the result.
2929 frame_->SetElementAt(0, &result);
2930}
2931
2932
2933void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
2934 if (CheckForInlineRuntimeCall(node)) {
2935 return;
2936 }
2937
2938 ZoneList<Expression*>* args = node->arguments();
2939 Comment cmnt(masm_, "[ CallRuntime");
2940 Runtime::Function* function = node->function();
2941
2942 if (function == NULL) {
2943 // Prepare stack for calling JS runtime function.
2944 frame_->Push(node->name());
2945 // Push the builtins object found in the current global object.
2946 Result temp = allocator()->Allocate();
2947 ASSERT(temp.is_valid());
2948 __ movq(temp.reg(), GlobalObject());
2949 __ movq(temp.reg(),
2950 FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
2951 frame_->Push(&temp);
2952 }
2953
2954 // Push the arguments ("left-to-right").
2955 int arg_count = args->length();
2956 for (int i = 0; i < arg_count; i++) {
2957 Load(args->at(i));
2958 }
2959
2960 if (function == NULL) {
2961 // Call the JS runtime function.
2962 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
2963 arg_count,
2964 loop_nesting_);
2965 frame_->RestoreContextRegister();
2966 frame_->SetElementAt(0, &answer);
2967 } else {
2968 // Call the C runtime function.
2969 Result answer = frame_->CallRuntime(function, arg_count);
2970 frame_->Push(&answer);
2971 }
2972}
2973
2974
2975void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
2976 // Note that because of NOT and an optimization in comparison of a typeof
2977 // expression to a literal string, this function can fail to leave a value
2978 // on top of the frame or in the cc register.
2979 Comment cmnt(masm_, "[ UnaryOperation");
2980
2981 Token::Value op = node->op();
2982
2983 if (op == Token::NOT) {
2984 // Swap the true and false targets but keep the same actual label
2985 // as the fall through.
2986 destination()->Invert();
2987 LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
2988 // Swap the labels back.
2989 destination()->Invert();
2990
2991 } else if (op == Token::DELETE) {
2992 Property* property = node->expression()->AsProperty();
2993 if (property != NULL) {
2994 Load(property->obj());
2995 Load(property->key());
2996 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
2997 frame_->Push(&answer);
2998 return;
2999 }
3000
3001 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3002 if (variable != NULL) {
3003 Slot* slot = variable->slot();
3004 if (variable->is_global()) {
3005 LoadGlobal();
3006 frame_->Push(variable->name());
3007 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3008 CALL_FUNCTION, 2);
3009 frame_->Push(&answer);
3010 return;
3011
3012 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3013 // Call the runtime to look up the context holding the named
3014 // variable. Sync the virtual frame eagerly so we can push the
3015 // arguments directly into place.
3016 frame_->SyncRange(0, frame_->element_count() - 1);
3017 frame_->EmitPush(rsi);
3018 frame_->EmitPush(variable->name());
3019 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
3020 ASSERT(context.is_register());
3021 frame_->EmitPush(context.reg());
3022 context.Unuse();
3023 frame_->EmitPush(variable->name());
3024 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3025 CALL_FUNCTION, 2);
3026 frame_->Push(&answer);
3027 return;
3028 }
3029
3030 // Default: Result of deleting non-global, not dynamically
3031 // introduced variables is false.
3032 frame_->Push(Factory::false_value());
3033
3034 } else {
3035 // Default: Result of deleting expressions is true.
3036 Load(node->expression()); // may have side-effects
3037 frame_->SetElementAt(0, Factory::true_value());
3038 }
3039
3040 } else if (op == Token::TYPEOF) {
3041 // Special case for loading the typeof expression; see comment on
3042 // LoadTypeofExpression().
3043 LoadTypeofExpression(node->expression());
3044 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
3045 frame_->Push(&answer);
3046
3047 } else if (op == Token::VOID) {
3048 Expression* expression = node->expression();
3049 if (expression && expression->AsLiteral() && (
3050 expression->AsLiteral()->IsTrue() ||
3051 expression->AsLiteral()->IsFalse() ||
3052 expression->AsLiteral()->handle()->IsNumber() ||
3053 expression->AsLiteral()->handle()->IsString() ||
3054 expression->AsLiteral()->handle()->IsJSRegExp() ||
3055 expression->AsLiteral()->IsNull())) {
3056 // Omit evaluating the value of the primitive literal.
3057 // It will be discarded anyway, and can have no side effect.
3058 frame_->Push(Factory::undefined_value());
3059 } else {
3060 Load(node->expression());
3061 frame_->SetElementAt(0, Factory::undefined_value());
3062 }
3063
3064 } else {
3065 Load(node->expression());
3066 switch (op) {
3067 case Token::NOT:
3068 case Token::DELETE:
3069 case Token::TYPEOF:
3070 UNREACHABLE(); // handled above
3071 break;
3072
3073 case Token::SUB: {
3074 bool overwrite =
3075 (node->AsBinaryOperation() != NULL &&
3076 node->AsBinaryOperation()->ResultOverwriteAllowed());
3077 UnarySubStub stub(overwrite);
3078        // TODO(1222589): remove the dependency on TOS being cached inside the stub.
3079 Result operand = frame_->Pop();
3080 Result answer = frame_->CallStub(&stub, &operand);
3081 frame_->Push(&answer);
3082 break;
3083 }
3084
3085 case Token::BIT_NOT: {
3086 // Smi check.
3087 JumpTarget smi_label;
3088 JumpTarget continue_label;
3089 Result operand = frame_->Pop();
3090 operand.ToRegister();
3091
3092 Condition is_smi = masm_->CheckSmi(operand.reg());
3093 smi_label.Branch(is_smi, &operand);
3094
3095 frame_->Push(&operand); // undo popping of TOS
3096 Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
3097 CALL_FUNCTION, 1);
3098 continue_label.Jump(&answer);
3099 smi_label.Bind(&answer);
3100 answer.ToRegister();
3101 frame_->Spill(answer.reg());
3102 __ SmiNot(answer.reg(), answer.reg());
3103 continue_label.Bind(&answer);
3104 frame_->Push(&answer);
3105 break;
3106 }
3107
3108 case Token::ADD: {
3109 // Smi check.
3110 JumpTarget continue_label;
3111 Result operand = frame_->Pop();
3112 operand.ToRegister();
3113 Condition is_smi = masm_->CheckSmi(operand.reg());
3114 continue_label.Branch(is_smi, &operand);
3115 frame_->Push(&operand);
3116 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
3117 CALL_FUNCTION, 1);
3118
3119 continue_label.Bind(&answer);
3120 frame_->Push(&answer);
3121 break;
3122 }
3123
3124 default:
3125 UNREACHABLE();
3126 }
3127 }
3128}
3129
3130
3131// The value in dst was optimistically incremented or decremented in a
3132// scratch register, but the result overflowed or was not smi tagged.
3133// Call into the runtime to convert the argument to a number and to
3134// perform the add or subtract.  The result is left in dst.
3135class DeferredPrefixCountOperation: public DeferredCode {
3136 public:
3137 DeferredPrefixCountOperation(Register dst, bool is_increment)
3138 : dst_(dst), is_increment_(is_increment) {
3139 set_comment("[ DeferredCountOperation");
3140 }
3141
3142 virtual void Generate();
3143
3144 private:
3145 Register dst_;
3146 bool is_increment_;
3147};
3148
3149
3150void DeferredPrefixCountOperation::Generate() {
3151 __ push(dst_);
3152 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3153 __ push(rax);
3154 __ push(Immediate(Smi::FromInt(1)));
3155 if (is_increment_) {
3156 __ CallRuntime(Runtime::kNumberAdd, 2);
3157 } else {
3158 __ CallRuntime(Runtime::kNumberSub, 2);
3159 }
3160 if (!dst_.is(rax)) __ movq(dst_, rax);
3161}
3162
3163
3164// The value in dst was optimistically incremented or decremented in a
3165// scratch register, but the result overflowed or was not smi tagged.
3166// Call into the runtime to convert the argument to a number.  Update the
3167// original value in old.  Call the runtime to do the add or subtract.
3168// The result is left in dst.
3169class DeferredPostfixCountOperation: public DeferredCode {
3170 public:
3171 DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
3172 : dst_(dst), old_(old), is_increment_(is_increment) {
3173 set_comment("[ DeferredCountOperation");
3174 }
3175
3176 virtual void Generate();
3177
3178 private:
3179 Register dst_;
3180 Register old_;
3181 bool is_increment_;
3182};
3183
3184
3185void DeferredPostfixCountOperation::Generate() {
3186 __ push(dst_);
3187 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3188
3189 // Save the result of ToNumber to use as the old value.
3190 __ push(rax);
3191
3192 // Call the runtime for the addition or subtraction.
3193 __ push(rax);
3194 __ push(Immediate(Smi::FromInt(1)));
3195 if (is_increment_) {
3196 __ CallRuntime(Runtime::kNumberAdd, 2);
3197 } else {
3198 __ CallRuntime(Runtime::kNumberSub, 2);
3199 }
3200 if (!dst_.is(rax)) __ movq(dst_, rax);
3201 __ pop(old_);
3202}
3203
3204
3205void CodeGenerator::VisitCountOperation(CountOperation* node) {
3206 Comment cmnt(masm_, "[ CountOperation");
3207
3208 bool is_postfix = node->is_postfix();
3209 bool is_increment = node->op() == Token::INC;
3210
3211 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3212 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3213
3214 // Postfix operations need a stack slot under the reference to hold
3215  // the old value while the new value is being stored.  This is so that,
3216  // if storing the new value requires a call, the old value is already in
3217  // the frame where it can be spilled.
3218 if (is_postfix) frame_->Push(Smi::FromInt(0));
3219
3220 { Reference target(this, node->expression());
3221 if (target.is_illegal()) {
3222 // Spoof the virtual frame to have the expected height (one higher
3223 // than on entry).
3224 if (!is_postfix) frame_->Push(Smi::FromInt(0));
3225 return;
3226 }
3227 target.TakeValue(NOT_INSIDE_TYPEOF);
3228
3229 Result new_value = frame_->Pop();
3230 new_value.ToRegister();
3231
3232 Result old_value; // Only allocated in the postfix case.
3233 if (is_postfix) {
3234 // Allocate a temporary to preserve the old value.
3235 old_value = allocator_->Allocate();
3236 ASSERT(old_value.is_valid());
3237 __ movq(old_value.reg(), new_value.reg());
3238 }
3239 // Ensure the new value is writable.
3240 frame_->Spill(new_value.reg());
3241
3242 DeferredCode* deferred = NULL;
3243 if (is_postfix) {
3244 deferred = new DeferredPostfixCountOperation(new_value.reg(),
3245 old_value.reg(),
3246 is_increment);
3247 } else {
3248 deferred = new DeferredPrefixCountOperation(new_value.reg(),
3249 is_increment);
3250 }
3251
3252 __ movq(kScratchRegister, new_value.reg());
3253 if (is_increment) {
3254 __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
3255 } else {
3256 __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
3257 }
3258 // Bail out to the deferred code on overflow or if the result is not a smi.
3259 deferred->Branch(overflow);
3260 __ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
3261 __ movq(new_value.reg(), kScratchRegister);
3262 deferred->BindExit();
3263
3264 // Postfix: store the old value in the allocated slot under the
3265 // reference.
3266 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
3267
3268 frame_->Push(&new_value);
3269 // Non-constant: update the reference.
3270 if (!is_const) target.SetValue(NOT_CONST_INIT);
3271 }
3272
3273 // Postfix: drop the new value and use the old.
3274 if (is_postfix) frame_->Drop();
3275}
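// Illustrative note (the exact smi range is an assumption inferred from the
// 32-bit addl/subl above, not stated in this file): the optimistic increment
// bails out to the deferred code not only for non-smi operands but also when
// the result leaves the smi range. For example, incrementing the largest smi
// value (2^30 - 1 under the one-bit tag used here) makes the tagged addition
// overflow a signed 32-bit register, so the deferred code redoes the
// addition in the runtime and produces a heap number instead.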
3276
3277
3278void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3279 // TODO(X64): This code was copied verbatim from codegen-ia32.
3280 // Either find a reason to change it or move it to a shared location.
3281
3282 // Note that due to an optimization in comparison operations (typeof
3283 // compared to a string literal), we can evaluate a binary expression such
3284 // as AND or OR and not leave a value on the frame or in the cc register.
3285 Comment cmnt(masm_, "[ BinaryOperation");
3286 Token::Value op = node->op();
3287
3288 // According to ECMA-262 section 11.11, page 58, the binary logical
3289 // operators must yield the result of one of the two expressions
3290 // before any ToBoolean() conversions. This means that the value
3291 // produced by a && or || operator is not necessarily a boolean.
3292
3293 // NOTE: If the left hand side produces a materialized value (not
3294 // control flow), we force the right hand side to do the same. This
3295 // is necessary because we assume that if we get control flow on the
3296 // last path out of an expression we got it on all paths.
3297 if (op == Token::AND) {
3298 JumpTarget is_true;
3299 ControlDestination dest(&is_true, destination()->false_target(), true);
3300 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
3301
3302 if (dest.false_was_fall_through()) {
3303 // The current false target was used as the fall-through. If
3304 // there are no dangling jumps to is_true then the left
3305 // subexpression was unconditionally false. Otherwise we have
3306 // paths where we do have to evaluate the right subexpression.
3307 if (is_true.is_linked()) {
3308 // We need to compile the right subexpression. If the jump to
3309 // the current false target was a forward jump then we have a
3310 // valid frame, we have just bound the false target, and we
3311 // have to jump around the code for the right subexpression.
3312 if (has_valid_frame()) {
3313 destination()->false_target()->Unuse();
3314 destination()->false_target()->Jump();
3315 }
3316 is_true.Bind();
3317 // The left subexpression compiled to control flow, so the
3318 // right one is free to do so as well.
3319 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
3320 } else {
3321 // We have actually just jumped to or bound the current false
3322 // target but the current control destination is not marked as
3323 // used.
3324 destination()->Use(false);
3325 }
3326
3327 } else if (dest.is_used()) {
3328 // The left subexpression compiled to control flow (and is_true
3329 // was just bound), so the right is free to do so as well.
3330 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
3331
3332 } else {
3333 // We have a materialized value on the frame, so we exit with
3334 // one on all paths. There are possibly also jumps to is_true
3335 // from nested subexpressions.
3336 JumpTarget pop_and_continue;
3337 JumpTarget exit;
3338
3339 // Avoid popping the result if it converts to 'false' using the
3340 // standard ToBoolean() conversion as described in ECMA-262,
3341 // section 9.2, page 30.
3342 //
3343 // Duplicate the TOS value. The duplicate will be popped by
3344 // ToBoolean.
3345 frame_->Dup();
3346 ControlDestination dest(&pop_and_continue, &exit, true);
3347 ToBoolean(&dest);
3348
3349 // Pop the result of evaluating the first part.
3350 frame_->Drop();
3351
3352 // Compile right side expression.
3353 is_true.Bind();
3354 Load(node->right());
3355
3356 // Exit (always with a materialized value).
3357 exit.Bind();
3358 }
3359
3360 } else if (op == Token::OR) {
3361 JumpTarget is_false;
3362 ControlDestination dest(destination()->true_target(), &is_false, false);
3363 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
3364
3365 if (dest.true_was_fall_through()) {
3366 // The current true target was used as the fall-through. If
3367 // there are no dangling jumps to is_false then the left
3368 // subexpression was unconditionally true. Otherwise we have
3369 // paths where we do have to evaluate the right subexpression.
3370 if (is_false.is_linked()) {
3371 // We need to compile the right subexpression. If the jump to
3372 // the current true target was a forward jump then we have a
3373 // valid frame, we have just bound the true target, and we
3374 // have to jump around the code for the right subexpression.
3375 if (has_valid_frame()) {
3376 destination()->true_target()->Unuse();
3377 destination()->true_target()->Jump();
3378 }
3379 is_false.Bind();
3380 // The left subexpression compiled to control flow, so the
3381 // right one is free to do so as well.
3382 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
3383 } else {
3384 // We have just jumped to or bound the current true target but
3385 // the current control destination is not marked as used.
3386 destination()->Use(true);
3387 }
3388
3389 } else if (dest.is_used()) {
3390 // The left subexpression compiled to control flow (and is_false
3391 // was just bound), so the right is free to do so as well.
3392 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
3393
3394 } else {
3395 // We have a materialized value on the frame, so we exit with
3396 // one on all paths. There are possibly also jumps to is_false
3397 // from nested subexpressions.
3398 JumpTarget pop_and_continue;
3399 JumpTarget exit;
3400
3401 // Avoid popping the result if it converts to 'true' using the
3402 // standard ToBoolean() conversion as described in ECMA-262,
3403 // section 9.2, page 30.
3404 //
3405 // Duplicate the TOS value. The duplicate will be popped by
3406 // ToBoolean.
3407 frame_->Dup();
3408 ControlDestination dest(&exit, &pop_and_continue, false);
3409 ToBoolean(&dest);
3410
3411 // Pop the result of evaluating the first part.
3412 frame_->Drop();
3413
3414 // Compile right side expression.
3415 is_false.Bind();
3416 Load(node->right());
3417
3418 // Exit (always with a materialized value).
3419 exit.Bind();
3420 }
3421
3422 } else {
3423 // NOTE: The code below assumes that the slow cases (calls to runtime)
3424 // never return a constant/immutable object.
3425 OverwriteMode overwrite_mode = NO_OVERWRITE;
3426 if (node->left()->AsBinaryOperation() != NULL &&
3427 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3428 overwrite_mode = OVERWRITE_LEFT;
3429 } else if (node->right()->AsBinaryOperation() != NULL &&
3430 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3431 overwrite_mode = OVERWRITE_RIGHT;
3432 }
3433
3434 Load(node->left());
3435 Load(node->right());
3436 GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
3437 }
3438}
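// Illustrative example (standard ECMA-262 section 11.11 semantics, not part
// of the original source): && and || yield one of their operands rather than
// a boolean, e.g.
//   0 || "fallback"   evaluates to "fallback"
//   "" && f()         evaluates to "" without calling f
// which is why the code above keeps the materialized left value on the frame
// and only drops it when control continues into the right operand.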
3439
3440
3441
3442void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
3443 Comment cmnt(masm_, "[ CompareOperation");
3444
3445 // Get the expressions from the node.
3446 Expression* left = node->left();
3447 Expression* right = node->right();
3448 Token::Value op = node->op();
3449 // To make typeof testing for natives implemented in JavaScript really
3450 // efficient, we generate special code for expressions of the form:
3451 // 'typeof <expression> == <string>'.
3452 UnaryOperation* operation = left->AsUnaryOperation();
3453 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
3454 (operation != NULL && operation->op() == Token::TYPEOF) &&
3455 (right->AsLiteral() != NULL &&
3456 right->AsLiteral()->handle()->IsString())) {
3457 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
3458
3459 // Load the operand and move it to a register.
3460 LoadTypeofExpression(operation->expression());
3461 Result answer = frame_->Pop();
3462 answer.ToRegister();
3463
3464 if (check->Equals(Heap::number_symbol())) {
3465 Condition is_smi = masm_->CheckSmi(answer.reg());
3466 destination()->true_target()->Branch(is_smi);
3467 frame_->Spill(answer.reg());
3468 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
3469 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
3470 answer.Unuse();
3471 destination()->Split(equal);
3472
3473 } else if (check->Equals(Heap::string_symbol())) {
3474 Condition is_smi = masm_->CheckSmi(answer.reg());
3475 destination()->false_target()->Branch(is_smi);
3476
3477 // It can be an undetectable string object.
3478 __ movq(kScratchRegister,
3479 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3480 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3481 Immediate(1 << Map::kIsUndetectable));
3482 destination()->false_target()->Branch(not_zero);
3483 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
3484 answer.Unuse();
3485 destination()->Split(below); // Unsigned byte comparison needed.
3486
3487 } else if (check->Equals(Heap::boolean_symbol())) {
3488 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
3489 destination()->true_target()->Branch(equal);
3490 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
3491 answer.Unuse();
3492 destination()->Split(equal);
3493
3494 } else if (check->Equals(Heap::undefined_symbol())) {
3495 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
3496 destination()->true_target()->Branch(equal);
3497
3498 Condition is_smi = masm_->CheckSmi(answer.reg());
3499 destination()->false_target()->Branch(is_smi);
3500
3501 // It can be an undetectable object.
3502 __ movq(kScratchRegister,
3503 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3504 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3505 Immediate(1 << Map::kIsUndetectable));
3506 answer.Unuse();
3507 destination()->Split(not_zero);
3508
3509 } else if (check->Equals(Heap::function_symbol())) {
3510 Condition is_smi = masm_->CheckSmi(answer.reg());
3511 destination()->false_target()->Branch(is_smi);
3512 frame_->Spill(answer.reg());
3513 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
3514 answer.Unuse();
3515 destination()->Split(equal);
3516
3517 } else if (check->Equals(Heap::object_symbol())) {
3518 Condition is_smi = masm_->CheckSmi(answer.reg());
3519 destination()->false_target()->Branch(is_smi);
3520 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
3521 destination()->true_target()->Branch(equal);
3522
3523 // It can be an undetectable object.
3524 __ movq(kScratchRegister,
3525 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3526 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3527 Immediate(1 << Map::kIsUndetectable));
3528 destination()->false_target()->Branch(not_zero);
3529 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3530 destination()->false_target()->Branch(below);
3531 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3532 answer.Unuse();
3533 destination()->Split(below_equal);
3534 } else {
3535 // Uncommon case: typeof testing against a string literal that is
3536 // never returned from the typeof operator.
3537 answer.Unuse();
3538 destination()->Goto(false);
3539 }
3540 return;
3541 }
3542
3543 Condition cc = no_condition;
3544 bool strict = false;
3545 switch (op) {
3546 case Token::EQ_STRICT:
3547 strict = true;
3548 // Fall through
3549 case Token::EQ:
3550 cc = equal;
3551 break;
3552 case Token::LT:
3553 cc = less;
3554 break;
3555 case Token::GT:
3556 cc = greater;
3557 break;
3558 case Token::LTE:
3559 cc = less_equal;
3560 break;
3561 case Token::GTE:
3562 cc = greater_equal;
3563 break;
3564 case Token::IN: {
3565 Load(left);
3566 Load(right);
3567 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
3568 frame_->Push(&answer); // push the result
3569 return;
3570 }
3571 case Token::INSTANCEOF: {
3572 Load(left);
3573 Load(right);
3574 InstanceofStub stub;
3575 Result answer = frame_->CallStub(&stub, 2);
3576 answer.ToRegister();
3577 __ testq(answer.reg(), answer.reg());
3578 answer.Unuse();
3579 destination()->Split(zero);
3580 return;
3581 }
3582 default:
3583 UNREACHABLE();
3584 }
3585 Load(left);
3586 Load(right);
3587 Comparison(cc, strict, destination());
3588}
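// Illustrative example (not part of the original source): the special case
// above means a test such as
//   if (typeof x == "number") ...
// compiles to a smi check plus a heap-number map check on x, without ever
// materializing the string that typeof would return, and a comparison
// against a string typeof can never produce (say "numbr") is folded to a
// constant false branch.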
3589
3590
3591void CodeGenerator::VisitThisFunction(ThisFunction* node) {
3592 frame_->PushFunction();
3593}
3594
3595
3596void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3597 ASSERT(args->length() == 1);
3598
3599 // ArgumentsAccessStub expects the key in rdx and the formal
3600 // parameter count in rax.
3601 Load(args->at(0));
3602 Result key = frame_->Pop();
3603 // Explicitly create a constant result.
3604 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
3605 // Call the shared stub to get to arguments[key].
3606 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3607 Result result = frame_->CallStub(&stub, &key, &count);
3608 frame_->Push(&result);
3609}
3610
3611
3612void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3613 ASSERT(args->length() == 1);
3614 Load(args->at(0));
3615 Result value = frame_->Pop();
3616 value.ToRegister();
3617 ASSERT(value.is_valid());
3618 Condition is_smi = masm_->CheckSmi(value.reg());
3619 destination()->false_target()->Branch(is_smi);
3620 // It is a heap object - get map.
3621 // Check if the object is a JS array or not.
3622 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
3623 value.Unuse();
3624 destination()->Split(equal);
3625}
3626
3627
3628void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3629 ASSERT(args->length() == 0);
3630
3631 // Get the frame pointer for the calling frame.
3632 Result fp = allocator()->Allocate();
3633 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3634
3635 // Skip the arguments adaptor frame if it exists.
3636 Label check_frame_marker;
3637 __ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
3638 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3639 __ j(not_equal, &check_frame_marker);
3640 __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
3641
3642 // Check the marker in the calling frame.
3643 __ bind(&check_frame_marker);
3644 __ cmpq(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
3645 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
3646 fp.Unuse();
3647 destination()->Split(equal);
3648}
3649
3650
3651void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3652 ASSERT(args->length() == 0);
3653 // ArgumentsAccessStub takes the parameter count as an input argument
3654 // in register rax. Create a constant result for it.
3655 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
3656 // Call the shared stub to get to the arguments.length.
3657 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3658 Result result = frame_->CallStub(&stub, &count);
3659 frame_->Push(&result);
3660}
3661
3662
3663void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3664 Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
3665 ASSERT(args->length() == 2);
3666
3667 Label slow_case;
3668 Label end;
3669 Label not_a_flat_string;
3670 Label a_cons_string;
3671 Label try_again_with_new_string;
3672 Label ascii_string;
3673 Label got_char_code;
3674
3675 Load(args->at(0));
3676 Load(args->at(1));
3677 Result index = frame_->Pop();
3678 Result object = frame_->Pop();
3679
3680 // Get register rcx to use as shift amount later.
3681 Result shift_amount;
3682 if (object.is_register() && object.reg().is(rcx)) {
3683 Result fresh = allocator_->Allocate();
3684 shift_amount = object;
3685 object = fresh;
3686 __ movq(object.reg(), rcx);
3687 }
3688 if (index.is_register() && index.reg().is(rcx)) {
3689 Result fresh = allocator_->Allocate();
3690 shift_amount = index;
3691 index = fresh;
3692 __ movq(index.reg(), rcx);
3693 }
3694 // There could be references to rcx in the frame. Allocating will
3695 // spill them; otherwise spill rcx explicitly.
3696 if (shift_amount.is_valid()) {
3697 frame_->Spill(rcx);
3698 } else {
3699 shift_amount = allocator()->Allocate(rcx);
3700 }
3701 ASSERT(shift_amount.is_register());
3702 ASSERT(shift_amount.reg().is(rcx));
3703 ASSERT(allocator_->count(rcx) == 1);
3704
3705 // We will mutate the index register and possibly the object register.
3706 // The case where they are somehow the same register is handled
3707 // because we only mutate them in the case where the receiver is a
3708 // heap object and the index is not.
3709 object.ToRegister();
3710 index.ToRegister();
3711 frame_->Spill(object.reg());
3712 frame_->Spill(index.reg());
3713
3714 // We need a single extra temporary register.
3715 Result temp = allocator()->Allocate();
3716 ASSERT(temp.is_valid());
3717
3718 // There is no virtual frame effect from here up to the final result
3719 // push.
3720
3721 // If the receiver is a smi trigger the slow case.
3722 __ JumpIfSmi(object.reg(), &slow_case);
3723
3724 // If the index is negative or non-smi trigger the slow case.
3725 __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
3726
3727 // Untag the index.
3728 __ SmiToInteger32(index.reg(), index.reg());
3729
3730 __ bind(&try_again_with_new_string);
3731 // Fetch the instance type of the receiver into rcx.
3732 __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
3733 __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
3734 // If the receiver is not a string trigger the slow case.
3735 __ testb(rcx, Immediate(kIsNotStringMask));
3736 __ j(not_zero, &slow_case);
3737
3738 // Here we make assumptions about the tag values and the shifts needed.
3739 // See the comment in objects.h.
3740 ASSERT(kLongStringTag == 0);
3741 ASSERT(kMediumStringTag + String::kLongLengthShift ==
3742 String::kMediumLengthShift);
3743 ASSERT(kShortStringTag + String::kLongLengthShift ==
3744 String::kShortLengthShift);
3745 __ and_(rcx, Immediate(kStringSizeMask));
3746 __ addq(rcx, Immediate(String::kLongLengthShift));
3747 // Fetch the length field into the temporary register.
3748 __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
3749 __ shrl(temp.reg()); // The shift amount in rcx is an implicit operand.
3750 // Check for index out of range.
3751 __ cmpl(index.reg(), temp.reg());
3752 __ j(greater_equal, &slow_case);
3753 // Reload the instance type (into the temp register this time).
3754 __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
3755 __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
3756
3757 // We need special handling for non-flat strings.
3758 ASSERT(kSeqStringTag == 0);
3759 __ testb(temp.reg(), Immediate(kStringRepresentationMask));
3760 __ j(not_zero, &not_a_flat_string);
3761 // Check for 1-byte or 2-byte string.
3762 __ testb(temp.reg(), Immediate(kStringEncodingMask));
3763 __ j(not_zero, &ascii_string);
3764
3765 // 2-byte string.
3766 // Load the 2-byte character code into the temp register.
3767 __ movzxwl(temp.reg(), FieldOperand(object.reg(),
3768 index.reg(),
3769 times_2,
3770 SeqTwoByteString::kHeaderSize));
3771 __ jmp(&got_char_code);
3772
3773 // ASCII string.
3774 __ bind(&ascii_string);
3775 // Load the byte into the temp register.
3776 __ movzxbl(temp.reg(), FieldOperand(object.reg(),
3777 index.reg(),
3778 times_1,
3779 SeqAsciiString::kHeaderSize));
3780 __ bind(&got_char_code);
3781 __ Integer32ToSmi(temp.reg(), temp.reg());
3782 __ jmp(&end);
3783
3784 // Handle non-flat strings.
3785 __ bind(&not_a_flat_string);
3786 __ and_(temp.reg(), Immediate(kStringRepresentationMask));
3787 __ cmpb(temp.reg(), Immediate(kConsStringTag));
3788 __ j(equal, &a_cons_string);
3789 __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
3790 __ j(not_equal, &slow_case);
3791
3792 // SlicedString.
3793 // Add the offset to the index and trigger the slow case on overflow.
3794 __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
3795 __ j(overflow, &slow_case);
3796 // Getting the underlying string is done by running the cons string code.
3797
3798 // ConsString.
3799 __ bind(&a_cons_string);
3800 // Get the first of the two strings. Both sliced and cons strings
3801 // store their source string at the same offset.
3802 ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
3803 __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
3804 __ jmp(&try_again_with_new_string);
3805
3806 __ bind(&slow_case);
3807 // Move the undefined value into the result register, which will
3808 // trigger the slow case.
3809 __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
3810
3811 __ bind(&end);
3812 frame_->Push(&temp);
3813}
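// Illustrative example (not part of the original source): for a flat ASCII
// receiver the fast path above returns the character code directly, e.g.
// "abc".charCodeAt(1) yields the smi 98 ('b'). Cons and sliced strings are
// unwrapped and retried, while a non-string receiver, a negative or non-smi
// index, or an out-of-range index pushes undefined, which the calling code
// treats as the signal to fall back to the generic slow case.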
3814
3815
3816void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3817 ASSERT(args->length() == 1);
3818 Load(args->at(0));
3819 Result value = frame_->Pop();
3820 value.ToRegister();
3821 ASSERT(value.is_valid());
3822 Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
3823 value.Unuse();
3824 destination()->Split(positive_smi);
3825}
3826
3827
3828void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3829 ASSERT(args->length() == 1);
3830 Load(args->at(0));
3831 Result value = frame_->Pop();
3832 value.ToRegister();
3833 ASSERT(value.is_valid());
3834 Condition is_smi = masm_->CheckSmi(value.reg());
3835 value.Unuse();
3836 destination()->Split(is_smi);
3837}
3838
3839
3840void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3841 // Conditionally generate a log call.
3842 // Args:
3843 // 0 (literal string): The type of logging (corresponds to the flags).
3844 // This is used to determine whether or not to generate the log call.
3845 // 1 (string): Format string. Access the string at argument index 2
3846 // with '%2s' (see Logger::LogRuntime for all the formats).
3847 // 2 (array): Arguments to the format string.
3848 ASSERT_EQ(args->length(), 3);
3849#ifdef ENABLE_LOGGING_AND_PROFILING
3850 if (ShouldGenerateLog(args->at(0))) {
3851 Load(args->at(1));
3852 Load(args->at(2));
3853 frame_->CallRuntime(Runtime::kLog, 2);
3854 }
3855#endif
3856 // Finally, we're expected to leave a value on the top of the stack.
3857 frame_->Push(Factory::undefined_value());
3858}
3859
3860
3861void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3862 ASSERT(args->length() == 2);
3863
3864 // Load the two objects into registers and perform the comparison.
3865 Load(args->at(0));
3866 Load(args->at(1));
3867 Result right = frame_->Pop();
3868 Result left = frame_->Pop();
3869 right.ToRegister();
3870 left.ToRegister();
3871 __ cmpq(right.reg(), left.reg());
3872 right.Unuse();
3873 left.Unuse();
3874 destination()->Split(equal);
3875}
3876
3877
3878void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
3879 ASSERT(args->length() == 0);
3880 // RBP value is aligned, so it should be tagged as a smi (without necessarily
3881 // being padded as a smi).
3882 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3883 Result rbp_as_smi = allocator_->Allocate();
3884 ASSERT(rbp_as_smi.is_valid());
3885 __ movq(rbp_as_smi.reg(), rbp);
3886 frame_->Push(&rbp_as_smi);
3887}
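// Illustrative note (an assumption about the tagging scheme, based only on
// the ASSERT above): with kSmiTag == 0 and kSmiTagSize == 1, any value whose
// low bit is clear passes a smi check. Since rbp is always at least 8-byte
// aligned (e.g. 0x7fff5fbff8a0), pushing it unchanged yields a value the
// rest of the system can safely treat as a smi, as the comment above notes.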
3888
3889
3890void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3891 ASSERT(args->length() == 0);
3892 frame_->SpillAll();
3893 __ push(rsi);
3894
3895 // Make sure the frame is aligned like the OS expects.
3896 static const int kFrameAlignment = OS::ActivationFrameAlignment();
3897 if (kFrameAlignment > 0) {
3898 ASSERT(IsPowerOf2(kFrameAlignment));
3899 __ movq(rbx, rsp); // Save rsp in an AMD-64 ABI callee-saved register.
3900 __ and_(rsp, Immediate(-kFrameAlignment));
3901 }
3902
3903 // Call V8::RandomPositiveSmi().
3904 __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
3905
3906 // Restore stack pointer from callee-saved register.
3907 if (kFrameAlignment > 0) {
3908 __ movq(rsp, rbx);
3909 }
3910
3911 __ pop(rsi);
3912 Result result = allocator_->Allocate(rax);
3913 frame_->Push(&result);
3914}
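// Illustrative note (general AMD-64 ABI background, not taken from this
// file): OS::ActivationFrameAlignment() is typically 16 on this platform, so
// the and_(rsp, -alignment) above rounds the stack pointer down to the
// required boundary before calling into the C++ function
// V8::RandomPositiveSmi, and rbx preserves the original rsp for the restore
// afterwards.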
3915
3916
3917void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
3918 JumpTarget done;
3919 JumpTarget call_runtime;
3920 ASSERT(args->length() == 1);
3921
3922 // Load number and duplicate it.
3923 Load(args->at(0));
3924 frame_->Dup();
3925
3926 // Get the number into an unaliased register and load it onto the
3927 // floating point stack still leaving one copy on the frame.
3928 Result number = frame_->Pop();
3929 number.ToRegister();
3930 frame_->Spill(number.reg());
3931 FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
3932 number.Unuse();
3933
3934 // Perform the operation on the number.
3935 switch (op) {
3936 case SIN:
3937 __ fsin();
3938 break;
3939 case COS:
3940 __ fcos();
3941 break;
3942 }
3943
3944 // Go slow case if argument to operation is out of range.
3945 Result eax_reg = allocator()->Allocate(rax);
3946 ASSERT(eax_reg.is_valid());
3947 __ fnstsw_ax();
3948 __ testl(rax, Immediate(0x0400)); // Bit 10 is condition flag C2.
3949 eax_reg.Unuse();
3950 call_runtime.Branch(not_zero);
3951
3952 // Allocate heap number for result if possible.
3953 Result scratch = allocator()->Allocate();
3954 Result heap_number = allocator()->Allocate();
3955 FloatingPointHelper::AllocateHeapNumber(masm_,
3956 call_runtime.entry_label(),
3957 scratch.reg(),
3958 heap_number.reg());
3959 scratch.Unuse();
3960
3961 // Store the result in the allocated heap number.
3962 __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
3963 // Replace the extra copy of the argument with the result.
3964 frame_->SetElementAt(0, &heap_number);
3965 done.Jump();
3966
3967 call_runtime.Bind();
3968 // Free ST(0) which was not popped before calling into the runtime.
3969 __ ffree(0);
3970 Result answer;
3971 switch (op) {
3972 case SIN:
3973 answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
3974 break;
3975 case COS:
3976 answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
3977 break;
3978 }
3979 frame_->Push(&answer);
3980 done.Bind();
3981}
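// Illustrative note (x87 background, not taken from this file): fsin and
// fcos set condition flag C2 (bit 10 of the status word read by fnstsw_ax
// above) and leave their operand unchanged when the argument is outside the
// supported range (|x| >= 2^63), e.g. Math.sin(1e300). In that case the code
// frees ST(0) and falls back to Runtime::kMath_sin or Runtime::kMath_cos.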
3982
3983
3984void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
3985 ASSERT(args->length() == 1);
3986 JumpTarget leave, null, function, non_function_constructor;
3987 Load(args->at(0)); // Load the object.
3988 Result obj = frame_->Pop();
3989 obj.ToRegister();
3990 frame_->Spill(obj.reg());
3991
3992 // If the object is a smi, we return null.
3993 Condition is_smi = masm_->CheckSmi(obj.reg());
3994 null.Branch(is_smi);
3995
3996 // Check that the object is a JS object but take special care of JS
3997 // functions to make sure they have 'Function' as their class.
3998
3999 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
4000 null.Branch(below);
4001
4002 // As long as JS_FUNCTION_TYPE is the last instance type and it is
4003 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4004 // LAST_JS_OBJECT_TYPE.
4005 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4006 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4007 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
4008 function.Branch(equal);
4009
4010 // Check if the constructor in the map is a function.
4011 __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
4012 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
4013 non_function_constructor.Branch(not_equal);
4014
4015 // The obj register now contains the constructor function. Grab the
4016 // instance class name from there.
4017 __ movq(obj.reg(),
4018 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
4019 __ movq(obj.reg(),
4020 FieldOperand(obj.reg(),
4021 SharedFunctionInfo::kInstanceClassNameOffset));
4022 frame_->Push(&obj);
4023 leave.Jump();
4024
4025 // Functions have class 'Function'.
4026 function.Bind();
4027 frame_->Push(Factory::function_class_symbol());
4028 leave.Jump();
4029
4030 // Objects with a non-function constructor have class 'Object'.
4031 non_function_constructor.Bind();
4032 frame_->Push(Factory::Object_symbol());
4033 leave.Jump();
4034
4035 // Non-JS objects have class null.
4036 null.Bind();
4037 frame_->Push(Factory::null_value());
4038
4039 // All done.
4040 leave.Bind();
4041}
4042
4043
4044void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4045 ASSERT(args->length() == 2);
4046 JumpTarget leave;
4047 Load(args->at(0)); // Load the object.
4048 Load(args->at(1)); // Load the value.
4049 Result value = frame_->Pop();
4050 Result object = frame_->Pop();
4051 value.ToRegister();
4052 object.ToRegister();
4053
4054 // if (object->IsSmi()) return value.
4055 Condition is_smi = masm_->CheckSmi(object.reg());
4056 leave.Branch(is_smi, &value);
4057
4058 // It is a heap object - get its map.
4059 Result scratch = allocator_->Allocate();
4060 ASSERT(scratch.is_valid());
4061 // if (!object->IsJSValue()) return value.
4062 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
4063 leave.Branch(not_equal, &value);
4064
4065 // Store the value.
4066 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
4067 // Update the write barrier. Save the value as it will be
4068 // overwritten by the write barrier code and is needed afterward.
4069 Result duplicate_value = allocator_->Allocate();
4070 ASSERT(duplicate_value.is_valid());
4071 __ movq(duplicate_value.reg(), value.reg());
4072 // The object register is also overwritten by the write barrier and
4073 // possibly aliased in the frame.
4074 frame_->Spill(object.reg());
4075 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
4076 scratch.reg());
4077 object.Unuse();
4078 scratch.Unuse();
4079 duplicate_value.Unuse();
4080
4081 // Leave.
4082 leave.Bind(&value);
4083 frame_->Push(&value);
4084}
4085
4086
4087void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4088 ASSERT(args->length() == 1);
4089 JumpTarget leave;
4090 Load(args->at(0)); // Load the object.
4091 frame_->Dup();
4092 Result object = frame_->Pop();
4093 object.ToRegister();
4094 ASSERT(object.is_valid());
4095 // if (object->IsSmi()) return object.
4096 Condition is_smi = masm_->CheckSmi(object.reg());
4097 leave.Branch(is_smi);
4098 // It is a heap object - get map.
4099 Result temp = allocator()->Allocate();
4100 ASSERT(temp.is_valid());
4101 // if (!object->IsJSValue()) return object.
4102 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
4103 leave.Branch(not_equal);
4104 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
4105 object.Unuse();
4106 frame_->SetElementAt(0, &temp);
4107 leave.Bind();
4108}
4109
4110
4111// -----------------------------------------------------------------------------
4112// CodeGenerator implementation of Expressions
4113
4114void CodeGenerator::LoadAndSpill(Expression* expression,
4115 TypeofState typeof_state) {
4116 // TODO(x64): No architecture specific code. Move to shared location.
4117 ASSERT(in_spilled_code());
4118 set_in_spilled_code(false);
4119 Load(expression, typeof_state);
4120 frame_->SpillAll();
4121 set_in_spilled_code(true);
4122}
4123
4124
4125void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
4126#ifdef DEBUG
4127 int original_height = frame_->height();
4128#endif
4129 ASSERT(!in_spilled_code());
4130 JumpTarget true_target;
4131 JumpTarget false_target;
4132 ControlDestination dest(&true_target, &false_target, true);
4133 LoadCondition(x, typeof_state, &dest, false);
4134
4135 if (dest.false_was_fall_through()) {
4136 // The false target was just bound.
4137 JumpTarget loaded;
4138 frame_->Push(Factory::false_value());
4139 // There may be dangling jumps to the true target.
4140 if (true_target.is_linked()) {
4141 loaded.Jump();
4142 true_target.Bind();
4143 frame_->Push(Factory::true_value());
4144 loaded.Bind();
4145 }
4146
4147 } else if (dest.is_used()) {
4148 // There is true, and possibly false, control flow (with true as
4149 // the fall through).
4150 JumpTarget loaded;
4151 frame_->Push(Factory::true_value());
4152 if (false_target.is_linked()) {
4153 loaded.Jump();
4154 false_target.Bind();
4155 frame_->Push(Factory::false_value());
4156 loaded.Bind();
4157 }
4158
4159 } else {
4160 // We have a valid value on top of the frame, but we still may
4161 // have dangling jumps to the true and false targets from nested
4162 // subexpressions (eg, the left subexpressions of the
4163 // short-circuited boolean operators).
4164 ASSERT(has_valid_frame());
4165 if (true_target.is_linked() || false_target.is_linked()) {
4166 JumpTarget loaded;
4167 loaded.Jump(); // Don't lose the current TOS.
4168 if (true_target.is_linked()) {
4169 true_target.Bind();
4170 frame_->Push(Factory::true_value());
4171 if (false_target.is_linked()) {
4172 loaded.Jump();
4173 }
4174 }
4175 if (false_target.is_linked()) {
4176 false_target.Bind();
4177 frame_->Push(Factory::false_value());
4178 }
4179 loaded.Bind();
4180 }
4181 }
4182
4183 ASSERT(has_valid_frame());
4184 ASSERT(frame_->height() == original_height + 1);
4185}
4186
4187
4188// Emit code to load the value of an expression to the top of the
4189// frame. If the expression is boolean-valued it may be compiled (or
4190// partially compiled) into control flow to the control destination.
4191// If force_control is true, control flow is forced.
4192void CodeGenerator::LoadCondition(Expression* x,
4193 TypeofState typeof_state,
4194 ControlDestination* dest,
4195 bool force_control) {
4196 ASSERT(!in_spilled_code());
4197 int original_height = frame_->height();
4198
4199 { CodeGenState new_state(this, typeof_state, dest);
4200 Visit(x);
4201
4202 // If we hit a stack overflow, we may not have actually visited
4203 // the expression. In that case, we ensure that we have a
4204 // valid-looking frame state because we will continue to generate
4205 // code as we unwind the C++ stack.
4206 //
4207 // It's possible to have both a stack overflow and a valid frame
4208 // state (eg, a subexpression overflowed, visiting it returned
4209 // with a dummied frame state, and visiting this expression
4210 // returned with a normal-looking state).
4211 if (HasStackOverflow() &&
4212 !dest->is_used() &&
4213 frame_->height() == original_height) {
4214 dest->Goto(true);
4215 }
4216 }
4217
4218 if (force_control && !dest->is_used()) {
4219 // Convert the TOS value into flow to the control destination.
4220 // TODO(X64): Make control flow to control destinations work.
4221 ToBoolean(dest);
4222 }
4223
4224 ASSERT(!(force_control && !dest->is_used()));
4225 ASSERT(dest->is_used() || frame_->height() == original_height + 1);
4226}
4227
4228
4229class ToBooleanStub: public CodeStub {
4230 public:
4231 ToBooleanStub() { }
4232
4233 void Generate(MacroAssembler* masm);
4234
4235 private:
4236 Major MajorKey() { return ToBoolean; }
4237 int MinorKey() { return 0; }
4238};
4239
4240
4241// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
4242// convert it to a boolean in the condition code register or jump to
4243// 'false_target'/'true_target' as appropriate.
4244void CodeGenerator::ToBoolean(ControlDestination* dest) {
4245 Comment cmnt(masm_, "[ ToBoolean");
4246
4247 // The value to convert should be popped from the frame.
4248 Result value = frame_->Pop();
4249 value.ToRegister();
4250 // Fast case checks.
4251
4252 // 'false' => false.
4253 __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
4254 dest->false_target()->Branch(equal);
4255
4256 // 'true' => true.
4257 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
4258 dest->true_target()->Branch(equal);
4259
4260 // 'undefined' => false.
4261 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4262 dest->false_target()->Branch(equal);
4263
4264 // Smi => false iff zero.
4265 Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
4266 dest->false_target()->Branch(equals);
4267 Condition is_smi = masm_->CheckSmi(value.reg());
4268 dest->true_target()->Branch(is_smi);
4269
4270 // Call the stub for all other cases.
4271 frame_->Push(&value); // Undo the Pop() from above.
4272 ToBooleanStub stub;
4273 Result temp = frame_->CallStub(&stub, 1);
4274 // Convert the result to a condition code.
4275 __ testq(temp.reg(), temp.reg());
4276 temp.Unuse();
4277 dest->Split(not_equal);
4278}
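// Illustrative summary (ECMA-262 section 9.2 semantics, not part of the
// original source) of the fast paths above:
//   false, undefined, the smi 0   -> false target
//   true, any non-zero smi        -> true target
// Everything else (strings, heap numbers, other heap objects) reaches
// ToBooleanStub; for example the empty string "" only becomes false via the
// stub, not via one of the inline checks.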
4279
4280
4281void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
4282 UNIMPLEMENTED();
4283 // TODO(X64): Implement security policy for loads of smis.
4284}
4285
4286
4287bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
4288 return false;
4289}
4290
4291// -------------------------------------------------------------------------
4292// CodeGenerator implementation of variables, lookups, and stores.
4293
4294Reference::Reference(CodeGenerator* cgen, Expression* expression)
4295 : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
4296 cgen->LoadReference(this);
4297}
4298
4299
4300Reference::~Reference() {
4301 cgen_->UnloadReference(this);
4302}
4303
4304
4305void CodeGenerator::LoadReference(Reference* ref) {
4306 // References are loaded from both spilled and unspilled code. Set the
4307 // state to unspilled to allow that (and explicitly spill after
4308 // construction at the construction sites).
4309 bool was_in_spilled_code = in_spilled_code_;
4310 in_spilled_code_ = false;
4311
4312 Comment cmnt(masm_, "[ LoadReference");
4313 Expression* e = ref->expression();
4314 Property* property = e->AsProperty();
4315 Variable* var = e->AsVariableProxy()->AsVariable();
4316
4317 if (property != NULL) {
4318 // The expression is either a property or a variable proxy that rewrites
4319 // to a property.
4320 Load(property->obj());
4321 // We use a named reference if the key is a literal symbol, unless it is
4322 // a string that can be legally parsed as an integer. This is because
4323 // otherwise we will not get into the slow case code that handles [] on
4324 // String objects.
4325 Literal* literal = property->key()->AsLiteral();
4326 uint32_t dummy;
4327 if (literal != NULL &&
4328 literal->handle()->IsSymbol() &&
4329 !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
4330 ref->set_type(Reference::NAMED);
4331 } else {
4332 Load(property->key());
4333 ref->set_type(Reference::KEYED);
4334 }
4335 } else if (var != NULL) {
4336 // The expression is a variable proxy that does not rewrite to a
4337 // property. Global variables are treated as named property references.
4338 if (var->is_global()) {
4339 LoadGlobal();
4340 ref->set_type(Reference::NAMED);
4341 } else {
4342 ASSERT(var->slot() != NULL);
4343 ref->set_type(Reference::SLOT);
4344 }
4345 } else {
4346 // Anything else is a runtime error.
4347 Load(e);
4348 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
4349 }
4350
4351 in_spilled_code_ = was_in_spilled_code;
4352}
4353
4354
4355void CodeGenerator::UnloadReference(Reference* ref) {
4356 // Pop a reference from the stack while preserving TOS.
4357 Comment cmnt(masm_, "[ UnloadReference");
4358 frame_->Nip(ref->size());
4359}
4360
4361
4362Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
4363 // Currently, this assertion will fail if we try to assign to
4364 // a constant variable that is constant because it is read-only
4365 // (such as the variable referring to a named function expression).
4366 // We need to implement assignments to read-only variables.
4367 // Ideally, we should do this during AST generation (by converting
4368 // such assignments into expression statements); however, in general
4369 // we may not be able to make the decision until past AST generation,
4370 // that is when the entire program is known.
4371 ASSERT(slot != NULL);
4372 int index = slot->index();
4373 switch (slot->type()) {
4374 case Slot::PARAMETER:
4375 return frame_->ParameterAt(index);
4376
4377 case Slot::LOCAL:
4378 return frame_->LocalAt(index);
4379
4380 case Slot::CONTEXT: {
4381 // Follow the context chain if necessary.
4382 ASSERT(!tmp.is(rsi)); // do not overwrite context register
4383 Register context = rsi;
4384 int chain_length = scope()->ContextChainLength(slot->var()->scope());
4385 for (int i = 0; i < chain_length; i++) {
4386 // Load the closure.
4387 // (All contexts, even 'with' contexts, have a closure,
4388 // and it is the same for all contexts inside a function.
4389 // There is no need to go to the function context first.)
4390 __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
4391 // Load the function context (which is the incoming, outer context).
4392 __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
4393 context = tmp;
4394 }
4395 // We may have a 'with' context now. Get the function context.
4396 // (In fact this mov may never be needed, since the scope analysis
4397 // may not permit a direct context access in this case and thus we are
4398 // always at a function context. However it is safe to dereference be-
4399 // cause the function context of a function context is itself. Before
4400 // deleting this mov we should try to create a counter-example first,
4401 // though...)
4402 __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
4403 return ContextOperand(tmp, index);
4404 }
4405
4406 default:
4407 UNREACHABLE();
4408 return Operand(rsp, 0);
4409 }
4410}
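// Illustrative sketch (not part of the original source) of the CONTEXT case
// above: a nested closure reading an outer function's heap-allocated
// variable, e.g.
//   function outer() { var v = 1; return function inner() { return v; }; }
// walks one link per scope level: load the closure stored in the current
// context, follow its JSFunction context field out to the enclosing
// context, then index the resulting function context at v's slot.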
4411
4412
4413Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
4414 Result tmp,
4415 JumpTarget* slow) {
4416 ASSERT(slot->type() == Slot::CONTEXT);
4417 ASSERT(tmp.is_register());
4418 Register context = rsi;
4419
4420 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
4421 if (s->num_heap_slots() > 0) {
4422 if (s->calls_eval()) {
4423 // Check that extension is NULL.
4424 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4425 Immediate(0));
4426 slow->Branch(not_equal, not_taken);
4427 }
4428 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4429 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4430 context = tmp.reg();
4431 }
4432 }
4433 // Check that last extension is NULL.
4434 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
4435 slow->Branch(not_equal, not_taken);
4436 __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
4437 return ContextOperand(tmp.reg(), slot->index());
4438}
4439
4440
4441void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
4442 if (slot->type() == Slot::LOOKUP) {
4443 ASSERT(slot->var()->is_dynamic());
4444
4445 JumpTarget slow;
4446 JumpTarget done;
4447 Result value;
4448
4449 // Generate fast-case code for variables that might be shadowed by
4450 // eval-introduced variables. Eval is used a lot without
4451 // introducing variables. In those cases, we do not want to
4452 // perform a runtime call for all variables in the scope
4453 // containing the eval.
4454 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
4455 value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
4456 // If there was no control flow to slow, we can exit early.
4457 if (!slow.is_linked()) {
4458 frame_->Push(&value);
4459 return;
4460 }
4461
4462 done.Jump(&value);
4463
4464 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
4465 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
4466 // Only generate the fast case for locals that rewrite to slots.
4467 // This rules out argument loads.
4468 if (potential_slot != NULL) {
4469 // Allocate a fresh register to use as a temp in
4470 // ContextSlotOperandCheckExtensions and to hold the result
4471 // value.
4472 value = allocator_->Allocate();
4473 ASSERT(value.is_valid());
4474 __ movq(value.reg(),
4475 ContextSlotOperandCheckExtensions(potential_slot,
4476 value,
4477 &slow));
4478 if (potential_slot->var()->mode() == Variable::CONST) {
4479 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4480 done.Branch(not_equal, &value);
4481 __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4482 }
4483 // There is always control flow to slow from
4484 // ContextSlotOperandCheckExtensions so we have to jump around
4485 // it.
4486 done.Jump(&value);
4487 }
4488 }
4489
4490 slow.Bind();
4491 // A runtime call is inevitable. We eagerly sync frame elements
4492 // to memory so that we can push the arguments directly into place
4493 // on top of the frame.
4494 frame_->SyncRange(0, frame_->element_count() - 1);
4495 frame_->EmitPush(rsi);
4496 __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
4497 frame_->EmitPush(kScratchRegister);
4498 if (typeof_state == INSIDE_TYPEOF) {
4499 value =
4500 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
4501 } else {
4502 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4503 }
4504
4505 done.Bind(&value);
4506 frame_->Push(&value);
4507
4508 } else if (slot->var()->mode() == Variable::CONST) {
4509 // Const slots may contain 'the hole' value (the constant hasn't been
4510 // initialized yet) which needs to be converted into the 'undefined'
4511 // value.
4512 //
4513 // We currently spill the virtual frame because constants use the
4514 // potentially unsafe direct-frame access of SlotOperand.
4515 VirtualFrame::SpilledScope spilled_scope;
4516 Comment cmnt(masm_, "[ Load const");
4517 JumpTarget exit;
4518 __ movq(rcx, SlotOperand(slot, rcx));
4519 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4520 exit.Branch(not_equal);
4521 __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
4522 exit.Bind();
4523 frame_->EmitPush(rcx);
4524
4525 } else if (slot->type() == Slot::PARAMETER) {
4526 frame_->PushParameterAt(slot->index());
4527
4528 } else if (slot->type() == Slot::LOCAL) {
4529 frame_->PushLocalAt(slot->index());
4530
4531 } else {
4532 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
4533 // here.
4534 //
4535 // The use of SlotOperand below is safe for an unspilled frame
4536 // because it will always be a context slot.
4537 ASSERT(slot->type() == Slot::CONTEXT);
4538 Result temp = allocator_->Allocate();
4539 ASSERT(temp.is_valid());
4540 __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
4541 frame_->Push(&temp);
4542 }
4543}
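// Illustrative example (a sketch of the pre-ES6, V8-extension const
// semantics the hole check above implements, not part of the original
// source): reading a const before its initializer has run yields undefined
// rather than throwing, e.g.
//   function f() { var seen = x; const x = 1; return seen; }  // undefined
// because the slot starts out holding the hole value and is only replaced
// by 1 when the initializer executes.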
4544
4545
4546void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
4547 TypeofState state) {
4548 LoadFromSlot(slot, state);
4549
4550 // Bail out quickly if we're not using lazy arguments allocation.
4551 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
4552
4553 // ... or if the slot isn't a non-parameter arguments slot.
4554 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
4555
4556 // Pop the loaded value from the stack.
4557 Result value = frame_->Pop();
4558
4559 // If the loaded value is a constant, we know whether the arguments
4560 // object has been lazily allocated yet.
4561 if (value.is_constant()) {
4562 if (value.handle()->IsTheHole()) {
4563 Result arguments = StoreArgumentsObject(false);
4564 frame_->Push(&arguments);
4565 } else {
4566 frame_->Push(&value);
4567 }
4568 return;
4569 }
4570
4571 // The loaded value is in a register. If it is the sentinel that
4572 // indicates that we haven't loaded the arguments object yet, we
4573 // need to do it now.
4574 JumpTarget exit;
4575 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4576 frame_->Push(&value);
4577 exit.Branch(not_equal);
4578 Result arguments = StoreArgumentsObject(false);
4579 frame_->SetElementAt(0, &arguments);
4580 exit.Bind();
4581}
4582
4583
4584void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
4585 if (slot->type() == Slot::LOOKUP) {
4586 ASSERT(slot->var()->is_dynamic());
4587
4588 // For now, just do a runtime call. Since the call is inevitable,
4589 // we eagerly sync the virtual frame so we can directly push the
4590 // arguments into place.
4591 frame_->SyncRange(0, frame_->element_count() - 1);
4592
4593 frame_->EmitPush(rsi);
4594 frame_->EmitPush(slot->var()->name());
4595
4596 Result value;
4597 if (init_state == CONST_INIT) {
4598 // Same as the case for a normal store, but ignores attribute
4599 // (e.g. READ_ONLY) of context slot so that we can initialize const
4600 // properties (introduced via eval("const foo = (some expr);")). Also,
4601 // uses the current function context instead of the top context.
4602 //
4603 // Note that we must declare the foo upon entry of eval(), via a
4604 // context slot declaration, but we cannot initialize it at the same
4605 // time, because the const declaration may be at the end of the eval
4606 // code (sigh...) and the const variable may have been used before
4607 // (where its value is 'undefined'). Thus, we can only do the
4608 // initialization when we actually encounter the expression and when
4609 // the expression operands are defined and valid, and thus we need the
4610 // split into 2 operations: declaration of the context slot followed
4611 // by initialization.
4612 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4613 } else {
4614 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
4615 }
4616 // Storing a variable must keep the (new) value on the expression
4617 // stack. This is necessary for compiling chained assignment
4618 // expressions.
4619 frame_->Push(&value);
4620 } else {
4621 ASSERT(!slot->var()->is_dynamic());
4622
4623 JumpTarget exit;
4624 if (init_state == CONST_INIT) {
4625 ASSERT(slot->var()->mode() == Variable::CONST);
4626 // Only the first const initialization must be executed (the slot
4627 // still contains 'the hole' value). When the assignment is executed,
4628 // the code is identical to a normal store (see below).
4629 //
4630 // We spill the frame in the code below because the direct-frame
4631 // access of SlotOperand is potentially unsafe with an unspilled
4632 // frame.
4633 VirtualFrame::SpilledScope spilled_scope;
4634 Comment cmnt(masm_, "[ Init const");
4635 __ movq(rcx, SlotOperand(slot, rcx));
4636 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4637 exit.Branch(not_equal);
4638 }
4639
4640 // We must execute the store. Storing a variable must keep the (new)
4641 // value on the stack. This is necessary for compiling assignment
4642 // expressions.
4643 //
4644 // Note: We will reach here even with slot->var()->mode() ==
4645 // Variable::CONST because of const declarations which will initialize
4646 // consts to 'the hole' value and by doing so, end up calling this code.
4647 if (slot->type() == Slot::PARAMETER) {
4648 frame_->StoreToParameterAt(slot->index());
4649 } else if (slot->type() == Slot::LOCAL) {
4650 frame_->StoreToLocalAt(slot->index());
4651 } else {
4652 // The other slot types (LOOKUP and GLOBAL) cannot reach here.
4653 //
4654 // The use of SlotOperand below is safe for an unspilled frame
4655 // because the slot is a context slot.
4656 ASSERT(slot->type() == Slot::CONTEXT);
4657 frame_->Dup();
4658 Result value = frame_->Pop();
4659 value.ToRegister();
4660 Result start = allocator_->Allocate();
4661 ASSERT(start.is_valid());
4662 __ movq(SlotOperand(slot, start.reg()), value.reg());
4663 // RecordWrite may destroy the value registers.
4664 //
4665 // TODO(204): Avoid actually spilling when the value is not
4666 // needed (probably the common case).
4667 frame_->Spill(value.reg());
4668 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4669 Result temp = allocator_->Allocate();
4670 ASSERT(temp.is_valid());
4671 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
4672 // The results start, value, and temp are unused by going out of
4673 // scope.
4674 }
4675
4676 exit.Bind();
4677 }
4678}
4679
4680
4681Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
4682 Slot* slot,
4683 TypeofState typeof_state,
4684 JumpTarget* slow) {
4685 // Check that no extension objects have been created by calls to
4686 // eval from the current scope to the global scope.
4687 Register context = rsi;
4688 Result tmp = allocator_->Allocate();
4689 ASSERT(tmp.is_valid()); // All non-reserved registers were available.
4690
4691 Scope* s = scope();
4692 while (s != NULL) {
4693 if (s->num_heap_slots() > 0) {
4694 if (s->calls_eval()) {
4695 // Check that extension is NULL.
4696 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4697 Immediate(0));
4698 slow->Branch(not_equal, not_taken);
4699 }
4700 // Load next context in chain.
4701 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4702 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4703 context = tmp.reg();
4704 }
4705 // If no outer scope calls eval, we do not need to check more
4706 // context extensions. If we have reached an eval scope, we check
4707 // all extensions from this point.
4708 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
4709 s = s->outer_scope();
4710 }
4711
4712 if (s->is_eval_scope()) {
4713 // Loop up the context chain. There is no frame effect so it is
4714 // safe to use raw labels here.
4715 Label next, fast;
4716 if (!context.is(tmp.reg())) {
4717 __ movq(tmp.reg(), context);
4718 }
4719 // Load map for comparison into register, outside loop.
4720 __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
4721 __ bind(&next);
4722 // Terminate at global context.
4723 __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
4724 __ j(equal, &fast);
4725 // Check that extension is NULL.
4726 __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
4727 slow->Branch(not_equal);
4728 // Load next context in chain.
4729 __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
4730 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4731 __ jmp(&next);
4732 __ bind(&fast);
4733 }
4734 tmp.Unuse();
4735
4736 // All extension objects were empty and it is safe to use a global
4737 // load IC call.
4738 LoadGlobal();
4739 frame_->Push(slot->var()->name());
4740 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
4741 ? RelocInfo::CODE_TARGET
4742 : RelocInfo::CODE_TARGET_CONTEXT;
4743 Result answer = frame_->CallLoadIC(mode);
4744 // A test rax instruction following the call signals that the inobject
4745 // property case was inlined. Ensure that there is not a test rax
4746 // instruction here.
4747 masm_->nop();
4748 // Discard the global object. The result is in answer.
4749 frame_->Drop();
4750 return answer;
4751}
4752
4753
4754void CodeGenerator::LoadGlobal() {
4755 if (in_spilled_code()) {
4756 frame_->EmitPush(GlobalObject());
4757 } else {
4758 Result temp = allocator_->Allocate();
4759 __ movq(temp.reg(), GlobalObject());
4760 frame_->Push(&temp);
4761 }
4762}
4763
4764
4765void CodeGenerator::LoadGlobalReceiver() {
4766 Result temp = allocator_->Allocate();
4767 Register reg = temp.reg();
4768 __ movq(reg, GlobalObject());
4769 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
4770 frame_->Push(&temp);
4771}
4772
4773
4774ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
4775 if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
4776 ASSERT(scope_->arguments_shadow() != NULL);
4777 // We don't want to do lazy arguments allocation for functions that
4778 // have heap-allocated contexts, because it interferes with the
4779 // uninitialized const tracking in the context objects.
4780 return (scope_->num_heap_slots() > 0)
4781 ? EAGER_ARGUMENTS_ALLOCATION
4782 : LAZY_ARGUMENTS_ALLOCATION;
4783}
4784
4785
4786Result CodeGenerator::StoreArgumentsObject(bool initial) {
4787 ArgumentsAllocationMode mode = ArgumentsMode();
4788 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
4789
4790 Comment cmnt(masm_, "[ store arguments object");
4791 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
4792 // When using lazy arguments allocation, we store the hole value
4793 // as a sentinel indicating that the arguments object hasn't been
4794 // allocated yet.
4795 frame_->Push(Factory::the_hole_value());
4796 } else {
4797 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
4798 frame_->PushFunction();
4799 frame_->PushReceiverSlotAddress();
4800 frame_->Push(Smi::FromInt(scope_->num_parameters()));
4801 Result result = frame_->CallStub(&stub, 3);
4802 frame_->Push(&result);
4803 }
4804
4805 { Reference shadow_ref(this, scope_->arguments_shadow());
4806 Reference arguments_ref(this, scope_->arguments());
4807 ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
4808 // Here we rely on the convenient property that references to slots
4809 // take up zero space in the frame (i.e., it doesn't matter that the
4810 // stored value is actually below the reference on the frame).
4811 JumpTarget done;
4812 bool skip_arguments = false;
4813 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
4814 // We have to skip storing into the arguments slot if it has
4815 // already been written to. This can happen if a function
4816 // has a local variable named 'arguments'.
4817 LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
4818 Result arguments = frame_->Pop();
4819 if (arguments.is_constant()) {
4820 // We have to skip updating the arguments object if it has
4821 // been assigned a proper value.
4822 skip_arguments = !arguments.handle()->IsTheHole();
4823 } else {
4824 __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex);
4825 arguments.Unuse();
4826 done.Branch(not_equal);
4827 }
4828 }
4829 if (!skip_arguments) {
4830 arguments_ref.SetValue(NOT_CONST_INIT);
4831 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
4832 }
4833 shadow_ref.SetValue(NOT_CONST_INIT);
4834 }
4835 return frame_->Pop();
4836}
4837
4838
4839// TODO(1241834): Get rid of this function in favor of just using Load, now
4840// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
4841// variables w/o reference errors elsewhere.
4842void CodeGenerator::LoadTypeofExpression(Expression* x) {
4843 Variable* variable = x->AsVariableProxy()->AsVariable();
4844 if (variable != NULL && !variable->is_this() && variable->is_global()) {
4845 // NOTE: This is somewhat nasty. We force the compiler to load
4846 // the variable as if through '<global>.<variable>' to make sure we
4847 // do not get reference errors.
4848 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
4849 Literal key(variable->name());
4850 // TODO(1241834): Fetch the position from the variable instead of using
4851 // no position.
4852 Property property(&global, &key, RelocInfo::kNoPosition);
4853 Load(&property);
4854 } else {
4855 Load(x, INSIDE_TYPEOF);
4856 }
4857}
4858
4859
4860void CodeGenerator::Comparison(Condition cc,
4861 bool strict,
4862 ControlDestination* dest) {
4863 // Strict only makes sense for equality comparisons.
4864 ASSERT(!strict || cc == equal);
4865
4866 Result left_side;
4867 Result right_side;
4868 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
4869 if (cc == greater || cc == less_equal) {
4870 cc = ReverseCondition(cc);
4871 left_side = frame_->Pop();
4872 right_side = frame_->Pop();
4873 } else {
4874 right_side = frame_->Pop();
4875 left_side = frame_->Pop();
4876 }
4877 ASSERT(cc == less || cc == equal || cc == greater_equal);
4878
4879 // If either side is a constant smi, optimize the comparison.
4880 bool left_side_constant_smi =
4881 left_side.is_constant() && left_side.handle()->IsSmi();
4882 bool right_side_constant_smi =
4883 right_side.is_constant() && right_side.handle()->IsSmi();
4884 bool left_side_constant_null =
4885 left_side.is_constant() && left_side.handle()->IsNull();
4886 bool right_side_constant_null =
4887 right_side.is_constant() && right_side.handle()->IsNull();
4888
4889 if (left_side_constant_smi || right_side_constant_smi) {
4890 if (left_side_constant_smi && right_side_constant_smi) {
4891 // Trivial case, comparing two constants.
4892 int left_value = Smi::cast(*left_side.handle())->value();
4893 int right_value = Smi::cast(*right_side.handle())->value();
4894 switch (cc) {
4895 case less:
4896 dest->Goto(left_value < right_value);
4897 break;
4898 case equal:
4899 dest->Goto(left_value == right_value);
4900 break;
4901 case greater_equal:
4902 dest->Goto(left_value >= right_value);
4903 break;
4904 default:
4905 UNREACHABLE();
4906 }
4907 } else { // Only one side is a constant Smi.
4908 // If left side is a constant Smi, reverse the operands.
4909 // Since one side is a constant Smi, conversion order does not matter.
4910 if (left_side_constant_smi) {
4911 Result temp = left_side;
4912 left_side = right_side;
4913 right_side = temp;
4914 cc = ReverseCondition(cc);
4915 // This may reintroduce greater or less_equal as the value of cc.
4916 // CompareStub and the inline code both support all values of cc.
4917 }
4918 // Implement comparison against a constant Smi, inlining the case
4919 // where both sides are Smis.
4920 left_side.ToRegister();
4921
4922 // Here we split control flow to the stub call and inlined cases
4923 // before finally splitting it to the control destination. We use
4924 // a jump target and branching to duplicate the virtual frame at
4925 // the first split. We manually handle the off-frame references
4926 // by reconstituting them on the non-fall-through path.
4927 JumpTarget is_smi;
4928 Register left_reg = left_side.reg();
4929 Handle<Object> right_val = right_side.handle();
4930
4931 Condition left_is_smi = masm_->CheckSmi(left_side.reg());
4932 is_smi.Branch(left_is_smi);
4933
4934 // Set up and call the compare stub.
4935 CompareStub stub(cc, strict);
4936 Result result = frame_->CallStub(&stub, &left_side, &right_side);
4937 result.ToRegister();
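     // The stub returns a smi that is negative, zero, or positive, so
     // testing it against itself sets the flags needed for the branch on cc.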
4938 __ testq(result.reg(), result.reg());
4939 result.Unuse();
4940 dest->true_target()->Branch(cc);
4941 dest->false_target()->Jump();
4942
4943 is_smi.Bind();
4944 left_side = Result(left_reg);
4945 right_side = Result(right_val);
4946 // Test smi equality and comparison by signed int comparison.
4947 // Both sides are smis, so we can use an Immediate.
4948 __ cmpl(left_side.reg(), Immediate(Smi::cast(*right_side.handle())));
4949 left_side.Unuse();
4950 right_side.Unuse();
4951 dest->Split(cc);
4952 }
4953 } else if (cc == equal &&
4954 (left_side_constant_null || right_side_constant_null)) {
4955 // To make null checks efficient, we check if either the left side or
4956 // the right side is the constant 'null'.
4957 // If so, we optimize the code by inlining a null check instead of
4958 // calling the (very) general runtime routine for checking equality.
4959 Result operand = left_side_constant_null ? right_side : left_side;
4960 right_side.Unuse();
4961 left_side.Unuse();
4962 operand.ToRegister();
4963 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
4964 if (strict) {
4965 operand.Unuse();
4966 dest->Split(equal);
4967 } else {
4968 // The 'null' value is only equal to 'undefined' if using non-strict
4969 // comparisons.
4970 dest->true_target()->Branch(equal);
4971 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
4972 dest->true_target()->Branch(equal);
4973 Condition is_smi = masm_->CheckSmi(operand.reg());
4974 dest->false_target()->Branch(is_smi);
4975
4976 // It can be an undetectable object.
4977 // Use a scratch register in preference to spilling operand.reg().
4978 Result temp = allocator()->Allocate();
4979 ASSERT(temp.is_valid());
4980 __ movq(temp.reg(),
4981 FieldOperand(operand.reg(), HeapObject::kMapOffset));
4982 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
4983 Immediate(1 << Map::kIsUndetectable));
4984 temp.Unuse();
4985 operand.Unuse();
4986 dest->Split(not_zero);
4987 }
4988 } else { // Neither side is a constant Smi or null.
4989 // If either side is a non-smi constant, skip the smi check.
4990 bool known_non_smi =
4991 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
4992 (right_side.is_constant() && !right_side.handle()->IsSmi());
4993 left_side.ToRegister();
4994 right_side.ToRegister();
4995
4996 if (known_non_smi) {
4997 // When non-smi, call out to the compare stub.
4998 CompareStub stub(cc, strict);
4999 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5000 // The result is a Smi, which is negative, zero, or positive.
5001 __ testl(answer.reg(), answer.reg()); // Sets both zero and sign flags.
5002 answer.Unuse();
5003 dest->Split(cc);
5004 } else {
5005 // Here we split control flow to the stub call and inlined cases
5006 // before finally splitting it to the control destination. We use
5007 // a jump target and branching to duplicate the virtual frame at
5008 // the first split. We manually handle the off-frame references
5009 // by reconstituting them on the non-fall-through path.
5010 JumpTarget is_smi;
5011 Register left_reg = left_side.reg();
5012 Register right_reg = right_side.reg();
5013
5014 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
5015 is_smi.Branch(both_smi);
5016 // When non-smi, call out to the compare stub.
5017 CompareStub stub(cc, strict);
5018 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5019 __ testl(answer.reg(), answer.reg()); // Sets both zero and sign flags.
5020 answer.Unuse();
5021 dest->true_target()->Branch(cc);
5022 dest->false_target()->Jump();
5023
5024 is_smi.Bind();
5025 left_side = Result(left_reg);
5026 right_side = Result(right_reg);
5027 __ cmpl(left_side.reg(), right_side.reg());
5028 right_side.Unuse();
5029 left_side.Unuse();
5030 dest->Split(cc);
5031 }
5032 }
5033}
5034
5035
5036class DeferredInlineBinaryOperation: public DeferredCode {
5037 public:
5038 DeferredInlineBinaryOperation(Token::Value op,
5039 Register dst,
5040 Register left,
5041 Register right,
5042 OverwriteMode mode)
5043 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
5044 set_comment("[ DeferredInlineBinaryOperation");
5045 }
5046
5047 virtual void Generate();
5048
5049 private:
5050 Token::Value op_;
5051 Register dst_;
5052 Register left_;
5053 Register right_;
5054 OverwriteMode mode_;
5055};
5056
5057
5058void DeferredInlineBinaryOperation::Generate() {
5059 __ push(left_);
5060 __ push(right_);
5061 GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
5062 __ CallStub(&stub);
5063 if (!dst_.is(rax)) __ movq(dst_, rax);
5064}
5065
5066
5067void CodeGenerator::GenericBinaryOperation(Token::Value op,
5068 SmiAnalysis* type,
5069 OverwriteMode overwrite_mode) {
5070 Comment cmnt(masm_, "[ BinaryOperation");
5071 Comment cmnt_token(masm_, Token::String(op));
5072
5073 if (op == Token::COMMA) {
5074 // Simply discard left value.
5075 frame_->Nip(1);
5076 return;
5077 }
5078
5079 // Set the flags based on the operation, type and loop nesting level.
5080 GenericBinaryFlags flags;
5081 switch (op) {
5082 case Token::BIT_OR:
5083 case Token::BIT_AND:
5084 case Token::BIT_XOR:
5085 case Token::SHL:
5086 case Token::SHR:
5087 case Token::SAR:
5088 // Bit operations always assume they likely operate on Smis. Still only
5089 // generate the inline Smi check code if this operation is part of a loop.
5090 flags = (loop_nesting() > 0)
5091 ? SMI_CODE_INLINED
5092 : SMI_CODE_IN_STUB;
5093 break;
5094
5095 default:
5096 // By default only inline the Smi check code for likely smis if this
5097 // operation is part of a loop.
5098 flags = ((loop_nesting() > 0) && type->IsLikelySmi())
5099 ? SMI_CODE_INLINED
5100 : SMI_CODE_IN_STUB;
5101 break;
5102 }
5103
5104 Result right = frame_->Pop();
5105 Result left = frame_->Pop();
5106
5107 if (op == Token::ADD) {
5108 bool left_is_string = left.is_constant() && left.handle()->IsString();
5109 bool right_is_string = right.is_constant() && right.handle()->IsString();
5110 if (left_is_string || right_is_string) {
5111 frame_->Push(&left);
5112 frame_->Push(&right);
5113 Result answer;
5114 if (left_is_string) {
5115 if (right_is_string) {
5116 // TODO(lrn): if both are constant strings
5117 // -- do a compile time cons, if allocation during codegen is allowed.
5118 answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
5119 } else {
5120 answer =
5121 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
5122 }
5123 } else if (right_is_string) {
5124 answer =
5125 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
5126 }
5127 frame_->Push(&answer);
5128 return;
5129 }
5130 // Neither operand is known to be a string.
5131 }
5132
5133 bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
5134 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
5135 bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
5136 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
5137 bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
5138
5139 if (left_is_smi && right_is_smi) {
5140 // Compute the constant result at compile time, and leave it on the frame.
5141 int left_int = Smi::cast(*left.handle())->value();
5142 int right_int = Smi::cast(*right.handle())->value();
5143 if (FoldConstantSmis(op, left_int, right_int)) return;
5144 }
5145
5146 if (left_is_non_smi || right_is_non_smi) {
5147 // Set flag so that we go straight to the slow case, with no smi code.
5148 generate_no_smi_code = true;
5149 } else if (right_is_smi) {
5150 ConstantSmiBinaryOperation(op, &left, right.handle(),
5151 type, false, overwrite_mode);
5152 return;
5153 } else if (left_is_smi) {
5154 ConstantSmiBinaryOperation(op, &right, left.handle(),
5155 type, true, overwrite_mode);
5156 return;
5157 }
5158
5159 if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
5160 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
5161 } else {
5162 frame_->Push(&left);
5163 frame_->Push(&right);
5164 // If we know the arguments aren't smis, use the binary operation stub
5165 // that does not check for the fast smi case.
5166 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
5167 if (generate_no_smi_code) {
5168 flags = SMI_CODE_INLINED;
5169 }
5170 GenericBinaryOpStub stub(op, overwrite_mode, flags);
5171 Result answer = frame_->CallStub(&stub, 2);
5172 frame_->Push(&answer);
5173 }
5174}
5175
5176
5177// Emit a LoadIC call to get the value from receiver and leave it in
5178// dst. The receiver register is restored after the call.
5179class DeferredReferenceGetNamedValue: public DeferredCode {
5180 public:
5181 DeferredReferenceGetNamedValue(Register dst,
5182 Register receiver,
5183 Handle<String> name)
5184 : dst_(dst), receiver_(receiver), name_(name) {
5185 set_comment("[ DeferredReferenceGetNamedValue");
5186 }
5187
5188 virtual void Generate();
5189
5190 Label* patch_site() { return &patch_site_; }
5191
5192 private:
5193 Label patch_site_;
5194 Register dst_;
5195 Register receiver_;
5196 Handle<String> name_;
5197};
5198
5199
5200void DeferredReferenceGetNamedValue::Generate() {
5201 __ push(receiver_);
5202 __ Move(rcx, name_);
5203 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5204 __ Call(ic, RelocInfo::CODE_TARGET);
5205 // The call must be followed by a test rax instruction to indicate
5206 // that the inobject property case was inlined.
5207 //
5208 // Store the delta to the map check instruction here in the test
5209 // instruction. Use masm_-> instead of the __ macro since the
5210 // latter can't return a value.
5211 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5212 // Here we use masm_-> instead of the __ macro because this is the
5213 // instruction that gets patched and coverage code gets in the way.
5214 masm_->testl(rax, Immediate(-delta_to_patch_site));
5215 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5216
5217 if (!dst_.is(rax)) __ movq(dst_, rax);
5218 __ pop(receiver_);
5219}
5220
5221
5222void DeferredInlineSmiAdd::Generate() {
5223 __ push(dst_);
5224 __ push(Immediate(value_));
5225 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
5226 __ CallStub(&igostub);
5227 if (!dst_.is(rax)) __ movq(dst_, rax);
5228}
5229
5230
5231void DeferredInlineSmiAddReversed::Generate() {
5232 __ push(Immediate(value_)); // Note: sign extended.
5233 __ push(dst_);
5234 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
5235 __ CallStub(&igostub);
5236 if (!dst_.is(rax)) __ movq(dst_, rax);
5237}
5238
5239
5240void DeferredInlineSmiSub::Generate() {
5241 __ push(dst_);
5242 __ push(Immediate(value_)); // Note: sign extended.
5243 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
5244 __ CallStub(&igostub);
5245 if (!dst_.is(rax)) __ movq(dst_, rax);
5246}
5247
5248
5249void DeferredInlineSmiOperation::Generate() {
5250 __ push(src_);
5251 __ push(Immediate(value_)); // Note: sign extended.
5252 // For mod we don't generate all the Smi code inline.
5253 GenericBinaryOpStub stub(
5254 op_,
5255 overwrite_mode_,
5256 (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
5257 __ CallStub(&stub);
5258 if (!dst_.is(rax)) __ movq(dst_, rax);
5259}
5260
5261
5262void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
5263 Result* operand,
5264 Handle<Object> value,
5265 SmiAnalysis* type,
5266 bool reversed,
5267 OverwriteMode overwrite_mode) {
5268 // NOTE: This is an attempt to inline (a bit) more of the code for
5269 // some possible smi operations (like + and -) when (at least) one
5270 // of the operands is a constant smi.
5271 // Consumes the argument "operand".
5272
5273 // TODO(199): Optimize some special cases of operations involving a
5274 // smi literal (multiply by 2, shift by 0, etc.).
5275 if (IsUnsafeSmi(value)) {
5276 Result unsafe_operand(value);
5277 if (reversed) {
5278 LikelySmiBinaryOperation(op, &unsafe_operand, operand,
5279 overwrite_mode);
5280 } else {
5281 LikelySmiBinaryOperation(op, operand, &unsafe_operand,
5282 overwrite_mode);
5283 }
5284 ASSERT(!operand->is_valid());
5285 return;
5286 }
5287
5288 // Get the literal value.
5289 Smi* smi_value = Smi::cast(*value);
5290 int int_value = smi_value->value();
5291
5292 switch (op) {
5293 case Token::ADD: {
5294 operand->ToRegister();
5295 frame_->Spill(operand->reg());
5296 DeferredCode* deferred = NULL;
5297 if (reversed) {
5298 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
5299 smi_value,
5300 overwrite_mode);
5301 } else {
5302 deferred = new DeferredInlineSmiAdd(operand->reg(),
5303 smi_value,
5304 overwrite_mode);
5305 }
5306 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5307 __ SmiAddConstant(operand->reg(),
5308 operand->reg(),
5309 int_value,
5310 deferred->entry_label());
5311 deferred->BindExit();
5312 frame_->Push(operand);
5313 break;
5314 }
5315
5316 case Token::SUB: {
5317 if (reversed) {
5318 Result constant_operand(value);
5319 LikelySmiBinaryOperation(op, &constant_operand, operand,
5320 overwrite_mode);
5321 } else {
5322 operand->ToRegister();
5323 frame_->Spill(operand->reg());
5324 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
5325 smi_value,
5326 overwrite_mode);
5327 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5328 // A smi currently fits in a 32-bit Immediate.
5329 __ SmiSubConstant(operand->reg(),
5330 operand->reg(),
5331 int_value,
5332 deferred->entry_label());
5333 deferred->BindExit();
5334 frame_->Push(operand);
5335 }
5336 break;
5337 }
5338
5339 case Token::SAR:
5340 if (reversed) {
5341 Result constant_operand(value);
5342 LikelySmiBinaryOperation(op, &constant_operand, operand,
5343 overwrite_mode);
5344 } else {
5345 // Only the least significant 5 bits of the shift value are used.
5346 // In the slow case, this masking is done inside the runtime call.
5347 int shift_value = int_value & 0x1f;
5348 operand->ToRegister();
5349 frame_->Spill(operand->reg());
5350 DeferredInlineSmiOperation* deferred =
5351 new DeferredInlineSmiOperation(op,
5352 operand->reg(),
5353 operand->reg(),
5354 smi_value,
5355 overwrite_mode);
5356 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5357 __ SmiShiftArithmeticRightConstant(operand->reg(),
5358 operand->reg(),
5359 shift_value);
5360 deferred->BindExit();
5361 frame_->Push(operand);
5362 }
5363 break;
5364
5365 case Token::SHR:
5366 if (reversed) {
5367 Result constant_operand(value);
5368 LikelySmiBinaryOperation(op, &constant_operand, operand,
5369 overwrite_mode);
5370 } else {
5371 // Only the least significant 5 bits of the shift value are used.
5372 // In the slow case, this masking is done inside the runtime call.
5373 int shift_value = int_value & 0x1f;
5374 operand->ToRegister();
5375 Result answer = allocator()->Allocate();
5376 ASSERT(answer.is_valid());
5377 DeferredInlineSmiOperation* deferred =
5378 new DeferredInlineSmiOperation(op,
5379 answer.reg(),
5380 operand->reg(),
5381 smi_value,
5382 overwrite_mode);
5383 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5384 __ SmiShiftLogicalRightConstant(answer.reg(),
5385 operand->reg(),
5386 shift_value,
5387 deferred->entry_label());
5388 deferred->BindExit();
5389 operand->Unuse();
5390 frame_->Push(&answer);
5391 }
5392 break;
5393
5394 case Token::SHL:
5395 if (reversed) {
5396 Result constant_operand(value);
5397 LikelySmiBinaryOperation(op, &constant_operand, operand,
5398 overwrite_mode);
5399 } else {
5400 // Only the least significant 5 bits of the shift value are used.
5401 // In the slow case, this masking is done inside the runtime call.
5402 int shift_value = int_value & 0x1f;
5403 operand->ToRegister();
5404 if (shift_value == 0) {
5405 // Spill operand so it can be overwritten in the slow case.
5406 frame_->Spill(operand->reg());
5407 DeferredInlineSmiOperation* deferred =
5408 new DeferredInlineSmiOperation(op,
5409 operand->reg(),
5410 operand->reg(),
5411 smi_value,
5412 overwrite_mode);
5413 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5414 deferred->BindExit();
5415 frame_->Push(operand);
5416 } else {
5417 // Use a fresh temporary for nonzero shift values.
5418 Result answer = allocator()->Allocate();
5419 ASSERT(answer.is_valid());
5420 DeferredInlineSmiOperation* deferred =
5421 new DeferredInlineSmiOperation(op,
5422 answer.reg(),
5423 operand->reg(),
5424 smi_value,
5425 overwrite_mode);
5426 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5427 __ SmiShiftLeftConstant(answer.reg(),
5428 operand->reg(),
5429 shift_value,
5430 deferred->entry_label());
5431 deferred->BindExit();
5432 operand->Unuse();
5433 frame_->Push(&answer);
5434 }
5435 }
5436 break;
5437
5438 case Token::BIT_OR:
5439 case Token::BIT_XOR:
5440 case Token::BIT_AND: {
5441 operand->ToRegister();
5442 frame_->Spill(operand->reg());
5443 if (reversed) {
5444 // Bit operations with a constant smi are commutative.
5445 // We can swap left and right operands with no problem.
5446 // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
5447 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
5448 }
5449 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
5450 operand->reg(),
5451 operand->reg(),
5452 smi_value,
5453 overwrite_mode);
5454 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5455 if (op == Token::BIT_AND) {
5456 __ SmiAndConstant(operand->reg(), operand->reg(), int_value);
5457 } else if (op == Token::BIT_XOR) {
5458 if (int_value != 0) {
5459 __ SmiXorConstant(operand->reg(), operand->reg(), int_value);
5460 }
5461 } else {
5462 ASSERT(op == Token::BIT_OR);
5463 if (int_value != 0) {
5464 __ SmiOrConstant(operand->reg(), operand->reg(), int_value);
5465 }
5466 }
5467 deferred->BindExit();
5468 frame_->Push(operand);
5469 break;
5470 }
5471
5472 // Generate inline code for mod of powers of 2 and negative powers of 2.
5473 case Token::MOD:
5474 if (!reversed &&
5475 int_value != 0 &&
5476 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
5477 operand->ToRegister();
5478 frame_->Spill(operand->reg());
5479 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
5480 operand->reg(),
5481 operand->reg(),
5482 smi_value,
5483 overwrite_mode);
5484 // Check for negative or non-Smi left hand side.
5485 __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
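     // The left operand is now known to be a non-negative smi, so x % d with
     // |d| == 2^k reduces to x & (2^k - 1); the divisor's sign does not matter.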
5486 if (int_value < 0) int_value = -int_value;
5487 if (int_value == 1) {
5488 __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
5489 } else {
5490 __ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
5491 }
5492 deferred->BindExit();
5493 frame_->Push(operand);
5494 break; // This break only applies if we generated code for MOD.
5495 }
5496 // Fall through if we did not find a power of 2 on the right hand side!
5497 // The next case must be the default.
5498
5499 default: {
5500 Result constant_operand(value);
5501 if (reversed) {
5502 LikelySmiBinaryOperation(op, &constant_operand, operand,
5503 overwrite_mode);
5504 } else {
5505 LikelySmiBinaryOperation(op, operand, &constant_operand,
5506 overwrite_mode);
5507 }
5508 break;
5509 }
5510 }
5511 ASSERT(!operand->is_valid());
5512}
5513
5514void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
5515 Result* left,
5516 Result* right,
5517 OverwriteMode overwrite_mode) {
5518 // Special handling of div and mod because they use fixed registers.
5519 if (op == Token::DIV || op == Token::MOD) {
5520 // We need rax as the quotient register, rdx as the remainder
5521 // register, neither left nor right in rax or rdx, and left copied
5522 // to rax.
5523 Result quotient;
5524 Result remainder;
5525 bool left_is_in_rax = false;
5526 // Step 1: get rax for quotient.
5527 if ((left->is_register() && left->reg().is(rax)) ||
5528 (right->is_register() && right->reg().is(rax))) {
5529 // One or both is in rax. Use a fresh non-rdx register for
5530 // them.
5531 Result fresh = allocator_->Allocate();
5532 ASSERT(fresh.is_valid());
5533 if (fresh.reg().is(rdx)) {
5534 remainder = fresh;
5535 fresh = allocator_->Allocate();
5536 ASSERT(fresh.is_valid());
5537 }
5538 if (left->is_register() && left->reg().is(rax)) {
5539 quotient = *left;
5540 *left = fresh;
5541 left_is_in_rax = true;
5542 }
5543 if (right->is_register() && right->reg().is(rax)) {
5544 quotient = *right;
5545 *right = fresh;
5546 }
5547 __ movq(fresh.reg(), rax);
5548 } else {
5549 // Neither left nor right is in rax.
5550 quotient = allocator_->Allocate(rax);
5551 }
5552 ASSERT(quotient.is_register() && quotient.reg().is(rax));
5553 ASSERT(!(left->is_register() && left->reg().is(rax)));
5554 ASSERT(!(right->is_register() && right->reg().is(rax)));
5555
5556 // Step 2: get rdx for remainder if necessary.
5557 if (!remainder.is_valid()) {
5558 if ((left->is_register() && left->reg().is(rdx)) ||
5559 (right->is_register() && right->reg().is(rdx))) {
5560 Result fresh = allocator_->Allocate();
5561 ASSERT(fresh.is_valid());
5562 if (left->is_register() && left->reg().is(rdx)) {
5563 remainder = *left;
5564 *left = fresh;
5565 }
5566 if (right->is_register() && right->reg().is(rdx)) {
5567 remainder = *right;
5568 *right = fresh;
5569 }
5570 __ movq(fresh.reg(), rdx);
5571 } else {
5572 // Neither left nor right is in rdx.
5573 remainder = allocator_->Allocate(rdx);
5574 }
5575 }
5576 ASSERT(remainder.is_register() && remainder.reg().is(rdx));
5577 ASSERT(!(left->is_register() && left->reg().is(rdx)));
5578 ASSERT(!(right->is_register() && right->reg().is(rdx)));
5579
5580 left->ToRegister();
5581 right->ToRegister();
5582 frame_->Spill(rax);
5583 frame_->Spill(rdx);
5584
5585 // Check that left and right are smi tagged.
5586 DeferredInlineBinaryOperation* deferred =
5587 new DeferredInlineBinaryOperation(op,
5588 (op == Token::DIV) ? rax : rdx,
5589 left->reg(),
5590 right->reg(),
5591 overwrite_mode);
5592 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5593
5594 if (op == Token::DIV) {
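     // SmiDiv bails out to the deferred code unless the quotient is itself a
     // representable smi (e.g. when the division is inexact).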
5595 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
5596 deferred->BindExit();
5597 left->Unuse();
5598 right->Unuse();
5599 frame_->Push(&quotient);
5600 } else {
5601 ASSERT(op == Token::MOD);
5602 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
5603 deferred->BindExit();
5604 left->Unuse();
5605 right->Unuse();
5606 frame_->Push(&remainder);
5607 }
5608 return;
5609 }
5610
5611 // Special handling of shift operations because they use fixed
5612 // registers.
5613 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
5614 // Move left out of rcx if necessary.
5615 if (left->is_register() && left->reg().is(rcx)) {
5616 *left = allocator_->Allocate();
5617 ASSERT(left->is_valid());
5618 __ movq(left->reg(), rcx);
5619 }
5620 right->ToRegister(rcx);
5621 left->ToRegister();
5622 ASSERT(left->is_register() && !left->reg().is(rcx));
5623 ASSERT(right->is_register() && right->reg().is(rcx));
5624
5625 // We will modify right, it must be spilled.
5626 frame_->Spill(rcx);
5627
5628 // Use a fresh answer register to avoid spilling the left operand.
5629 Result answer = allocator_->Allocate();
5630 ASSERT(answer.is_valid());
5631 // Check that both operands are smis using the answer register as a
5632 // temporary.
5633 DeferredInlineBinaryOperation* deferred =
5634 new DeferredInlineBinaryOperation(op,
5635 answer.reg(),
5636 left->reg(),
5637 rcx,
5638 overwrite_mode);
5639 __ movq(answer.reg(), left->reg());
5640 __ or_(answer.reg(), rcx);
5641 __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
5642
5643 // Perform the operation.
5644 switch (op) {
5645 case Token::SAR:
5646 __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
5647 break;
5648 case Token::SHR: {
5649 __ SmiShiftLogicalRight(answer.reg(),
5650 left->reg(),
5651 rcx,
5652 deferred->entry_label());
5653 break;
5654 }
5655 case Token::SHL: {
5656 __ SmiShiftLeft(answer.reg(),
5657 left->reg(),
5658 rcx,
5659 deferred->entry_label());
5660 break;
5661 }
5662 default:
5663 UNREACHABLE();
5664 }
5665 deferred->BindExit();
5666 left->Unuse();
5667 right->Unuse();
5668 frame_->Push(&answer);
5669 return;
5670 }
5671
5672 // Handle the other binary operations.
5673 left->ToRegister();
5674 right->ToRegister();
5675 // A newly allocated register answer is used to hold the answer. The
5676 // registers containing left and right are not modified so they don't
5677 // need to be spilled in the fast case.
5678 Result answer = allocator_->Allocate();
5679 ASSERT(answer.is_valid());
5680
5681 // Perform the smi tag check.
5682 DeferredInlineBinaryOperation* deferred =
5683 new DeferredInlineBinaryOperation(op,
5684 answer.reg(),
5685 left->reg(),
5686 right->reg(),
5687 overwrite_mode);
5688 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5689
5690 switch (op) {
5691 case Token::ADD:
5692 __ SmiAdd(answer.reg(),
5693 left->reg(),
5694 right->reg(),
5695 deferred->entry_label());
5696 break;
5697
5698 case Token::SUB:
5699 __ SmiSub(answer.reg(),
5700 left->reg(),
5701 right->reg(),
5702 deferred->entry_label());
5703 break;
5704
5705 case Token::MUL: {
5706 __ SmiMul(answer.reg(),
5707 left->reg(),
5708 right->reg(),
5709 deferred->entry_label());
5710 break;
5711 }
5712
5713 case Token::BIT_OR:
5714 __ SmiOr(answer.reg(), left->reg(), right->reg());
5715 break;
5716
5717 case Token::BIT_AND:
5718 __ SmiAnd(answer.reg(), left->reg(), right->reg());
5719 break;
5720
5721 case Token::BIT_XOR:
5722 __ SmiXor(answer.reg(), left->reg(), right->reg());
5723 break;
5724
5725 default:
5726 UNREACHABLE();
5727 break;
5728 }
5729 deferred->BindExit();
5730 left->Unuse();
5731 right->Unuse();
5732 frame_->Push(&answer);
5733}
5734
5735
5736#undef __
5737#define __ ACCESS_MASM(masm)
5738
5739
5740Handle<String> Reference::GetName() {
5741 ASSERT(type_ == NAMED);
5742 Property* property = expression_->AsProperty();
5743 if (property == NULL) {
5744 // Global variable reference treated as a named property reference.
5745 VariableProxy* proxy = expression_->AsVariableProxy();
5746 ASSERT(proxy->AsVariable() != NULL);
5747 ASSERT(proxy->AsVariable()->is_global());
5748 return proxy->name();
5749 } else {
5750 Literal* raw_name = property->key()->AsLiteral();
5751 ASSERT(raw_name != NULL);
5752 return Handle<String>(String::cast(*raw_name->handle()));
5753 }
5754}
5755
5756
5757void Reference::GetValue(TypeofState typeof_state) {
5758 ASSERT(!cgen_->in_spilled_code());
5759 ASSERT(cgen_->HasValidEntryRegisters());
5760 ASSERT(!is_illegal());
5761 MacroAssembler* masm = cgen_->masm();
5762
5763 // Record the source position for the property load.
5764 Property* property = expression_->AsProperty();
5765 if (property != NULL) {
5766 cgen_->CodeForSourcePosition(property->position());
5767 }
5768
5769 switch (type_) {
5770 case SLOT: {
5771 Comment cmnt(masm, "[ Load from Slot");
5772 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
5773 ASSERT(slot != NULL);
5774 cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
5775 break;
5776 }
5777
5778 case NAMED: {
5779 // TODO(1241834): Make sure that it is safe to ignore the
5780 // distinction between expressions in a typeof and not in a
5781 // typeof. If there is a chance that reference errors can be
5782 // thrown below, we must distinguish between the two kinds of
5783 // loads (typeof expression loads must not throw a reference
5784 // error).
5785 Variable* var = expression_->AsVariableProxy()->AsVariable();
5786 bool is_global = var != NULL;
5787 ASSERT(!is_global || var->is_global());
5788
5789 // Do not inline the inobject property case for loads from the global
5790 // object. Also do not inline for unoptimized code. This saves time
5791 // in the code generator. Unoptimized code is toplevel code or code
5792 // that is not in a loop.
5793 if (is_global ||
5794 cgen_->scope()->is_global_scope() ||
5795 cgen_->loop_nesting() == 0) {
5796 Comment cmnt(masm, "[ Load from named Property");
5797 cgen_->frame()->Push(GetName());
5798
5799 RelocInfo::Mode mode = is_global
5800 ? RelocInfo::CODE_TARGET_CONTEXT
5801 : RelocInfo::CODE_TARGET;
5802 Result answer = cgen_->frame()->CallLoadIC(mode);
5803 // A test rax instruction following the call signals that the
5804 // inobject property case was inlined. Ensure that there is not
5805 // a test rax instruction here.
5806 __ nop();
5807 cgen_->frame()->Push(&answer);
5808 } else {
5809 // Inline the inobject property case.
5810 Comment cmnt(masm, "[ Inlined named property load");
5811 Result receiver = cgen_->frame()->Pop();
5812 receiver.ToRegister();
5813 Result value = cgen_->allocator()->Allocate();
5814 ASSERT(value.is_valid());
5815 // Cannot use r12 for receiver, because that changes
5816 // the distance between a call and a fixup location,
5817 // due to a special encoding of r12 as r/m in a ModR/M byte.
5818 if (receiver.reg().is(r12)) {
5819 // Swap receiver and value.
5820 __ movq(value.reg(), receiver.reg());
5821 Result temp = receiver;
5822 receiver = value;
5823 value = temp;
5824 cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
5825 }
5826
5827 DeferredReferenceGetNamedValue* deferred =
5828 new DeferredReferenceGetNamedValue(value.reg(),
5829 receiver.reg(),
5830 GetName());
5831
5832 // Check that the receiver is a heap object.
5833 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5834
5835 __ bind(deferred->patch_site());
5836 // This is the map check instruction that will be patched (so we can't
5837 // use the double underscore macro that may insert instructions).
5838 // Initially use an invalid map to force a failure.
5839 masm->Move(kScratchRegister, Factory::null_value());
5840 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5841 kScratchRegister);
5842 // This branch is always a forwards branch so it's always a fixed
5843 // size which allows the assert below to succeed and patching to work.
5844 // Don't use deferred->Branch(...), since that might add coverage code.
5845 masm->j(not_equal, deferred->entry_label());
5846
5847 // The delta from the patch label to the load offset must be
5848 // statically known.
5849 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
5850 LoadIC::kOffsetToLoadInstruction);
5851 // The initial (invalid) offset has to be large enough to force
5852 // a 32-bit instruction encoding to allow patching with an
5853 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
5854 int offset = kMaxInt;
5855 masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
5856
5857 __ IncrementCounter(&Counters::named_load_inline, 1);
5858 deferred->BindExit();
5859 cgen_->frame()->Push(&receiver);
5860 cgen_->frame()->Push(&value);
5861 }
5862 break;
5863 }
5864
5865 case KEYED: {
5866 // TODO(1241834): Make sure that it is safe to ignore the
5867 // distinction between expressions in a typeof and not in a typeof.
5868 Comment cmnt(masm, "[ Load from keyed Property");
5869 Variable* var = expression_->AsVariableProxy()->AsVariable();
5870 bool is_global = var != NULL;
5871 ASSERT(!is_global || var->is_global());
5872
5873 // Inline array load code if inside of a loop. We do not know
5874 // the receiver map yet, so we initially generate the code with
5875 // a check against an invalid map. In the inline cache code, we
5876 // patch the map check if appropriate.
5877 if (cgen_->loop_nesting() > 0) {
5878 Comment cmnt(masm, "[ Inlined load from keyed Property");
5879
5880 Result key = cgen_->frame()->Pop();
5881 Result receiver = cgen_->frame()->Pop();
5882 key.ToRegister();
5883 receiver.ToRegister();
5884
5885 // Use a fresh temporary to load the elements without destroying
5886 // the receiver, which is needed for the deferred slow case.
5887 Result elements = cgen_->allocator()->Allocate();
5888 ASSERT(elements.is_valid());
5889
5890 // Use a fresh temporary for the index and later the loaded
5891 // value.
5892 Result index = cgen_->allocator()->Allocate();
5893 ASSERT(index.is_valid());
5894
5895 DeferredReferenceGetKeyedValue* deferred =
5896 new DeferredReferenceGetKeyedValue(index.reg(),
5897 receiver.reg(),
5898 key.reg(),
5899 is_global);
5900
5901 // Check that the receiver is not a smi (only needed if this
5902 // is not a load from the global context) and that it has the
5903 // expected map.
5904 if (!is_global) {
5905 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5906 }
5907
5908 // Initially, use an invalid map. The map is patched in the IC
5909 // initialization code.
5910 __ bind(deferred->patch_site());
5911 // Use masm-> here instead of the double underscore macro since extra
5912 // coverage code can interfere with the patching.
5913 masm->movq(kScratchRegister, Factory::null_value(),
5914 RelocInfo::EMBEDDED_OBJECT);
5915 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5916 kScratchRegister);
5917 deferred->Branch(not_equal);
5918
5919 // Check that the key is a non-negative smi.
5920 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
5921
5922 // Get the elements array from the receiver and check that it
5923 // is not a dictionary.
5924 __ movq(elements.reg(),
5925 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
5926 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
5927 Factory::fixed_array_map());
5928 deferred->Branch(not_equal);
5929
5930 // Shift the key to get the actual index value and check that
5931 // it is within bounds.
5932 __ SmiToInteger32(index.reg(), key.reg());
5933 __ cmpl(index.reg(),
5934 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
5935 deferred->Branch(above_equal);
5936
5937 // The index register holds the un-smi-tagged key. It has been
5938 // zero-extended to 64-bits, so it can be used directly as index in the
5939 // operand below.
5940 // Load and check that the result is not the hole. We could
5941 // reuse the index or elements register for the value.
5942 //
5943 // TODO(206): Consider whether it makes sense to try some
5944 // heuristic about which register to reuse. For example, if
5945 // one is rax, then we can reuse that one because the value
5946 // coming from the deferred code will be in rax.
5947 Result value = index;
5948 __ movq(value.reg(),
5949 Operand(elements.reg(),
5950 index.reg(),
5951 times_pointer_size,
5952 FixedArray::kHeaderSize - kHeapObjectTag));
5953 elements.Unuse();
5954 index.Unuse();
5955 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
5956 deferred->Branch(equal);
5957 __ IncrementCounter(&Counters::keyed_load_inline, 1);
5958
5959 deferred->BindExit();
5960 // Restore the receiver and key to the frame and push the
5961 // result on top of it.
5962 cgen_->frame()->Push(&receiver);
5963 cgen_->frame()->Push(&key);
5964 cgen_->frame()->Push(&value);
5965
5966 } else {
5967 Comment cmnt(masm, "[ Load from keyed Property");
5968 RelocInfo::Mode mode = is_global
5969 ? RelocInfo::CODE_TARGET_CONTEXT
5970 : RelocInfo::CODE_TARGET;
5971 Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
5972 // Make sure that we do not have a test instruction after the
5973 // call. A test instruction after the call is used to
5974 // indicate that we have generated an inline version of the
5975 // keyed load. The explicit nop instruction is here because
5976 // the push that follows might be peep-hole optimized away.
5977 __ nop();
5978 cgen_->frame()->Push(&answer);
5979 }
5980 break;
5981 }
5982
5983 default:
5984 UNREACHABLE();
5985 }
5986}
5987
5988
5989void Reference::TakeValue(TypeofState typeof_state) {
5990 // TODO(X64): This function is completely architecture independent. Move
5991 // it somewhere shared.
5992
5993 // For non-constant frame-allocated slots, we invalidate the value in the
5994 // slot. For all others, we fall back on GetValue.
5995 ASSERT(!cgen_->in_spilled_code());
5996 ASSERT(!is_illegal());
5997 if (type_ != SLOT) {
5998 GetValue(typeof_state);
5999 return;
6000 }
6001
6002 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6003 ASSERT(slot != NULL);
6004 if (slot->type() == Slot::LOOKUP ||
6005 slot->type() == Slot::CONTEXT ||
6006 slot->var()->mode() == Variable::CONST ||
6007 slot->is_arguments()) {
6008 GetValue(typeof_state);
6009 return;
6010 }
6011
6012 // Only non-constant, frame-allocated parameters and locals can reach
6013 // here. Be careful not to use the optimizations for arguments
6014 // object access since it may not have been initialized yet.
6015 ASSERT(!slot->is_arguments());
6016 if (slot->type() == Slot::PARAMETER) {
6017 cgen_->frame()->TakeParameterAt(slot->index());
6018 } else {
6019 ASSERT(slot->type() == Slot::LOCAL);
6020 cgen_->frame()->TakeLocalAt(slot->index());
6021 }
6022}
6023
6024
6025void Reference::SetValue(InitState init_state) {
6026 ASSERT(cgen_->HasValidEntryRegisters());
6027 ASSERT(!is_illegal());
6028 MacroAssembler* masm = cgen_->masm();
6029 switch (type_) {
6030 case SLOT: {
6031 Comment cmnt(masm, "[ Store to Slot");
6032 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6033 ASSERT(slot != NULL);
6034 cgen_->StoreToSlot(slot, init_state);
6035 break;
6036 }
6037
6038 case NAMED: {
6039 Comment cmnt(masm, "[ Store to named Property");
6040 cgen_->frame()->Push(GetName());
6041 Result answer = cgen_->frame()->CallStoreIC();
6042 cgen_->frame()->Push(&answer);
6043 break;
6044 }
6045
6046 case KEYED: {
6047 Comment cmnt(masm, "[ Store to keyed Property");
6048
6049 // Generate inlined version of the keyed store if the code is in
6050 // a loop and the key is likely to be a smi.
6051 Property* property = expression()->AsProperty();
6052 ASSERT(property != NULL);
6053 SmiAnalysis* key_smi_analysis = property->key()->type();
6054
6055 if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
6056 Comment cmnt(masm, "[ Inlined store to keyed Property");
6057
6058 // Get the receiver, key and value into registers.
6059 Result value = cgen_->frame()->Pop();
6060 Result key = cgen_->frame()->Pop();
6061 Result receiver = cgen_->frame()->Pop();
6062
6063 Result tmp = cgen_->allocator_->Allocate();
6064 ASSERT(tmp.is_valid());
6065
6066 // Determine whether the value is a constant before putting it
6067 // in a register.
6068 bool value_is_constant = value.is_constant();
6069
6070 // Make sure that value, key and receiver are in registers.
6071 value.ToRegister();
6072 key.ToRegister();
6073 receiver.ToRegister();
6074
6075 DeferredReferenceSetKeyedValue* deferred =
6076 new DeferredReferenceSetKeyedValue(value.reg(),
6077 key.reg(),
6078 receiver.reg());
6079
6080 // Check that the value is a smi if it is not a constant.
6081 // We can skip the write barrier for smis and constants.
6082 if (!value_is_constant) {
6083 __ JumpIfNotSmi(value.reg(), deferred->entry_label());
6084 }
6085
6086 // Check that the key is a non-negative smi.
6087 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
6088 // Ensure that the smi is zero-extended. This is not guaranteed.
6089 __ movl(key.reg(), key.reg());
6090
6091 // Check that the receiver is not a smi.
6092 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6093
6094 // Check that the receiver is a JSArray.
6095 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
6096 deferred->Branch(not_equal);
6097
6098 // Check that the key is within bounds. Both the key and the
6099 // length of the JSArray are smis, so compare only low 32 bits.
6100 __ cmpl(key.reg(),
6101 FieldOperand(receiver.reg(), JSArray::kLengthOffset));
6102 deferred->Branch(greater_equal);
6103
6104 // Get the elements array from the receiver and check that it
6105 // is a flat array (not a dictionary).
6106 __ movq(tmp.reg(),
6107 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6108 // Bind the deferred code patch site to be able to locate the
6109 // fixed array map comparison. When debugging, we patch this
6110 // comparison to always fail so that we will hit the IC call
6111 // in the deferred code which will allow the debugger to
6112 // break for fast case stores.
6113 __ bind(deferred->patch_site());
6114 // Avoid using __ to ensure the distance from patch_site
6115 // to the map address is always the same.
6116 masm->movq(kScratchRegister, Factory::fixed_array_map(),
6117 RelocInfo::EMBEDDED_OBJECT);
6118 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6119 kScratchRegister);
6120 deferred->Branch(not_equal);
6121
6122 // Store the value.
6123 SmiIndex index =
6124 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
6125 __ movq(Operand(tmp.reg(),
6126 index.reg,
6127 index.scale,
6128 FixedArray::kHeaderSize - kHeapObjectTag),
6129 value.reg());
6130 __ IncrementCounter(&Counters::keyed_store_inline, 1);
6131
6132 deferred->BindExit();
6133
6134 cgen_->frame()->Push(&receiver);
6135 cgen_->frame()->Push(&key);
6136 cgen_->frame()->Push(&value);
6137 } else {
6138 Result answer = cgen_->frame()->CallKeyedStoreIC();
6139 // Make sure that we do not have a test instruction after the
6140 // call. A test instruction after the call is used to
6141 // indicate that we have generated an inline version of the
6142 // keyed store.
6143 masm->nop();
6144 cgen_->frame()->Push(&answer);
6145 }
6146 break;
6147 }
6148
6149 default:
6150 UNREACHABLE();
6151 }
6152}
6153
6154
6155void ToBooleanStub::Generate(MacroAssembler* masm) {
6156 Label false_result, true_result, not_string;
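     // The value to convert is the stub's single argument, just above the
     // return address on the stack.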
6157 __ movq(rax, Operand(rsp, 1 * kPointerSize));
6158
6159 // 'null' => false.
6160 __ CompareRoot(rax, Heap::kNullValueRootIndex);
6161 __ j(equal, &false_result);
6162
6163 // Get the map and type of the heap object.
6164 // We don't use CmpObjectType because we manipulate the type field.
6165 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6166 __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
6167
6168 // Undetectable => false.
6169 __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
6170 __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
6171 __ j(not_zero, &false_result);
6172
6173 // JavaScript object => true.
6174 __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
6175 __ j(above_equal, &true_result);
6176
6177 // String value => false iff empty.
6178 __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
6179 __ j(above_equal, &not_string);
6180 __ and_(rcx, Immediate(kStringSizeMask));
6181 __ cmpq(rcx, Immediate(kShortStringTag));
6182 __ j(not_equal, &true_result); // Empty string is always short.
6183 __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
6184 __ shr(rdx, Immediate(String::kShortLengthShift));
6185 __ j(zero, &false_result);
6186 __ jmp(&true_result);
6187
6188 __ bind(&not_string);
6189 // HeapNumber => false iff +0, -0, or NaN.
6190 // These three cases set C3 when compared to zero in the FPU.
6191 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6192 __ j(not_equal, &true_result);
6193 // TODO(x64): Don't use fp stack, use MMX registers?
6194 __ fldz(); // Load zero onto fp stack
6195 // Load heap-number double value onto fp stack
6196 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
6197 __ fucompp(); // Compare and pop both values.
6198 __ movq(kScratchRegister, rax);
6199 __ fnstsw_ax(); // Store fp status word in ax, no checking for exceptions.
6200 __ testl(rax, Immediate(0x4000)); // Test FP condition flag C3, bit 14.
6201 __ movq(rax, kScratchRegister);
6202 __ j(not_zero, &false_result);
6203 // Fall through to |true_result|.
6204
6205 // Return 1/0 for true/false in rax.
6206 __ bind(&true_result);
6207 __ movq(rax, Immediate(1));
6208 __ ret(1 * kPointerSize);
6209 __ bind(&false_result);
6210 __ xor_(rax, rax);
6211 __ ret(1 * kPointerSize);
6212}
6213
6214
6215bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
6216 // TODO(X64): This method is identical to the ia32 version.
6217 // Either find a reason to change it, or move it somewhere where it can be
6218 // shared. (Notice: It assumes that a Smi can fit in an int).
6219
6220 Object* answer_object = Heap::undefined_value();
6221 switch (op) {
6222 case Token::ADD:
6223 if (Smi::IsValid(left + right)) {
6224 answer_object = Smi::FromInt(left + right);
6225 }
6226 break;
6227 case Token::SUB:
6228 if (Smi::IsValid(left - right)) {
6229 answer_object = Smi::FromInt(left - right);
6230 }
6231 break;
6232 case Token::MUL: {
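     // Compute the product in double precision: it cannot overflow, and any
     // product inside the smi range is represented exactly, so the range
     // check below is reliable.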
6233 double answer = static_cast<double>(left) * right;
6234 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
6235 // If the product is zero and the non-zero factor is negative,
6236 // the spec requires us to return floating point negative zero.
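     // When the product is zero, left + right equals the other factor (or
     // zero), so its sign tells us whether -0.0 would be required.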
6237 if (answer != 0 || (left + right) >= 0) {
6238 answer_object = Smi::FromInt(static_cast<int>(answer));
6239 }
6240 }
6241 }
6242 break;
6243 case Token::DIV:
6244 case Token::MOD:
6245 break;
6246 case Token::BIT_OR:
6247 answer_object = Smi::FromInt(left | right);
6248 break;
6249 case Token::BIT_AND:
6250 answer_object = Smi::FromInt(left & right);
6251 break;
6252 case Token::BIT_XOR:
6253 answer_object = Smi::FromInt(left ^ right);
6254 break;
6255
6256 case Token::SHL: {
6257 int shift_amount = right & 0x1F;
6258 if (Smi::IsValid(left << shift_amount)) {
6259 answer_object = Smi::FromInt(left << shift_amount);
6260 }
6261 break;
6262 }
6263 case Token::SHR: {
6264 int shift_amount = right & 0x1F;
6265 unsigned int unsigned_left = left;
6266 unsigned_left >>= shift_amount;
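     // A logical shift of a negative value can produce a result larger than
     // Smi::kMaxValue (e.g. when the shift amount is zero).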
6267 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
6268 answer_object = Smi::FromInt(unsigned_left);
6269 }
6270 break;
6271 }
6272 case Token::SAR: {
6273 int shift_amount = right & 0x1F;
6274 unsigned int unsigned_left = left;
6275 if (left < 0) {
6276 // Perform arithmetic shift of a negative number by
6277 // complementing number, logical shifting, complementing again.
6278 unsigned_left = ~unsigned_left;
6279 unsigned_left >>= shift_amount;
6280 unsigned_left = ~unsigned_left;
6281 } else {
6282 unsigned_left >>= shift_amount;
6283 }
6284 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
6285 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
6286 break;
6287 }
6288 default:
6289 UNREACHABLE();
6290 break;
6291 }
6292 if (answer_object == Heap::undefined_value()) {
6293 return false;
6294 }
6295 frame_->Push(Handle<Object>(answer_object));
6296 return true;
6297}
6298
6299
6300// End of CodeGenerator implementation.
6301
6302void UnarySubStub::Generate(MacroAssembler* masm) {
6303 Label slow;
6304 Label done;
6305 Label try_float;
6306 Label special;
6307 // Check whether the value is a smi.
6308 __ JumpIfNotSmi(rax, &try_float);
6309
6310 // Enter runtime system if the value of the smi is zero
6311 // to make sure that we switch between 0 and -0.
6312 // Also enter it if the value of the smi is Smi::kMinValue.
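     // With a one-bit smi tag, bits 1..30 of the tagged value are zero only
     // for the smis 0 and Smi::kMinValue.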
6313 __ testl(rax, Immediate(0x7FFFFFFE));
6314 __ j(zero, &special);
6315 __ negl(rax);
6316 __ jmp(&done);
6317
6318 __ bind(&special);
6319 // Either zero or -0x40000000 (Smi::kMinValue), neither of which becomes a smi when negated.
6320 __ testl(rax, rax);
6321 __ j(not_zero, &slow);
6322 __ Move(rax, Factory::minus_zero_value());
6323 __ jmp(&done);
6324
6325 // Enter runtime system.
6326 __ bind(&slow);
6327 __ pop(rcx); // pop return address
6328 __ push(rax);
6329 __ push(rcx); // push return address
6330 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
6331 __ jmp(&done);
6332
6333 // Try floating point case.
6334 __ bind(&try_float);
6335 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6336 __ Cmp(rdx, Factory::heap_number_map());
6337 __ j(not_equal, &slow);
6338 // Operand is a float, negate its value by flipping sign bit.
6339 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
6340 __ movq(kScratchRegister, Immediate(0x01));
6341 __ shl(kScratchRegister, Immediate(63));
6342 __ xor_(rdx, kScratchRegister); // Flip sign.
6343 // rdx is value to store.
6344 if (overwrite_) {
6345 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
6346 } else {
6347 FloatingPointHelper::AllocateHeapNumber(masm, &slow, rbx, rcx);
6348 // rcx: allocated 'empty' number
6349 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
6350 __ movq(rax, rcx);
6351 }
6352
6353 __ bind(&done);
6354 __ StubReturn(1);
6355}
6356
6357
6358void CompareStub::Generate(MacroAssembler* masm) {
6359 Label call_builtin, done;
6360
6361 // NOTICE! This code is only reached after a smi-fast-case check, so
6362 // it is certain that at least one operand isn't a smi.
6363
6364 if (cc_ == equal) { // Both strict and non-strict.
6365 Label slow; // Fallthrough label.
6366 // Equality is almost reflexive (everything but NaN), so start by testing
6367 // for "identity and not NaN".
6368 {
6369 Label not_identical;
6370 __ cmpq(rax, rdx);
6371 __ j(not_equal, &not_identical);
6372 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
6373 // so we do the second best thing - test it ourselves.
6374
6375 Label return_equal;
6376 Label heap_number;
6377 // If it's not a heap number, then return equal.
6378 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
6379 Factory::heap_number_map());
6380 __ j(equal, &heap_number);
6381 __ bind(&return_equal);
6382 __ xor_(rax, rax);
6383 __ ret(0);
6384
6385 __ bind(&heap_number);
6386 // It is a heap number, so return non-equal if it's NaN and equal if it's
6387 // not NaN.
6388 // The representation of NaN values has all exponent bits (52..62) set,
6389 // and not all mantissa bits (0..51) clear.
6390 // Read double representation into rax.
6391 __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE);
6392 __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
6393 // Test that exponent bits are all set.
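     // rbx holds the exponent mask, so after the or below rbx == rax exactly
     // when rax already has all exponent bits set.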
6394 __ or_(rbx, rax);
6395 __ cmpq(rbx, rax);
6396 __ j(not_equal, &return_equal);
6397 // Shift out the sign bit and all exponent bits, retaining only the mantissa.
6398 __ shl(rax, Immediate(12));
6399 // If all bits in the mantissa are zero the number is Infinity, and
6400 // we return zero. Otherwise it is a NaN, and we return non-zero.
6401 // We cannot just return rax because only eax is tested on return.
6402 __ setcc(not_zero, rax);
6403 __ ret(0);
6404
6405 __ bind(&not_identical);
6406 }
6407
6408 // If we're doing a strict equality comparison, we don't have to do
6409 // type conversion, so we generate code to do fast comparison for objects
6410 // and oddballs. Non-smi numbers and strings still go through the usual
6411 // slow-case code.
6412 if (strict_) {
6413 // If either is a Smi (we know that not both are), then they can only
6414 // be equal if the other is a HeapNumber. If so, use the slow case.
6415 {
6416 Label not_smis;
6417 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
6418
6419 // Check if the non-smi operand is a heap number.
6420 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
6421 Factory::heap_number_map());
6422 // If heap number, handle it in the slow case.
6423 __ j(equal, &slow);
6424 // Return non-equal. ebx (the lower half of rbx) is not zero.
6425 __ movq(rax, rbx);
6426 __ ret(0);
6427
6428 __ bind(&not_smis);
6429 }
6430
6431 // If either operand is a JSObject or an oddball value, then they are not
6432 // equal since their pointers are different.
6433 // There is no test for undetectability in strict equality.
6434
6435 // If the first object is a JS object, we have done pointer comparison.
6436 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
6437 Label first_non_object;
6438 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
6439 __ j(below, &first_non_object);
6440       // Return non-zero: eax (the low half of tagged pointer rax) is not zero.
6441 Label return_not_equal;
6442 ASSERT(kHeapObjectTag != 0);
6443 __ bind(&return_not_equal);
6444 __ ret(0);
6445
6446 __ bind(&first_non_object);
6447 // Check for oddballs: true, false, null, undefined.
6448 __ CmpInstanceType(rcx, ODDBALL_TYPE);
6449 __ j(equal, &return_not_equal);
6450
6451 __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
6452 __ j(above_equal, &return_not_equal);
6453
6454 // Check for oddballs: true, false, null, undefined.
6455 __ CmpInstanceType(rcx, ODDBALL_TYPE);
6456 __ j(equal, &return_not_equal);
6457
6458 // Fall through to the general case.
6459 }
6460 __ bind(&slow);
6461 }
6462
6463 // Push arguments below the return address to prepare jump to builtin.
6464 __ pop(rcx);
6465 __ push(rax);
6466 __ push(rdx);
6467 __ push(rcx);
6468
6469 // Inlined floating point compare.
6470 // Call builtin if operands are not floating point or smi.
6471 Label check_for_symbols;
6472   // Operands are in rax and rdx, and were pushed above for the builtin call.
6473 FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols);
6474 FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
6475 __ FCmp();
6476
6477 // Jump to builtin for NaN.
6478 __ j(parity_even, &call_builtin);
6479
6480 // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
6481 Label below_lbl, above_lbl;
6482   // FCmp sets unsigned-style flags; convert below/equal/above to -1/0/1 in rax.
6483 __ j(below, &below_lbl);
6484 __ j(above, &above_lbl);
6485
6486 __ xor_(rax, rax); // equal
6487 __ ret(2 * kPointerSize);
6488
6489 __ bind(&below_lbl);
6490 __ movq(rax, Immediate(-1));
6491 __ ret(2 * kPointerSize);
6492
6493 __ bind(&above_lbl);
6494 __ movq(rax, Immediate(1));
6495 __ ret(2 * kPointerSize); // rax, rdx were pushed
6496
6497 // Fast negative check for symbol-to-symbol equality.
6498 __ bind(&check_for_symbols);
6499 if (cc_ == equal) {
6500 BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister);
6501 BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister);
6502
6503 // We've already checked for object identity, so if both operands
6504 // are symbols they aren't equal. Register eax (not rax) already holds a
6505 // non-zero value, which indicates not equal, so just return.
6506 __ ret(2 * kPointerSize);
6507 }
6508
6509 __ bind(&call_builtin);
6510   // The argument order on the stack must be swapped for the builtin call.
6511 __ pop(rcx);
6512 __ pop(rdx);
6513 __ pop(rax);
6514 __ push(rdx);
6515 __ push(rax);
6516
6517 // Figure out which native to call and setup the arguments.
6518 Builtins::JavaScript builtin;
6519 if (cc_ == equal) {
6520 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
6521 } else {
6522 builtin = Builtins::COMPARE;
6523 int ncr; // NaN compare result
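    // ncr is what the COMPARE builtin should yield when either operand is NaN.
    // It is chosen so that the final cc_ check fails, making every ordered
    // comparison involving NaN evaluate to false.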
6524 if (cc_ == less || cc_ == less_equal) {
6525 ncr = GREATER;
6526 } else {
6527 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
6528 ncr = LESS;
6529 }
6530 __ push(Immediate(Smi::FromInt(ncr)));
6531 }
6532
6533 // Restore return address on the stack.
6534 __ push(rcx);
6535
6536 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
6537 // tagged as a small integer.
6538 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
6539}
6540
6541
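// Jumps to the given label unless 'object' is a symbol; clobbers 'scratch'.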
6542void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
6543 Label* label,
6544 Register object,
6545 Register scratch) {
6546 __ JumpIfSmi(object, label);
6547 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
6548 __ movzxbq(scratch,
6549 FieldOperand(scratch, Map::kInstanceTypeOffset));
6550 __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
6551 __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
6552 __ j(not_equal, label);
6553}
6554
6555
6556// Call the function just below TOS on the stack with the given
6557// arguments. The receiver is the TOS.
6558void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
6559 int position) {
6560 // Push the arguments ("left-to-right") on the stack.
6561 int arg_count = args->length();
6562 for (int i = 0; i < arg_count; i++) {
6563 Load(args->at(i));
6564 }
6565
6566 // Record the position for debugging purposes.
6567 CodeForSourcePosition(position);
6568
6569 // Use the shared code stub to call the function.
6570 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
6571 CallFunctionStub call_function(arg_count, in_loop);
6572 Result answer = frame_->CallStub(&call_function, arg_count + 1);
6573 // Restore context and replace function on the stack with the
6574 // result of the stub invocation.
6575 frame_->RestoreContextRegister();
6576 frame_->SetElementAt(0, &answer);
6577}
6578
6579
6580void InstanceofStub::Generate(MacroAssembler* masm) {
6581 // Implements "value instanceof function" operator.
6582 // Expected input state:
6583 // rsp[0] : return address
6584 // rsp[1] : function pointer
6585 // rsp[2] : value
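  // Returns 0 in rax when 'value' is an instance of 'function' and a non-zero
  // smi otherwise; the slow cases defer to the INSTANCE_OF builtin.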
6586
6587 // Get the object - go slow case if it's a smi.
6588 Label slow;
6589 __ movq(rax, Operand(rsp, 2 * kPointerSize));
6590 __ JumpIfSmi(rax, &slow);
6591
6592 // Check that the left hand is a JS object. Leave its map in rax.
6593 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
6594 __ j(below, &slow);
6595 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
6596 __ j(above, &slow);
6597
6598 // Get the prototype of the function.
6599 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6600 __ TryGetFunctionPrototype(rdx, rbx, &slow);
6601
6602 // Check that the function prototype is a JS object.
6603 __ JumpIfSmi(rbx, &slow);
6604 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
6605 __ j(below, &slow);
6606 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
6607 __ j(above, &slow);
6608
6609 // Register mapping: rax is object map and rbx is function prototype.
6610 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
6611
6612 // Loop through the prototype chain looking for the function prototype.
6613 Label loop, is_instance, is_not_instance;
6614 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
6615 __ bind(&loop);
6616 __ cmpq(rcx, rbx);
6617 __ j(equal, &is_instance);
6618 __ cmpq(rcx, kScratchRegister);
6619 __ j(equal, &is_not_instance);
6620 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
6621 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
6622 __ jmp(&loop);
6623
6624 __ bind(&is_instance);
6625 __ xor_(rax, rax);
6626 __ ret(2 * kPointerSize);
6627
6628 __ bind(&is_not_instance);
6629 __ movq(rax, Immediate(Smi::FromInt(1)));
6630 __ ret(2 * kPointerSize);
6631
6632 // Slow-case: Go through the JavaScript implementation.
6633 __ bind(&slow);
6634 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
6635}
6636
6637
6638void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
6639 // The displacement is used for skipping the return address and the
6640 // frame pointer on the stack. It is the offset of the last
6641 // parameter (if any) relative to the frame pointer.
6642 static const int kDisplacement = 2 * kPointerSize;
6643
6644 // Check if the calling frame is an arguments adaptor frame.
6645 Label runtime;
6646 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6647 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
6648 __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6649 __ j(not_equal, &runtime);
6650 // Value in rcx is Smi encoded.
6651
6652 // Patch the arguments.length and the parameters pointer.
6653 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6654 __ movq(Operand(rsp, 1 * kPointerSize), rcx);
6655 SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
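  // SmiToIndex yields a register/scale pair, so the smi-encoded argument count
  // can be used directly in the address computation below.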
6656 __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
6657 __ movq(Operand(rsp, 2 * kPointerSize), rdx);
6658
6659 // Do the runtime call to allocate the arguments object.
6660 __ bind(&runtime);
6661 Runtime::Function* f = Runtime::FunctionForId(Runtime::kNewArgumentsFast);
6662 __ TailCallRuntime(ExternalReference(f), 3, f->result_size);
6663}
6664
6665
6666void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6667 // The key is in rdx and the parameter count is in rax.
6668
6669 // The displacement is used for skipping the frame pointer on the
6670 // stack. It is the offset of the last parameter (if any) relative
6671 // to the frame pointer.
6672 static const int kDisplacement = 1 * kPointerSize;
6673
6674 // Check that the key is a smi.
6675 Label slow;
6676 __ JumpIfNotSmi(rdx, &slow);
6677
6678 // Check if the calling frame is an arguments adaptor frame.
6679 Label adaptor;
6680 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6681 __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
6682 __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6683 __ j(equal, &adaptor);
6684
6685 // Check index against formal parameters count limit passed in
6686 // through register rax. Use unsigned comparison to get negative
6687 // check for free.
6688 __ cmpq(rdx, rax);
6689 __ j(above_equal, &slow);
6690
6691 // Read the argument from the stack and return it.
6692 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
6693 __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
6694 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
6695 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
6696 __ Ret();
6697
6698 // Arguments adaptor case: Check index against actual arguments
6699 // limit found in the arguments adaptor frame. Use unsigned
6700 // comparison to get negative check for free.
6701 __ bind(&adaptor);
6702 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6703 __ cmpq(rdx, rcx);
6704 __ j(above_equal, &slow);
6705
6706 // Read the argument from the stack and return it.
6707 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
6708 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
6709 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
6710 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
6711 __ Ret();
6712
6713 // Slow-case: Handle non-smi or out-of-bounds access to arguments
6714 // by calling the runtime system.
6715 __ bind(&slow);
6716 __ pop(rbx); // Return address.
6717 __ push(rdx);
6718 __ push(rbx);
6719 Runtime::Function* f =
6720 Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
6721 __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
6722}
6723
6724
6725void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
6726 // Check if the calling frame is an arguments adaptor frame.
6727 Label adaptor;
6728 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6729 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
6730 __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6731 __ j(equal, &adaptor);
6732
6733 // Nothing to do: The formal number of parameters has already been
6734   // passed in register rax by the calling function. Just return it.
6735 __ ret(0);
6736
6737 // Arguments adaptor case: Read the arguments length from the
6738 // adaptor frame and return it.
6739 __ bind(&adaptor);
6740 __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6741 __ ret(0);
6742}
6743
6744
6745int CEntryStub::MinorKey() {
6746 ASSERT(result_size_ <= 2);
6747#ifdef _WIN64
6748 // Simple results returned in rax (using default code).
6749 // Complex results must be written to address passed as first argument.
6750 // Use even numbers for minor keys, reserving the odd numbers for
6751 // CEntryDebugBreakStub.
6752 return (result_size_ < 2) ? 0 : result_size_ * 2;
6753#else
6754 // Single results returned in rax (both AMD64 and Win64 calling conventions)
6755 // and a struct of two pointers in rax+rdx (AMD64 calling convention only)
6756 // by default.
6757 return 0;
6758#endif
6759}
6760
6761
6762void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
6763   // The stack must contain the next handler, frame pointer, state, and return
6764   // address, in that order; the asserts below verify the layout.
6765 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
6766 StackHandlerConstants::kStateOffset);
6767 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
6768 StackHandlerConstants::kPCOffset);
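  // The pops below rely on this layout: next handler, frame pointer, state.
  // The return address left on top of the stack is consumed by the final ret.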
6769
6770 ExternalReference handler_address(Top::k_handler_address);
6771 __ movq(kScratchRegister, handler_address);
6772 __ movq(rsp, Operand(kScratchRegister, 0));
6773 // get next in chain
6774 __ pop(rcx);
6775 __ movq(Operand(kScratchRegister, 0), rcx);
6776 __ pop(rbp); // pop frame pointer
6777 __ pop(rdx); // remove state
6778
6779 // Before returning we restore the context from the frame pointer if not NULL.
6780 // The frame pointer is NULL in the exception handler of a JS entry frame.
6781 __ xor_(rsi, rsi); // tentatively set context pointer to NULL
6782 Label skip;
6783 __ cmpq(rbp, Immediate(0));
6784 __ j(equal, &skip);
6785 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
6786 __ bind(&skip);
6787 __ ret(0);
6788}
6789
6790
6791void CEntryStub::GenerateCore(MacroAssembler* masm,
6792 Label* throw_normal_exception,
6793 Label* throw_termination_exception,
6794 Label* throw_out_of_memory_exception,
6795 StackFrame::Type frame_type,
6796 bool do_gc,
6797 bool always_allocate_scope) {
6798 // rax: result parameter for PerformGC, if any.
6799 // rbx: pointer to C function (C callee-saved).
6800 // rbp: frame pointer (restored after C call).
6801 // rsp: stack pointer (restored after C call).
6802 // r14: number of arguments including receiver (C callee-saved).
6803 // r15: pointer to the first argument (C callee-saved).
6804 // This pointer is reused in LeaveExitFrame(), so it is stored in a
6805 // callee-saved register.
6806
6807 if (do_gc) {
6808 // Pass failure code returned from last attempt as first argument to GC.
6809#ifdef _WIN64
6810 __ movq(rcx, rax);
6811#else // ! defined(_WIN64)
6812 __ movq(rdi, rax);
6813#endif
6814 __ movq(kScratchRegister,
6815 FUNCTION_ADDR(Runtime::PerformGC),
6816 RelocInfo::RUNTIME_ENTRY);
6817 __ call(kScratchRegister);
6818 }
6819
6820 ExternalReference scope_depth =
6821 ExternalReference::heap_always_allocate_scope_depth();
6822 if (always_allocate_scope) {
6823 __ movq(kScratchRegister, scope_depth);
6824 __ incl(Operand(kScratchRegister, 0));
6825 }
6826
6827 // Call C function.
6828#ifdef _WIN64
6829   // The Windows 64-bit ABI passes the first four arguments in rcx, rdx, r8, r9.
6830   // Store the Arguments object just past the four Win64 parameter home slots.
6831 __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
6832 __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
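  // The four slots at rsp[0] through rsp[3 * kPointerSize] are the Win64
  // register-parameter home area; argc and argv are stored just above it.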
6833 if (result_size_ < 2) {
6834 // Pass a pointer to the Arguments object as the first argument.
6835 // Return result in single register (rax).
6836 __ lea(rcx, Operand(rsp, 4 * kPointerSize));
6837 } else {
6838 ASSERT_EQ(2, result_size_);
6839 // Pass a pointer to the result location as the first argument.
6840 __ lea(rcx, Operand(rsp, 6 * kPointerSize));
6841 // Pass a pointer to the Arguments object as the second argument.
6842 __ lea(rdx, Operand(rsp, 4 * kPointerSize));
6843 }
6844
6845#else // ! defined(_WIN64)
6846 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
6847 __ movq(rdi, r14); // argc.
6848 __ movq(rsi, r15); // argv.
6849#endif
6850 __ call(rbx);
6851 // Result is in rax - do not destroy this register!
6852
6853 if (always_allocate_scope) {
6854 __ movq(kScratchRegister, scope_depth);
6855 __ decl(Operand(kScratchRegister, 0));
6856 }
6857
6858 // Check for failure result.
6859 Label failure_returned;
6860 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
6861 __ lea(rcx, Operand(rax, 1));
6862 // Lower 2 bits of rcx are 0 iff rax has failure tag.
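  // The ASSERT above guarantees that adding 1 to a failure-tagged value clears
  // all bits covered by kFailureTagMask, so the testl below is zero exactly
  // when rax is a failure.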
6863 __ testl(rcx, Immediate(kFailureTagMask));
6864 __ j(zero, &failure_returned);
6865
6866 // Exit the JavaScript to C++ exit frame.
6867 __ LeaveExitFrame(frame_type, result_size_);
6868 __ ret(0);
6869
6870 // Handling of failure.
6871 __ bind(&failure_returned);
6872
6873 Label retry;
6874   // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
6875 ASSERT(Failure::RETRY_AFTER_GC == 0);
6876 __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
6877 __ j(zero, &retry);
6878
6879 // Special handling of out of memory exceptions.
6880 __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
6881 __ cmpq(rax, kScratchRegister);
6882 __ j(equal, throw_out_of_memory_exception);
6883
6884 // Retrieve the pending exception and clear the variable.
6885 ExternalReference pending_exception_address(Top::k_pending_exception_address);
6886 __ movq(kScratchRegister, pending_exception_address);
6887 __ movq(rax, Operand(kScratchRegister, 0));
6888 __ movq(rdx, ExternalReference::the_hole_value_location());
6889 __ movq(rdx, Operand(rdx, 0));
6890 __ movq(Operand(kScratchRegister, 0), rdx);
6891
6892 // Special handling of termination exceptions which are uncatchable
6893   // by JavaScript code.
6894 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
6895 __ j(equal, throw_termination_exception);
6896
6897 // Handle normal exception.
6898 __ jmp(throw_normal_exception);
6899
6900 // Retry.
6901 __ bind(&retry);
6902}
6903
6904
6905void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
6906 UncatchableExceptionType type) {
6907 // Fetch top stack handler.
6908 ExternalReference handler_address(Top::k_handler_address);
6909 __ movq(kScratchRegister, handler_address);
6910 __ movq(rsp, Operand(kScratchRegister, 0));
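  // The topmost StackHandler lives on the stack, so loading its address into
  // rsp discards every frame and handler above it in one step.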
6911
6912 // Unwind the handlers until the ENTRY handler is found.
6913 Label loop, done;
6914 __ bind(&loop);
6915 // Load the type of the current stack handler.
6916 const int kStateOffset = StackHandlerConstants::kStateOffset;
6917 __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
6918 __ j(equal, &done);
6919 // Fetch the next handler in the list.
6920 const int kNextOffset = StackHandlerConstants::kNextOffset;
6921 __ movq(rsp, Operand(rsp, kNextOffset));
6922 __ jmp(&loop);
6923 __ bind(&done);
6924
6925 // Set the top handler address to next handler past the current ENTRY handler.
6926 __ movq(kScratchRegister, handler_address);
6927 __ pop(Operand(kScratchRegister, 0));
6928
6929 if (type == OUT_OF_MEMORY) {
6930 // Set external caught exception to false.
6931 ExternalReference external_caught(Top::k_external_caught_exception_address);
6932 __ movq(rax, Immediate(false));
6933 __ store_rax(external_caught);
6934
6935 // Set pending exception and rax to out of memory exception.
6936 ExternalReference pending_exception(Top::k_pending_exception_address);
6937 __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
6938 __ store_rax(pending_exception);
6939 }
6940
6941 // Clear the context pointer.
6942 __ xor_(rsi, rsi);
6943
6944 // Restore registers from handler.
6945 ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
6946 StackHandlerConstants::kFPOffset);
6947 __ pop(rbp); // FP
6948 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
6949 StackHandlerConstants::kStateOffset);
6950 __ pop(rdx); // State
6951
6952 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
6953 StackHandlerConstants::kPCOffset);
6954 __ ret(0);
6955}
6956
6957
6958void CallFunctionStub::Generate(MacroAssembler* masm) {
6959 Label slow;
6960
6961 // Get the function to call from the stack.
6962 // +2 ~ receiver, return address
6963 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
6964
6965 // Check that the function really is a JavaScript function.
6966 __ JumpIfSmi(rdi, &slow);
6967   // Go to the slow case if we do not have a function.
6968 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
6969 __ j(not_equal, &slow);
6970
6971 // Fast-case: Just invoke the function.
6972 ParameterCount actual(argc_);
6973 __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
6974
6975 // Slow-case: Non-function called.
6976 __ bind(&slow);
6977 __ Set(rax, argc_);
6978 __ Set(rbx, 0);
6979 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
6980 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
6981 __ Jump(adaptor, RelocInfo::CODE_TARGET);
6982}
6983
6984
6985void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
6986 // rax: number of arguments including receiver
6987 // rbx: pointer to C function (C callee-saved)
6988 // rbp: frame pointer of calling JS frame (restored after C call)
6989 // rsp: stack pointer (restored after C call)
6990 // rsi: current context (restored)
6991
6992 // NOTE: Invocations of builtins may return failure objects
6993 // instead of a proper result. The builtin entry handles
6994 // this by performing a garbage collection and retrying the
6995 // builtin once.
6996
6997 StackFrame::Type frame_type = is_debug_break ?
6998 StackFrame::EXIT_DEBUG :
6999 StackFrame::EXIT;
7000
7001 // Enter the exit frame that transitions from JavaScript to C++.
7002 __ EnterExitFrame(frame_type, result_size_);
7003
7004 // rax: Holds the context at this point, but should not be used.
7005 // On entry to code generated by GenerateCore, it must hold
7006   // a failure result if the do_gc argument to GenerateCore
7007 // is true. This failure result can be the result of code
7008 // generated by a previous call to GenerateCore. The value
7009 // of rax is then passed to Runtime::PerformGC.
7010 // rbx: pointer to builtin function (C callee-saved).
7011 // rbp: frame pointer of exit frame (restored after C call).
7012 // rsp: stack pointer (restored after C call).
7013 // r14: number of arguments including receiver (C callee-saved).
7014 // r15: argv pointer (C callee-saved).
7015
7016 Label throw_normal_exception;
7017 Label throw_termination_exception;
7018 Label throw_out_of_memory_exception;
7019
7020 // Call into the runtime system.
7021 GenerateCore(masm,
7022 &throw_normal_exception,
7023 &throw_termination_exception,
7024 &throw_out_of_memory_exception,
7025 frame_type,
7026 false,
7027 false);
7028
7029 // Do space-specific GC and retry runtime call.
7030 GenerateCore(masm,
7031 &throw_normal_exception,
7032 &throw_termination_exception,
7033 &throw_out_of_memory_exception,
7034 frame_type,
7035 true,
7036 false);
7037
7038 // Do full GC and retry runtime call one final time.
7039 Failure* failure = Failure::InternalError();
7040 __ movq(rax, failure, RelocInfo::NONE);
7041 GenerateCore(masm,
7042 &throw_normal_exception,
7043 &throw_termination_exception,
7044 &throw_out_of_memory_exception,
7045 frame_type,
7046 true,
7047 true);
7048
7049 __ bind(&throw_out_of_memory_exception);
7050 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
7051
7052 __ bind(&throw_termination_exception);
7053 GenerateThrowUncatchable(masm, TERMINATION);
7054
7055 __ bind(&throw_normal_exception);
7056 GenerateThrowTOS(masm);
7057}
7058
7059
7060void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
7061 Label invoke, exit;
7062#ifdef ENABLE_LOGGING_AND_PROFILING
7063 Label not_outermost_js, not_outermost_js_2;
7064#endif
7065
7066 // Setup frame.
7067 __ push(rbp);
7068 __ movq(rbp, rsp);
7069
7070 // Push the stack frame type marker twice.
7071 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
7072 __ push(Immediate(Smi::FromInt(marker))); // context slot
7073 __ push(Immediate(Smi::FromInt(marker))); // function slot
7074 // Save callee-saved registers (X64 calling conventions).
7075 __ push(r12);
7076 __ push(r13);
7077 __ push(r14);
7078 __ push(r15);
7079 __ push(rdi);
7080 __ push(rsi);
7081 __ push(rbx);
7082 // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
7083 // callee-save in JS code as well.
7084
7085 // Save copies of the top frame descriptor on the stack.
7086 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
7087 __ load_rax(c_entry_fp);
7088 __ push(rax);
7089
7090#ifdef ENABLE_LOGGING_AND_PROFILING
7091 // If this is the outermost JS call, set js_entry_sp value.
7092 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
7093 __ load_rax(js_entry_sp);
7094 __ testq(rax, rax);
7095 __ j(not_zero, &not_outermost_js);
7096 __ movq(rax, rbp);
7097 __ store_rax(js_entry_sp);
7098 __ bind(&not_outermost_js);
7099#endif
7100
7101 // Call a faked try-block that does the invoke.
7102 __ call(&invoke);
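  // The call pushes the address of the next instruction, which becomes the
  // handler's return address; an exception that unwinds to this handler
  // resumes there with the exception object in rax.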
7103
7104 // Caught exception: Store result (exception) in the pending
7105 // exception field in the JSEnv and return a failure sentinel.
7106 ExternalReference pending_exception(Top::k_pending_exception_address);
7107 __ store_rax(pending_exception);
7108 __ movq(rax, Failure::Exception(), RelocInfo::NONE);
7109 __ jmp(&exit);
7110
7111 // Invoke: Link this frame into the handler chain.
7112 __ bind(&invoke);
7113 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
7114
7115 // Clear any pending exceptions.
7116 __ load_rax(ExternalReference::the_hole_value_location());
7117 __ store_rax(pending_exception);
7118
7119 // Fake a receiver (NULL).
7120 __ push(Immediate(0)); // receiver
7121
7122 // Invoke the function by calling through JS entry trampoline
7123 // builtin and pop the faked function when we return. We load the address
7124 // from an external reference instead of inlining the call target address
7125 // directly in the code, because the builtin stubs may not have been
7126 // generated yet at the time this code is generated.
7127 if (is_construct) {
7128 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
7129 __ load_rax(construct_entry);
7130 } else {
7131 ExternalReference entry(Builtins::JSEntryTrampoline);
7132 __ load_rax(entry);
7133 }
7134 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
7135 __ call(kScratchRegister);
7136
7137 // Unlink this frame from the handler chain.
7138 __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
7139 __ pop(Operand(kScratchRegister, 0));
7140 // Pop next_sp.
7141 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
7142
7143#ifdef ENABLE_LOGGING_AND_PROFILING
7144   // If the current RBP value is the same as the js_entry_sp value, the
7145   // current function is the outermost JS call.
7146 __ movq(kScratchRegister, js_entry_sp);
7147 __ cmpq(rbp, Operand(kScratchRegister, 0));
7148 __ j(not_equal, &not_outermost_js_2);
7149 __ movq(Operand(kScratchRegister, 0), Immediate(0));
7150 __ bind(&not_outermost_js_2);
7151#endif
7152
7153 // Restore the top frame descriptor from the stack.
7154 __ bind(&exit);
7155 __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
7156 __ pop(Operand(kScratchRegister, 0));
7157
7158 // Restore callee-saved registers (X64 conventions).
7159 __ pop(rbx);
7160 __ pop(rsi);
7161 __ pop(rdi);
7162 __ pop(r15);
7163 __ pop(r14);
7164 __ pop(r13);
7165 __ pop(r12);
7166 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
7167
7168 // Restore frame pointer and return.
7169 __ pop(rbp);
7170 __ ret(0);
7171}
7172
7173
7174// -----------------------------------------------------------------------------
7175// Implementation of stubs.
7176
7177// Stub classes have public member named masm, not masm_.
7178
7179void StackCheckStub::Generate(MacroAssembler* masm) {
7180 // Because builtins always remove the receiver from the stack, we
7181 // have to fake one to avoid underflowing the stack. The receiver
7182   // must be inserted below the return address on the stack, so we
7183   // temporarily pop the return address into a register.
7184 __ pop(rax);
7185 __ push(Immediate(Smi::FromInt(0)));
7186 __ push(rax);
7187
7188 // Do tail-call to runtime routine.
7189 Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
7190 __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
7191}
7192
7193
7194void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
7195 Label* need_gc,
7196 Register scratch,
7197 Register result) {
7198 // Allocate heap number in new space.
7199 __ AllocateInNewSpace(HeapNumber::kSize,
7200 result,
7201 scratch,
7202 no_reg,
7203 need_gc,
7204 TAG_OBJECT);
7205
7206 // Set the map and tag the result.
7207 __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
7208 __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
7209}
7210
7211
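// Loads the value of 'number' (a smi or a heap number) onto the x87 FPU stack.
// If 'number' is a smi, it is left untagged as a 32-bit integer on return.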
7212void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7213 Register number) {
7214 Label load_smi, done;
7215
7216 __ JumpIfSmi(number, &load_smi);
7217 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
7218 __ jmp(&done);
7219
7220 __ bind(&load_smi);
7221 __ SmiToInteger32(number, number);
7222 __ push(number);
7223 __ fild_s(Operand(rsp, 0));
7224 __ pop(number);
7225
7226 __ bind(&done);
7227}
7228
7229
7230void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7231 Register src,
7232 XMMRegister dst) {
7233 Label load_smi, done;
7234
7235 __ JumpIfSmi(src, &load_smi);
7236 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
7237 __ jmp(&done);
7238
7239 __ bind(&load_smi);
7240 __ SmiToInteger32(src, src);
7241 __ cvtlsi2sd(dst, src);
7242
7243 __ bind(&done);
7244}
7245
7246
7247void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7248 XMMRegister dst1,
7249 XMMRegister dst2) {
7250 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7251 LoadFloatOperand(masm, kScratchRegister, dst1);
7252 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7253 LoadFloatOperand(masm, kScratchRegister, dst2);
7254}
7255
7256
7257void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
7258 const Operand& src,
7259 Register dst) {
7260 // TODO(X64): Convert number operands to int32 values.
7261 // Don't convert a Smi to a double first.
7262 UNIMPLEMENTED();
7263}
7264
7265
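// Loads the operands at rsp[2 * kPointerSize] and rsp[1 * kPointerSize] onto
// the x87 FPU stack, in that order. Clobbers kScratchRegister.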
7266void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
7267 Label load_smi_1, load_smi_2, done_load_1, done;
7268 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7269 __ JumpIfSmi(kScratchRegister, &load_smi_1);
7270 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7271 __ bind(&done_load_1);
7272
7273 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7274 __ JumpIfSmi(kScratchRegister, &load_smi_2);
7275 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7276 __ jmp(&done);
7277
7278 __ bind(&load_smi_1);
7279 __ SmiToInteger32(kScratchRegister, kScratchRegister);
7280 __ push(kScratchRegister);
7281 __ fild_s(Operand(rsp, 0));
7282 __ pop(kScratchRegister);
7283 __ jmp(&done_load_1);
7284
7285 __ bind(&load_smi_2);
7286 __ SmiToInteger32(kScratchRegister, kScratchRegister);
7287 __ push(kScratchRegister);
7288 __ fild_s(Operand(rsp, 0));
7289 __ pop(kScratchRegister);
7290
7291 __ bind(&done);
7292}
7293
7294
7295void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7296 Register lhs,
7297 Register rhs) {
7298 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
7299 __ JumpIfSmi(lhs, &load_smi_lhs);
7300 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
7301 __ bind(&done_load_lhs);
7302
7303 __ JumpIfSmi(rhs, &load_smi_rhs);
7304 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
7305 __ jmp(&done);
7306
7307 __ bind(&load_smi_lhs);
7308 __ SmiToInteger64(kScratchRegister, lhs);
7309 __ push(kScratchRegister);
7310 __ fild_d(Operand(rsp, 0));
7311 __ pop(kScratchRegister);
7312 __ jmp(&done_load_lhs);
7313
7314 __ bind(&load_smi_rhs);
7315 __ SmiToInteger64(kScratchRegister, rhs);
7316 __ push(kScratchRegister);
7317 __ fild_d(Operand(rsp, 0));
7318 __ pop(kScratchRegister);
7319
7320 __ bind(&done);
7321}
7322
7323
7324void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
7325 Label* non_float) {
7326 Label test_other, done;
7327 // Test if both operands are numbers (heap_numbers or smis).
7328 // If not, jump to label non_float.
7329 __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
7330 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
7331 __ j(not_equal, non_float); // The argument in rdx is not a number.
7332
7333 __ bind(&test_other);
7334 __ JumpIfSmi(rax, &done); // argument in rax is OK
7335 __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
7336 __ j(not_equal, non_float); // The argument in rax is not a number.
7337
7338 // Fall-through: Both operands are numbers.
7339 __ bind(&done);
7340}
7341
7342
7343const char* GenericBinaryOpStub::GetName() {
7344 switch (op_) {
7345 case Token::ADD: return "GenericBinaryOpStub_ADD";
7346 case Token::SUB: return "GenericBinaryOpStub_SUB";
7347 case Token::MUL: return "GenericBinaryOpStub_MUL";
7348 case Token::DIV: return "GenericBinaryOpStub_DIV";
7349 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
7350 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
7351 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
7352 case Token::SAR: return "GenericBinaryOpStub_SAR";
7353 case Token::SHL: return "GenericBinaryOpStub_SHL";
7354 case Token::SHR: return "GenericBinaryOpStub_SHR";
7355 default: return "GenericBinaryOpStub";
7356 }
7357}
7358
7359
7360void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
7361 // Perform fast-case smi code for the operation (rax <op> rbx) and
7362 // leave result in register rax.
7363
7364 // Smi check both operands.
7365 __ JumpIfNotBothSmi(rax, rbx, slow);
7366
7367 switch (op_) {
7368 case Token::ADD: {
7369 __ SmiAdd(rax, rax, rbx, slow);
7370 break;
7371 }
7372
7373 case Token::SUB: {
7374 __ SmiSub(rax, rax, rbx, slow);
7375 break;
7376 }
7377
7378 case Token::MUL:
7379 __ SmiMul(rax, rax, rbx, slow);
7380 break;
7381
7382 case Token::DIV:
7383 __ SmiDiv(rax, rax, rbx, slow);
7384 break;
7385
7386 case Token::MOD:
7387 __ SmiMod(rax, rax, rbx, slow);
7388 break;
7389
7390 case Token::BIT_OR:
7391 __ SmiOr(rax, rax, rbx);
7392 break;
7393
7394 case Token::BIT_AND:
7395 __ SmiAnd(rax, rax, rbx);
7396 break;
7397
7398 case Token::BIT_XOR:
7399 __ SmiXor(rax, rax, rbx);
7400 break;
7401
7402 case Token::SHL:
7403 case Token::SHR:
7404 case Token::SAR:
7405       // Move the second operand into register rcx.
7406 __ movl(rcx, rbx);
7407 // Perform the operation.
7408 switch (op_) {
7409 case Token::SAR:
7410 __ SmiShiftArithmeticRight(rax, rax, rbx);
7411 break;
7412 case Token::SHR:
7413 __ SmiShiftLogicalRight(rax, rax, rbx, slow);
7414 break;
7415 case Token::SHL:
7416 __ SmiShiftLeft(rax, rax, rbx, slow);
7417 break;
7418 default:
7419 UNREACHABLE();
7420 }
7421 break;
7422
7423 default:
7424 UNREACHABLE();
7425 break;
7426 }
7427}
7428
7429
7430void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
7431 Label call_runtime;
7432 if (flags_ == SMI_CODE_IN_STUB) {
7433 // The fast case smi code wasn't inlined in the stub caller
7434 // code. Generate it here to speed up common operations.
7435 Label slow;
7436 __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
7437 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
7438 GenerateSmiCode(masm, &slow);
7439 __ ret(2 * kPointerSize); // remove both operands
7440
7441 // Too bad. The fast case smi code didn't succeed.
7442 __ bind(&slow);
7443 }
7444
7445 // Setup registers.
7446 __ movq(rax, Operand(rsp, 1 * kPointerSize)); // get y
7447 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // get x
7448
7449 // Floating point case.
7450 switch (op_) {
7451 case Token::ADD:
7452 case Token::SUB:
7453 case Token::MUL:
7454 case Token::DIV: {
7455 // rax: y
7456 // rdx: x
7457 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
7458 // Fast-case: Both operands are numbers.
7459 // Allocate a heap number, if needed.
7460 Label skip_allocation;
7461 switch (mode_) {
7462 case OVERWRITE_LEFT:
7463 __ movq(rax, rdx);
7464 // Fall through!
7465 case OVERWRITE_RIGHT:
7466 // If the argument in rax is already an object, we skip the
7467 // allocation of a heap number.
7468 __ JumpIfNotSmi(rax, &skip_allocation);
7469 // Fall through!
7470 case NO_OVERWRITE:
7471 FloatingPointHelper::AllocateHeapNumber(masm,
7472 &call_runtime,
7473 rcx,
7474 rax);
7475 __ bind(&skip_allocation);
7476 break;
7477 default: UNREACHABLE();
7478 }
7479 // xmm4 and xmm5 are volatile XMM registers.
7480 FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
7481
7482 switch (op_) {
7483 case Token::ADD: __ addsd(xmm4, xmm5); break;
7484 case Token::SUB: __ subsd(xmm4, xmm5); break;
7485 case Token::MUL: __ mulsd(xmm4, xmm5); break;
7486 case Token::DIV: __ divsd(xmm4, xmm5); break;
7487 default: UNREACHABLE();
7488 }
7489 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
7490 __ ret(2 * kPointerSize);
7491 }
7492 case Token::MOD: {
7493 // For MOD we go directly to runtime in the non-smi case.
7494 break;
7495 }
7496 case Token::BIT_OR:
7497 case Token::BIT_AND:
7498 case Token::BIT_XOR:
7499 case Token::SAR:
7500 case Token::SHL:
7501 case Token::SHR: {
7502 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
7503 // TODO(X64): Don't convert a Smi to float and then back to int32
7504 // afterwards.
7505 FloatingPointHelper::LoadFloatOperands(masm);
7506
7507 Label skip_allocation, non_smi_result, operand_conversion_failure;
7508
7509 // Reserve space for converted numbers.
7510 __ subq(rsp, Immediate(2 * kPointerSize));
7511
7512 if (use_sse3_) {
7513 // Truncate the operands to 32-bit integers and check for
7514 // exceptions in doing so.
7515 CpuFeatures::Scope scope(CpuFeatures::SSE3);
7516 __ fisttp_s(Operand(rsp, 0 * kPointerSize));
7517 __ fisttp_s(Operand(rsp, 1 * kPointerSize));
7518 __ fnstsw_ax();
7519 __ testl(rax, Immediate(1));
7520 __ j(not_zero, &operand_conversion_failure);
7521 } else {
7522 // Check if right operand is int32.
7523 __ fist_s(Operand(rsp, 0 * kPointerSize));
7524 __ fild_s(Operand(rsp, 0 * kPointerSize));
7525 __ fucompp();
7526 __ fnstsw_ax();
7527 if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
7528 __ sahf();
7529 __ j(not_zero, &operand_conversion_failure);
7530 __ j(parity_even, &operand_conversion_failure);
7531 } else {
7532 __ and_(rax, Immediate(0x4400));
7533 __ cmpl(rax, Immediate(0x4000));
7534 __ j(not_zero, &operand_conversion_failure);
7535 }
7536 // Check if left operand is int32.
7537 __ fist_s(Operand(rsp, 1 * kPointerSize));
7538 __ fild_s(Operand(rsp, 1 * kPointerSize));
7539 __ fucompp();
7540 __ fnstsw_ax();
7541 if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
7542 __ sahf();
7543 __ j(not_zero, &operand_conversion_failure);
7544 __ j(parity_even, &operand_conversion_failure);
7545 } else {
7546 __ and_(rax, Immediate(0x4400));
7547 __ cmpl(rax, Immediate(0x4000));
7548 __ j(not_zero, &operand_conversion_failure);
7549 }
7550 }
7551
7552 // Get int32 operands and perform bitop.
7553 __ pop(rcx);
7554 __ pop(rax);
7555 switch (op_) {
7556 case Token::BIT_OR: __ or_(rax, rcx); break;
7557 case Token::BIT_AND: __ and_(rax, rcx); break;
7558 case Token::BIT_XOR: __ xor_(rax, rcx); break;
7559 case Token::SAR: __ sarl(rax); break;
7560 case Token::SHL: __ shll(rax); break;
7561 case Token::SHR: __ shrl(rax); break;
7562 default: UNREACHABLE();
7563 }
7564 if (op_ == Token::SHR) {
7565 // Check if result is non-negative and fits in a smi.
7566 __ testl(rax, Immediate(0xc0000000));
7567 __ j(not_zero, &non_smi_result);
7568 } else {
7569 // Check if result fits in a smi.
7570 __ cmpl(rax, Immediate(0xc0000000));
7571 __ j(negative, &non_smi_result);
7572 }
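      // The 32-bit result now fits in a smi: it is non-negative and below 2^30
      // for SHR, and within [-2^30, 2^30) for the other operations.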
7573 // Tag smi result and return.
7574 __ Integer32ToSmi(rax, rax);
7575 __ ret(2 * kPointerSize);
7576
7577 // All ops except SHR return a signed int32 that we load in a HeapNumber.
7578 if (op_ != Token::SHR) {
7579 __ bind(&non_smi_result);
7580 // Allocate a heap number if needed.
7581 __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
7582 switch (mode_) {
7583 case OVERWRITE_LEFT:
7584 case OVERWRITE_RIGHT:
7585 // If the operand was an object, we skip the
7586 // allocation of a heap number.
7587 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
7588 1 * kPointerSize : 2 * kPointerSize));
7589 __ JumpIfNotSmi(rax, &skip_allocation);
7590 // Fall through!
7591 case NO_OVERWRITE:
7592 FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
7593 rcx, rax);
7594 __ bind(&skip_allocation);
7595 break;
7596 default: UNREACHABLE();
7597 }
7598 // Store the result in the HeapNumber and return.
7599 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
7600 __ fild_s(Operand(rsp, 1 * kPointerSize));
7601 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
7602 __ ret(2 * kPointerSize);
7603 }
7604
7605 // Clear the FPU exception flag and reset the stack before calling
7606 // the runtime system.
7607 __ bind(&operand_conversion_failure);
7608 __ addq(rsp, Immediate(2 * kPointerSize));
7609 if (use_sse3_) {
7610 // If we've used the SSE3 instructions for truncating the
7611 // floating point values to integers and it failed, we have a
7612 // pending #IA exception. Clear it.
7613 __ fnclex();
7614 } else {
7615 // The non-SSE3 variant does early bailout if the right
7616 // operand isn't a 32-bit integer, so we may have a single
7617 // value on the FPU stack we need to get rid of.
7618 __ ffree(0);
7619 }
7620
7621 // SHR should return uint32 - go to runtime for non-smi/negative result.
7622 if (op_ == Token::SHR) {
7623 __ bind(&non_smi_result);
7624 }
7625 __ movq(rax, Operand(rsp, 1 * kPointerSize));
7626 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
7627 break;
7628 }
7629 default: UNREACHABLE(); break;
7630 }
7631
7632 // If all else fails, use the runtime system to get the correct
7633 // result.
7634 __ bind(&call_runtime);
7635 switch (op_) {
7636 case Token::ADD:
7637 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
7638 break;
7639 case Token::SUB:
7640 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
7641 break;
7642 case Token::MUL:
7643 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
7644 break;
7645 case Token::DIV:
7646 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
7647 break;
7648 case Token::MOD:
7649 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
7650 break;
7651 case Token::BIT_OR:
7652 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
7653 break;
7654 case Token::BIT_AND:
7655 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
7656 break;
7657 case Token::BIT_XOR:
7658 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
7659 break;
7660 case Token::SAR:
7661 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
7662 break;
7663 case Token::SHL:
7664 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
7665 break;
7666 case Token::SHR:
7667 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
7668 break;
7669 default:
7670 UNREACHABLE();
7671 }
7672}
7673
7674
7675int CompareStub::MinorKey() {
7676   // Encode the two parameters in a unique 16-bit value.
7677 ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
7678 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
7679}
7680
7681
7682#undef __
7683
7684} } // namespace v8::internal