1// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "bootstrapper.h"
31#include "codegen-inl.h"
32#include "debug.h"
33#include "ic-inl.h"
34#include "parser.h"
35#include "register-allocator-inl.h"
36#include "scopes.h"
37
38namespace v8 {
39namespace internal {
40
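// The '__' macro below gives assembler calls the conventional V8 shorthand:
// '__ insn(...)' expands (via ACCESS_MASM) to 'masm_->insn(...)'.  A few
// places below deliberately use 'masm_->' directly because, in generated
// code coverage builds, the macro expands to something that cannot be used
// as an expression (see the patch-site comments in the deferred keyed
// load/store code).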
41#define __ ACCESS_MASM(masm_)
42
43// -------------------------------------------------------------------------
44// Platform-specific DeferredCode functions.
45
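// The registers_ array records, for each register, what must be done when
// entering and leaving the deferred code: kIgnore (nothing), kPush (save and
// restore via the stack), or a frame offset from rbp, optionally tagged with
// kSyncedFlag to indicate the value is already in memory and does not need
// to be stored again on entry.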
46void DeferredCode::SaveRegisters() {
47 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
48 int action = registers_[i];
49 if (action == kPush) {
50 __ push(RegisterAllocator::ToRegister(i));
51 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
52 __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
53 }
54 }
55}
56
57
58void DeferredCode::RestoreRegisters() {
59  // Restore registers in reverse order so pushed values are popped correctly.
60 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
61 int action = registers_[i];
62 if (action == kPush) {
63 __ pop(RegisterAllocator::ToRegister(i));
64 } else if (action != kIgnore) {
65 action &= ~kSyncedFlag;
66 __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
67 }
68 }
69}
70
71
72// -------------------------------------------------------------------------
73// CodeGenState implementation.
74
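// CodeGenState objects behave like a stack: each constructor installs the
// new state on the owning CodeGenerator (recording the previous state, if
// any), and the destructor restores that previous state.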
75CodeGenState::CodeGenState(CodeGenerator* owner)
76 : owner_(owner),
77 typeof_state_(NOT_INSIDE_TYPEOF),
78 destination_(NULL),
79 previous_(NULL) {
80 owner_->set_state(this);
81}
82
83
84CodeGenState::CodeGenState(CodeGenerator* owner,
85 TypeofState typeof_state,
86 ControlDestination* destination)
87 : owner_(owner),
88 typeof_state_(typeof_state),
89 destination_(destination),
90 previous_(owner->state()) {
91 owner_->set_state(this);
92}
93
94
95CodeGenState::~CodeGenState() {
96 ASSERT(owner_->state() == this);
97 owner_->set_state(previous_);
98}
99
100
101// -------------------------------------------------------------------------
102// Deferred code objects
103//
104// These subclasses of DeferredCode emit pieces of code at the end of the
105// generated code.  They are branched to from the generated code and keep
106// slower, rarely taken paths out of its main body.
107// Many of them call a code stub or a runtime function.
108
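// Mirrors DeferredInlineSmiAddReversed below, but for dst + value: the
// speculative addition either overflowed or dst was not a smi, so the
// deferred code is expected to undo the speculative operation and call the
// appropriate add stub, leaving the result in dst.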
109class DeferredInlineSmiAdd: public DeferredCode {
110 public:
111 DeferredInlineSmiAdd(Register dst,
112 Smi* value,
113 OverwriteMode overwrite_mode)
114 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
115 set_comment("[ DeferredInlineSmiAdd");
116 }
117
118 virtual void Generate();
119
120 private:
121 Register dst_;
122 Smi* value_;
123 OverwriteMode overwrite_mode_;
124};
125
126
127// The result of value + src is in dst. It either overflowed or was not
128// smi tagged. Undo the speculative addition and call the appropriate
129// specialized stub for add. The result is left in dst.
130class DeferredInlineSmiAddReversed: public DeferredCode {
131 public:
132 DeferredInlineSmiAddReversed(Register dst,
133 Smi* value,
134 OverwriteMode overwrite_mode)
135 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
136 set_comment("[ DeferredInlineSmiAddReversed");
137 }
138
139 virtual void Generate();
140
141 private:
142 Register dst_;
143 Smi* value_;
144 OverwriteMode overwrite_mode_;
145};
146
147
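// Analogous to DeferredInlineSmiAdd above, but for dst - value: the deferred
// code is expected to undo the speculative subtraction and call the
// appropriate stub, leaving the result in dst.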
148class DeferredInlineSmiSub: public DeferredCode {
149 public:
150 DeferredInlineSmiSub(Register dst,
151 Smi* value,
152 OverwriteMode overwrite_mode)
153 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
154 set_comment("[ DeferredInlineSmiSub");
155 }
156
157 virtual void Generate();
158
159 private:
160 Register dst_;
161 Smi* value_;
162 OverwriteMode overwrite_mode_;
163};
164
165
166// Call the appropriate binary operation stub to compute src op value
167// and leave the result in dst.
168class DeferredInlineSmiOperation: public DeferredCode {
169 public:
170 DeferredInlineSmiOperation(Token::Value op,
171 Register dst,
172 Register src,
173 Smi* value,
174 OverwriteMode overwrite_mode)
175 : op_(op),
176 dst_(dst),
177 src_(src),
178 value_(value),
179 overwrite_mode_(overwrite_mode) {
180 set_comment("[ DeferredInlineSmiOperation");
181 }
182
183 virtual void Generate();
184
185 private:
186 Token::Value op_;
187 Register dst_;
188 Register src_;
189 Smi* value_;
190 OverwriteMode overwrite_mode_;
191};
192
193
194class FloatingPointHelper : public AllStatic {
195 public:
196 // Code pattern for loading a floating point value. Input value must
197 // be either a smi or a heap number object (fp value). Requirements:
198 // operand on TOS+1. Returns operand as floating point number on FPU
199 // stack.
200 static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
201
202 // Code pattern for loading a floating point value. Input value must
203 // be either a smi or a heap number object (fp value). Requirements:
204 // operand in src register. Returns operand as floating point number
205 // in XMM register
206 static void LoadFloatOperand(MacroAssembler* masm,
207 Register src,
208 XMMRegister dst);
209
210 // Code pattern for loading floating point values. Input values must
211 // be either smi or heap number objects (fp values). Requirements:
212  // operand_1 on TOS+1, operand_2 on TOS+2.  Returns operands as
213 // floating point numbers in XMM registers.
214 static void LoadFloatOperands(MacroAssembler* masm,
215 XMMRegister dst1,
216 XMMRegister dst2);
217
218 // Code pattern for loading floating point values onto the fp stack.
219 // Input values must be either smi or heap number objects (fp values).
220 // Requirements:
221 // Register version: operands in registers lhs and rhs.
222 // Stack version: operands on TOS+1 and TOS+2.
223 // Returns operands as floating point numbers on fp stack.
224 static void LoadFloatOperands(MacroAssembler* masm);
225 static void LoadFloatOperands(MacroAssembler* masm,
226 Register lhs,
227 Register rhs);
228
229 // Code pattern for loading a floating point value and converting it
230 // to a 32 bit integer. Input value must be either a smi or a heap number
231 // object.
232  // Returns the operand as a 32-bit sign-extended integer in a
233  // general-purpose register.
234 static void LoadInt32Operand(MacroAssembler* masm,
235 const Operand& src,
236 Register dst);
237
238 // Test if operands are smi or number objects (fp). Requirements:
239 // operand_1 in rax, operand_2 in rdx; falls through on float or smi
240 // operands, jumps to the non_float label otherwise.
241  static void CheckNumberOperands(MacroAssembler* masm,
242 Label* non_float);
243};
244
245
246// -----------------------------------------------------------------------------
247// CodeGenerator implementation.
248
249CodeGenerator::CodeGenerator(int buffer_size,
250 Handle<Script> script,
251 bool is_eval)
252 : is_eval_(is_eval),
253 script_(script),
254 deferred_(8),
255 masm_(new MacroAssembler(NULL, buffer_size)),
256 scope_(NULL),
257 frame_(NULL),
258 allocator_(NULL),
259 state_(NULL),
260 loop_nesting_(0),
261 function_return_is_shadowed_(false),
262 in_spilled_code_(false) {
263}
264
265
266void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
267 // Call the runtime to declare the globals. The inevitable call
268 // will sync frame elements to memory anyway, so we do it eagerly to
269 // allow us to push the arguments directly into place.
270 frame_->SyncRange(0, frame_->element_count() - 1);
271
272 __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
273  frame_->EmitPush(rsi);  // The context is the first argument.
274  frame_->EmitPush(kScratchRegister);
275  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
276  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
277 // Return value is ignored.
278}
279
280
281void CodeGenerator::GenCode(FunctionLiteral* function) {
282 // Record the position for debugging purposes.
283 CodeForFunctionPosition(function);
284 ZoneList<Statement*>* body = function->body();
285
286 // Initialize state.
287 ASSERT(scope_ == NULL);
288 scope_ = function->scope();
289 ASSERT(allocator_ == NULL);
290 RegisterAllocator register_allocator(this);
291 allocator_ = &register_allocator;
292 ASSERT(frame_ == NULL);
293 frame_ = new VirtualFrame();
294 set_in_spilled_code(false);
295
296 // Adjust for function-level loop nesting.
297 loop_nesting_ += function->loop_nesting();
298
299 JumpTarget::set_compiling_deferred_code(false);
300
301#ifdef DEBUG
302 if (strlen(FLAG_stop_at) > 0 &&
303 function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
304 frame_->SpillAll();
305 __ int3();
306 }
307#endif
308
309 // New scope to get automatic timing calculation.
310 { // NOLINT
311 HistogramTimerScope codegen_timer(&Counters::code_generation);
312 CodeGenState state(this);
313
314 // Entry:
315 // Stack: receiver, arguments, return address.
316 // rbp: caller's frame pointer
317 // rsp: stack pointer
318 // rdi: called JS function
319 // rsi: callee's context
320 allocator_->Initialize();
321 frame_->Enter();
322
323 // Allocate space for locals and initialize them.
324 frame_->AllocateStackSlots();
325 // Initialize the function return target after the locals are set
326 // up, because it needs the expected frame height from the frame.
327 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
328 function_return_is_shadowed_ = false;
329
330 // Allocate the local context if needed.
331 if (scope_->num_heap_slots() > 0) {
332 Comment cmnt(masm_, "[ allocate local context");
333 // Allocate local context.
334 // Get outer context and create a new context based on it.
335 frame_->PushFunction();
336 Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
337
338 // Update context local.
339 frame_->SaveContextRegister();
340
341 // Verify that the runtime call result and rsi agree.
342 if (FLAG_debug_code) {
343 __ cmpq(context.reg(), rsi);
344 __ Assert(equal, "Runtime::NewContext should end up in rsi");
345 }
346 }
347
348 // TODO(1241774): Improve this code:
349 // 1) only needed if we have a context
350 // 2) no need to recompute context ptr every single time
351 // 3) don't copy parameter operand code from SlotOperand!
352 {
353 Comment cmnt2(masm_, "[ copy context parameters into .context");
354
355 // Note that iteration order is relevant here! If we have the same
356 // parameter twice (e.g., function (x, y, x)), and that parameter
357 // needs to be copied into the context, it must be the last argument
358 // passed to the parameter that needs to be copied. This is a rare
359 // case so we don't check for it, instead we rely on the copying
360 // order: such a parameter is copied repeatedly into the same
361 // context location and thus the last value is what is seen inside
362 // the function.
363 for (int i = 0; i < scope_->num_parameters(); i++) {
364 Variable* par = scope_->parameter(i);
365 Slot* slot = par->slot();
366 if (slot != NULL && slot->type() == Slot::CONTEXT) {
367 // The use of SlotOperand below is safe in unspilled code
368 // because the slot is guaranteed to be a context slot.
369 //
370 // There are no parameters in the global scope.
371 ASSERT(!scope_->is_global_scope());
372 frame_->PushParameterAt(i);
373 Result value = frame_->Pop();
374 value.ToRegister();
375
376 // SlotOperand loads context.reg() with the context object
377 // stored to, used below in RecordWrite.
378 Result context = allocator_->Allocate();
379 ASSERT(context.is_valid());
380 __ movq(SlotOperand(slot, context.reg()), value.reg());
381 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
382 Result scratch = allocator_->Allocate();
383 ASSERT(scratch.is_valid());
384 frame_->Spill(context.reg());
385 frame_->Spill(value.reg());
386 __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
387 }
388 }
389 }
390
391 // Store the arguments object. This must happen after context
392 // initialization because the arguments object may be stored in
393 // the context.
394 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
395 StoreArgumentsObject(true);
396 }
397
398 // Generate code to 'execute' declarations and initialize functions
399 // (source elements). In case of an illegal redeclaration we need to
400 // handle that instead of processing the declarations.
401 if (scope_->HasIllegalRedeclaration()) {
402 Comment cmnt(masm_, "[ illegal redeclarations");
403 scope_->VisitIllegalRedeclaration(this);
404 } else {
405 Comment cmnt(masm_, "[ declarations");
406 ProcessDeclarations(scope_->declarations());
407 // Bail out if a stack-overflow exception occurred when processing
408 // declarations.
409 if (HasStackOverflow()) return;
410 }
411
412 if (FLAG_trace) {
413 frame_->CallRuntime(Runtime::kTraceEnter, 0);
414 // Ignore the return value.
415 }
416 CheckStack();
417
418 // Compile the body of the function in a vanilla state. Don't
419 // bother compiling all the code if the scope has an illegal
420 // redeclaration.
421 if (!scope_->HasIllegalRedeclaration()) {
422 Comment cmnt(masm_, "[ function body");
423#ifdef DEBUG
424 bool is_builtin = Bootstrapper::IsActive();
425 bool should_trace =
426 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
427 if (should_trace) {
428 frame_->CallRuntime(Runtime::kDebugTrace, 0);
429 // Ignore the return value.
430 }
431#endif
432 VisitStatements(body);
433
434 // Handle the return from the function.
435 if (has_valid_frame()) {
436 // If there is a valid frame, control flow can fall off the end of
437 // the body. In that case there is an implicit return statement.
438 ASSERT(!function_return_is_shadowed_);
439 CodeForReturnPosition(function);
440 frame_->PrepareForReturn();
441 Result undefined(Factory::undefined_value());
442 if (function_return_.is_bound()) {
443 function_return_.Jump(&undefined);
444 } else {
445 function_return_.Bind(&undefined);
446 GenerateReturnSequence(&undefined);
447 }
448 } else if (function_return_.is_linked()) {
449 // If the return target has dangling jumps to it, then we have not
450 // yet generated the return sequence. This can happen when (a)
451 // control does not flow off the end of the body so we did not
452 // compile an artificial return statement just above, and (b) there
453 // are return statements in the body but (c) they are all shadowed.
454 Result return_value;
455 function_return_.Bind(&return_value);
456 GenerateReturnSequence(&return_value);
457 }
458 }
459 }
460
461 // Adjust for function-level loop nesting.
462 loop_nesting_ -= function->loop_nesting();
463
464 // Code generation state must be reset.
465 ASSERT(state_ == NULL);
466 ASSERT(loop_nesting() == 0);
467 ASSERT(!function_return_is_shadowed_);
468 function_return_.Unuse();
469 DeleteFrame();
470
471 // Process any deferred code using the register allocator.
472 if (!HasStackOverflow()) {
473 HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
474 JumpTarget::set_compiling_deferred_code(true);
475 ProcessDeferred();
476 JumpTarget::set_compiling_deferred_code(false);
477 }
478
479 // There is no need to delete the register allocator, it is a
480 // stack-allocated local.
481 allocator_ = NULL;
482 scope_ = NULL;
483}
484
485void CodeGenerator::GenerateReturnSequence(Result* return_value) {
486 // The return value is a live (but not currently reference counted)
487 // reference to rax. This is safe because the current frame does not
488 // contain a reference to rax (it is prepared for the return by spilling
489 // all registers).
490 if (FLAG_trace) {
491 frame_->Push(return_value);
492 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
493 }
494 return_value->ToRegister(rax);
495
496 // Add a label for checking the size of the code used for returning.
497#ifdef DEBUG
498 Label check_exit_codesize;
499 masm_->bind(&check_exit_codesize);
500#endif
501
502 // Leave the frame and return popping the arguments and the
503 // receiver.
504 frame_->Exit();
505 masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
506#ifdef ENABLE_DEBUGGER_SUPPORT
507 // Add padding that will be overwritten by a debugger breakpoint.
508 // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
509 // with length 7 (3 + 1 + 3).
510 const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
511 for (int i = 0; i < kPadding; ++i) {
512 masm_->int3();
513 }
514 // Check that the size of the code used for returning matches what is
515 // expected by the debugger.
516 ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
517 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
518#endif
519 DeleteFrame();
520}
521
522
523#ifdef DEBUG
524bool CodeGenerator::HasValidEntryRegisters() {
525 return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
526 && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
527 && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
528 && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
529 && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
530 && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
531 && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
532 && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
533 && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
534 && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
535 && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
536}
537#endif
538
539
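// Deferred code for a keyed load that misses the inlined fast case.  It
// calls the KeyedLoadIC stub and, through the recorded patch site, emits a
// test instruction that encodes the distance back to the inlined map check
// so the IC initialization code can find and patch it (see Generate() below).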
540class DeferredReferenceGetKeyedValue: public DeferredCode {
541 public:
542 explicit DeferredReferenceGetKeyedValue(Register dst,
543 Register receiver,
544 Register key,
545 bool is_global)
546 : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
547 set_comment("[ DeferredReferenceGetKeyedValue");
548 }
549
550 virtual void Generate();
551
552 Label* patch_site() { return &patch_site_; }
553
554 private:
555 Label patch_site_;
556 Register dst_;
557 Register receiver_;
558 Register key_;
559 bool is_global_;
560};
561
562
563void DeferredReferenceGetKeyedValue::Generate() {
564 __ push(receiver_); // First IC argument.
565 __ push(key_); // Second IC argument.
566
567 // Calculate the delta from the IC call instruction to the map check
568 // movq instruction in the inlined version. This delta is stored in
569 // a test(rax, delta) instruction after the call so that we can find
570 // it in the IC initialization code and patch the movq instruction.
571 // This means that we cannot allow test instructions after calls to
572 // KeyedLoadIC stubs in other places.
573 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
574 RelocInfo::Mode mode = is_global_
575 ? RelocInfo::CODE_TARGET_CONTEXT
576 : RelocInfo::CODE_TARGET;
577 __ Call(ic, mode);
578 // The delta from the start of the map-compare instruction to the
579 // test instruction. We use masm_-> directly here instead of the __
580 // macro because the macro sometimes uses macro expansion to turn
581 // into something that can't return a value. This is encountered
582 // when doing generated code coverage tests.
583 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
584 // Here we use masm_-> instead of the __ macro because this is the
585 // instruction that gets patched and coverage code gets in the way.
586 // TODO(X64): Consider whether it's worth switching the test to a
587 // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
588 // be generated normally.
589 masm_->testl(rax, Immediate(-delta_to_patch_site));
590 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
591
592 if (!dst_.is(rax)) __ movq(dst_, rax);
593 __ pop(key_);
594 __ pop(receiver_);
595}
596
597
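// The keyed-store counterpart of DeferredReferenceGetKeyedValue above: calls
// the KeyedStoreIC stub and records a patch site for the inlined map check
// in the same way.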
598class DeferredReferenceSetKeyedValue: public DeferredCode {
599 public:
600 DeferredReferenceSetKeyedValue(Register value,
601 Register key,
602 Register receiver)
603 : value_(value), key_(key), receiver_(receiver) {
604 set_comment("[ DeferredReferenceSetKeyedValue");
605 }
606
607 virtual void Generate();
608
609 Label* patch_site() { return &patch_site_; }
610
611 private:
612 Register value_;
613 Register key_;
614 Register receiver_;
615 Label patch_site_;
616};
617
618
619void DeferredReferenceSetKeyedValue::Generate() {
620 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
621 // Push receiver and key arguments on the stack.
622 __ push(receiver_);
623 __ push(key_);
624 // Move value argument to eax as expected by the IC stub.
625 if (!value_.is(rax)) __ movq(rax, value_);
626 // Call the IC stub.
627 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
628 __ Call(ic, RelocInfo::CODE_TARGET);
629 // The delta from the start of the map-compare instructions (initial movq)
630 // to the test instruction. We use masm_-> directly here instead of the
631 // __ macro because the macro sometimes uses macro expansion to turn
632 // into something that can't return a value. This is encountered
633 // when doing generated code coverage tests.
634 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
635 // Here we use masm_-> instead of the __ macro because this is the
636 // instruction that gets patched and coverage code gets in the way.
637 masm_->testl(rax, Immediate(-delta_to_patch_site));
638 // Restore value (returned from store IC), key and receiver
639 // registers.
640 if (!value_.is(rax)) __ movq(value_, rax);
641 __ pop(key_);
642 __ pop(receiver_);
643}
644
645
646class CallFunctionStub: public CodeStub {
647 public:
648 CallFunctionStub(int argc, InLoopFlag in_loop)
649 : argc_(argc), in_loop_(in_loop) { }
650
651 void Generate(MacroAssembler* masm);
652
653 private:
654 int argc_;
655 InLoopFlag in_loop_;
656
657#ifdef DEBUG
658 void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
659#endif
660
661 Major MajorKey() { return CallFunction; }
662 int MinorKey() { return argc_; }
663 InLoopFlag InLoop() { return in_loop_; }
664};
665
666
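// Compile a call of the form x.apply(y, arguments) without materializing the
// arguments object when it has not been allocated yet: the actual arguments
// are copied straight from the caller's frame (or its arguments adaptor
// frame), and the code falls back to allocating the arguments object and
// calling the generic Function.prototype.apply when the fast-case checks
// below fail.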
667void CodeGenerator::CallApplyLazy(Property* apply,
668 Expression* receiver,
669 VariableProxy* arguments,
670 int position) {
671 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
672 ASSERT(arguments->IsArguments());
673
674 JumpTarget slow, done;
675
676 // Load the apply function onto the stack. This will usually
677 // give us a megamorphic load site. Not super, but it works.
678 Reference ref(this, apply);
679 ref.GetValue(NOT_INSIDE_TYPEOF);
680 ASSERT(ref.type() == Reference::NAMED);
681
682 // Load the receiver and the existing arguments object onto the
683 // expression stack. Avoid allocating the arguments object here.
684 Load(receiver);
685 LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
686
687 // Emit the source position information after having loaded the
688 // receiver and the arguments.
689 CodeForSourcePosition(position);
690
691 // Check if the arguments object has been lazily allocated
692 // already. If so, just use that instead of copying the arguments
693 // from the stack. This also deals with cases where a local variable
694 // named 'arguments' has been introduced.
695 frame_->Dup();
696 Result probe = frame_->Pop();
697 bool try_lazy = true;
698 if (probe.is_constant()) {
699 try_lazy = probe.handle()->IsTheHole();
700 } else {
701 __ Cmp(probe.reg(), Factory::the_hole_value());
702 probe.Unuse();
703 slow.Branch(not_equal);
704 }
705
706 if (try_lazy) {
707 JumpTarget build_args;
708
709 // Get rid of the arguments object probe.
710 frame_->Drop();
711
712 // Before messing with the execution stack, we sync all
713 // elements. This is bound to happen anyway because we're
714 // about to call a function.
715 frame_->SyncRange(0, frame_->element_count() - 1);
716
717 // Check that the receiver really is a JavaScript object.
718 {
719 frame_->PushElementAt(0);
720 Result receiver = frame_->Pop();
721 receiver.ToRegister();
722 Condition is_smi = masm_->CheckSmi(receiver.reg());
723 build_args.Branch(is_smi);
724 // We allow all JSObjects including JSFunctions. As long as
725 // JS_FUNCTION_TYPE is the last instance type and it is right
726 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
727 // bound.
728 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
729 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
730 __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
731 build_args.Branch(below);
732 }
733
734 // Verify that we're invoking Function.prototype.apply.
735 {
736 frame_->PushElementAt(1);
737 Result apply = frame_->Pop();
738 apply.ToRegister();
739 Condition is_smi = masm_->CheckSmi(apply.reg());
740 build_args.Branch(is_smi);
741 Result tmp = allocator_->Allocate();
742 __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
743 build_args.Branch(not_equal);
744 __ movq(tmp.reg(),
745 FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
746 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
747 __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
748 apply_code);
749 build_args.Branch(not_equal);
750 }
751
752 // Get the function receiver from the stack. Check that it
753 // really is a function.
754 __ movq(rdi, Operand(rsp, 2 * kPointerSize));
755 Condition is_smi = masm_->CheckSmi(rdi);
756 build_args.Branch(is_smi);
757 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
758 build_args.Branch(not_equal);
759
760 // Copy the arguments to this function possibly from the
761 // adaptor frame below it.
762 Label invoke, adapted;
763 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
764    __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
765 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
766    __ j(equal, &adapted);
767
768 // No arguments adaptor frame. Copy fixed number of arguments.
769 __ movq(rax, Immediate(scope_->num_parameters()));
770 for (int i = 0; i < scope_->num_parameters(); i++) {
771 __ push(frame_->ParameterAt(i));
772 }
773 __ jmp(&invoke);
774
775 // Arguments adaptor frame present. Copy arguments from there, but
776 // avoid copying too many arguments to avoid stack overflows.
777 __ bind(&adapted);
778 static const uint32_t kArgumentsLimit = 1 * KB;
779 __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
780 __ SmiToInteger32(rax, rax);
781 __ movq(rcx, rax);
782 __ cmpq(rax, Immediate(kArgumentsLimit));
783 build_args.Branch(above);
784
785 // Loop through the arguments pushing them onto the execution
786 // stack. We don't inform the virtual frame of the push, so we don't
787 // have to worry about getting rid of the elements from the virtual
788 // frame.
789 Label loop;
790    __ testl(rcx, rcx);
791 __ j(zero, &invoke);
792    __ bind(&loop);
793    __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
794 __ decl(rcx);
795    __ j(not_zero, &loop);
796
797 // Invoke the function. The virtual frame knows about the receiver
798 // so make sure to forget that explicitly.
799 __ bind(&invoke);
800 ParameterCount actual(rax);
801 __ InvokeFunction(rdi, actual, CALL_FUNCTION);
802 frame_->Forget(1);
803 Result result = allocator()->Allocate(rax);
804 frame_->SetElementAt(0, &result);
805 done.Jump();
806
807 // Slow-case: Allocate the arguments object since we know it isn't
808 // there, and fall-through to the slow-case where we call
809 // Function.prototype.apply.
810 build_args.Bind();
811 Result arguments_object = StoreArgumentsObject(false);
812 frame_->Push(&arguments_object);
813 slow.Bind();
814 }
815
816 // Flip the apply function and the function to call on the stack, so
817 // the function looks like the receiver of the apply call. This way,
818 // the generic Function.prototype.apply implementation can deal with
819 // the call like it usually does.
820 Result a2 = frame_->Pop();
821 Result a1 = frame_->Pop();
822 Result ap = frame_->Pop();
823 Result fn = frame_->Pop();
824 frame_->Push(&ap);
825 frame_->Push(&fn);
826 frame_->Push(&a1);
827 frame_->Push(&a2);
828 CallFunctionStub call_function(2, NOT_IN_LOOP);
829 Result res = frame_->CallStub(&call_function, 3);
830 frame_->Push(&res);
831
832 // All done. Restore context register after call.
833 if (try_lazy) done.Bind();
834 frame_->RestoreContextRegister();
835}
836
837
838class DeferredStackCheck: public DeferredCode {
839 public:
840 DeferredStackCheck() {
841 set_comment("[ DeferredStackCheck");
842 }
843
844 virtual void Generate();
845};
846
847
848void DeferredStackCheck::Generate() {
849 StackCheckStub stub;
850 __ CallStub(&stub);
851}
852
853
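// Emit an inlined stack guard: compare rsp against the stack limit root and
// branch to deferred code that calls the StackCheckStub when rsp has grown
// below the limit.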
854void CodeGenerator::CheckStack() {
855 if (FLAG_check_stack) {
856 DeferredStackCheck* deferred = new DeferredStackCheck;
857 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
858 deferred->Branch(below);
859 deferred->BindExit();
860 }
861}
862
863
864void CodeGenerator::VisitAndSpill(Statement* statement) {
865 // TODO(X64): No architecture specific code. Move to shared location.
866 ASSERT(in_spilled_code());
867 set_in_spilled_code(false);
868 Visit(statement);
869 if (frame_ != NULL) {
870 frame_->SpillAll();
871 }
872 set_in_spilled_code(true);
873}
874
875
876void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
877 ASSERT(in_spilled_code());
878 set_in_spilled_code(false);
879 VisitStatements(statements);
880 if (frame_ != NULL) {
881 frame_->SpillAll();
882 }
883 set_in_spilled_code(true);
884}
885
886
887void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
888 ASSERT(!in_spilled_code());
889 for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
890 Visit(statements->at(i));
891 }
892}
893
894
895void CodeGenerator::VisitBlock(Block* node) {
896 ASSERT(!in_spilled_code());
897 Comment cmnt(masm_, "[ Block");
898 CodeForStatementPosition(node);
899 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
900 VisitStatements(node->statements());
901 if (node->break_target()->is_linked()) {
902 node->break_target()->Bind();
903 }
904 node->break_target()->Unuse();
905}
906
907
908void CodeGenerator::VisitDeclaration(Declaration* node) {
909 Comment cmnt(masm_, "[ Declaration");
910 Variable* var = node->proxy()->var();
911 ASSERT(var != NULL); // must have been resolved
912 Slot* slot = var->slot();
913
914 // If it was not possible to allocate the variable at compile time,
915 // we need to "declare" it at runtime to make sure it actually
916 // exists in the local context.
917 if (slot != NULL && slot->type() == Slot::LOOKUP) {
918 // Variables with a "LOOKUP" slot were introduced as non-locals
919 // during variable resolution and must have mode DYNAMIC.
920 ASSERT(var->is_dynamic());
921 // For now, just do a runtime call. Sync the virtual frame eagerly
922 // so we can simply push the arguments into place.
923 frame_->SyncRange(0, frame_->element_count() - 1);
924 frame_->EmitPush(rsi);
925 __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
926 frame_->EmitPush(kScratchRegister);
927 // Declaration nodes are always introduced in one of two modes.
928 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
929 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
930    frame_->EmitPush(Smi::FromInt(attr));
931    // Push initial value, if any.
932 // Note: For variables we must not push an initial value (such as
933 // 'undefined') because we may have a (legal) redeclaration and we
934 // must not destroy the current value.
935 if (node->mode() == Variable::CONST) {
936 frame_->EmitPush(Heap::kTheHoleValueRootIndex);
937 } else if (node->fun() != NULL) {
938 Load(node->fun());
939 } else {
940      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
941    }
942 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
943 // Ignore the return value (declarations are statements).
944 return;
945 }
946
947 ASSERT(!var->is_global());
948
949 // If we have a function or a constant, we need to initialize the variable.
950 Expression* val = NULL;
951 if (node->mode() == Variable::CONST) {
952 val = new Literal(Factory::the_hole_value());
953 } else {
954 val = node->fun(); // NULL if we don't have a function
955 }
956
957 if (val != NULL) {
958 {
959 // Set the initial value.
960 Reference target(this, node->proxy());
961 Load(val);
962 target.SetValue(NOT_CONST_INIT);
963 // The reference is removed from the stack (preserving TOS) when
964 // it goes out of scope.
965 }
966 // Get rid of the assigned value (declarations are statements).
967 frame_->Drop();
968 }
969}
970
971
972void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
973 ASSERT(!in_spilled_code());
974 Comment cmnt(masm_, "[ ExpressionStatement");
975 CodeForStatementPosition(node);
976 Expression* expression = node->expression();
977 expression->MarkAsStatement();
978 Load(expression);
979 // Remove the lingering expression result from the top of stack.
980 frame_->Drop();
981}
982
983
984void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
985 ASSERT(!in_spilled_code());
986 Comment cmnt(masm_, "// EmptyStatement");
987 CodeForStatementPosition(node);
988 // nothing to do
989}
990
991
992void CodeGenerator::VisitIfStatement(IfStatement* node) {
993 ASSERT(!in_spilled_code());
994 Comment cmnt(masm_, "[ IfStatement");
995 // Generate different code depending on which parts of the if statement
996 // are present or not.
997 bool has_then_stm = node->HasThenStatement();
998 bool has_else_stm = node->HasElseStatement();
999
1000 CodeForStatementPosition(node);
1001 JumpTarget exit;
1002 if (has_then_stm && has_else_stm) {
1003 JumpTarget then;
1004 JumpTarget else_;
1005 ControlDestination dest(&then, &else_, true);
1006 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
1007
1008 if (dest.false_was_fall_through()) {
1009 // The else target was bound, so we compile the else part first.
1010 Visit(node->else_statement());
1011
1012 // We may have dangling jumps to the then part.
1013 if (then.is_linked()) {
1014 if (has_valid_frame()) exit.Jump();
1015 then.Bind();
1016 Visit(node->then_statement());
1017 }
1018 } else {
1019 // The then target was bound, so we compile the then part first.
1020 Visit(node->then_statement());
1021
1022 if (else_.is_linked()) {
1023 if (has_valid_frame()) exit.Jump();
1024 else_.Bind();
1025 Visit(node->else_statement());
1026 }
1027 }
1028
1029 } else if (has_then_stm) {
1030 ASSERT(!has_else_stm);
1031 JumpTarget then;
1032 ControlDestination dest(&then, &exit, true);
1033 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
1034
1035 if (dest.false_was_fall_through()) {
1036 // The exit label was bound. We may have dangling jumps to the
1037 // then part.
1038 if (then.is_linked()) {
1039 exit.Unuse();
1040 exit.Jump();
1041 then.Bind();
1042 Visit(node->then_statement());
1043 }
1044 } else {
1045 // The then label was bound.
1046 Visit(node->then_statement());
1047 }
1048
1049 } else if (has_else_stm) {
1050 ASSERT(!has_then_stm);
1051 JumpTarget else_;
1052 ControlDestination dest(&exit, &else_, false);
1053 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
1054
1055 if (dest.true_was_fall_through()) {
1056 // The exit label was bound. We may have dangling jumps to the
1057 // else part.
1058 if (else_.is_linked()) {
1059 exit.Unuse();
1060 exit.Jump();
1061 else_.Bind();
1062 Visit(node->else_statement());
1063 }
1064 } else {
1065 // The else label was bound.
1066 Visit(node->else_statement());
1067 }
1068
1069 } else {
1070 ASSERT(!has_then_stm && !has_else_stm);
1071 // We only care about the condition's side effects (not its value
1072 // or control flow effect). LoadCondition is called without
1073 // forcing control flow.
1074 ControlDestination dest(&exit, &exit, true);
1075 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
1076 if (!dest.is_used()) {
1077 // We got a value on the frame rather than (or in addition to)
1078 // control flow.
1079 frame_->Drop();
1080 }
1081 }
1082
1083 if (exit.is_linked()) {
1084 exit.Bind();
1085 }
1086}
1087
1088
1089void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
1090 ASSERT(!in_spilled_code());
1091 Comment cmnt(masm_, "[ ContinueStatement");
1092 CodeForStatementPosition(node);
1093 node->target()->continue_target()->Jump();
1094}
1095
1096
1097void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
1098 ASSERT(!in_spilled_code());
1099 Comment cmnt(masm_, "[ BreakStatement");
1100 CodeForStatementPosition(node);
1101 node->target()->break_target()->Jump();
1102}
1103
1104
1105void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
1106 ASSERT(!in_spilled_code());
1107 Comment cmnt(masm_, "[ ReturnStatement");
1108
1109 CodeForStatementPosition(node);
1110 Load(node->expression());
1111 Result return_value = frame_->Pop();
1112 if (function_return_is_shadowed_) {
1113 function_return_.Jump(&return_value);
1114 } else {
1115 frame_->PrepareForReturn();
1116 if (function_return_.is_bound()) {
1117 // If the function return label is already bound we reuse the
1118 // code by jumping to the return site.
1119 function_return_.Jump(&return_value);
1120 } else {
1121 function_return_.Bind(&return_value);
1122 GenerateReturnSequence(&return_value);
1123 }
1124 }
1125}
1126
1127
1128void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1129 ASSERT(!in_spilled_code());
1130 Comment cmnt(masm_, "[ WithEnterStatement");
1131 CodeForStatementPosition(node);
1132 Load(node->expression());
1133 Result context;
1134 if (node->is_catch_block()) {
1135 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1136 } else {
1137 context = frame_->CallRuntime(Runtime::kPushContext, 1);
1138 }
1139
1140 // Update context local.
1141 frame_->SaveContextRegister();
1142
1143 // Verify that the runtime call result and rsi agree.
1144 if (FLAG_debug_code) {
1145 __ cmpq(context.reg(), rsi);
1146 __ Assert(equal, "Runtime::NewContext should end up in rsi");
1147 }
1148}
1149
1150
1151void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1152 ASSERT(!in_spilled_code());
1153 Comment cmnt(masm_, "[ WithExitStatement");
1154 CodeForStatementPosition(node);
1155 // Pop context.
1156 __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
1157 // Update context local.
1158 frame_->SaveContextRegister();
1159}
1160
1161
1162void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
1163 // TODO(X64): This code is completely generic and should be moved somewhere
1164 // where it can be shared between architectures.
1165 ASSERT(!in_spilled_code());
1166 Comment cmnt(masm_, "[ SwitchStatement");
1167 CodeForStatementPosition(node);
1168 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1169
1170 // Compile the switch value.
1171 Load(node->tag());
1172
1173 ZoneList<CaseClause*>* cases = node->cases();
1174 int length = cases->length();
1175 CaseClause* default_clause = NULL;
1176
1177 JumpTarget next_test;
1178 // Compile the case label expressions and comparisons. Exit early
1179 // if a comparison is unconditionally true. The target next_test is
1180 // bound before the loop in order to indicate control flow to the
1181 // first comparison.
1182 next_test.Bind();
1183 for (int i = 0; i < length && !next_test.is_unused(); i++) {
1184 CaseClause* clause = cases->at(i);
1185 // The default is not a test, but remember it for later.
1186 if (clause->is_default()) {
1187 default_clause = clause;
1188 continue;
1189 }
1190
1191 Comment cmnt(masm_, "[ Case comparison");
1192 // We recycle the same target next_test for each test. Bind it if
1193 // the previous test has not done so and then unuse it for the
1194 // loop.
1195 if (next_test.is_linked()) {
1196 next_test.Bind();
1197 }
1198 next_test.Unuse();
1199
1200 // Duplicate the switch value.
1201 frame_->Dup();
1202
1203 // Compile the label expression.
1204 Load(clause->label());
1205
1206 // Compare and branch to the body if true or the next test if
1207 // false. Prefer the next test as a fall through.
1208 ControlDestination dest(clause->body_target(), &next_test, false);
1209 Comparison(equal, true, &dest);
1210
1211 // If the comparison fell through to the true target, jump to the
1212 // actual body.
1213 if (dest.true_was_fall_through()) {
1214 clause->body_target()->Unuse();
1215 clause->body_target()->Jump();
1216 }
1217 }
1218
1219 // If there was control flow to a next test from the last one
1220 // compiled, compile a jump to the default or break target.
1221 if (!next_test.is_unused()) {
1222 if (next_test.is_linked()) {
1223 next_test.Bind();
1224 }
1225 // Drop the switch value.
1226 frame_->Drop();
1227 if (default_clause != NULL) {
1228 default_clause->body_target()->Jump();
1229 } else {
1230 node->break_target()->Jump();
1231 }
1232 }
1233
1234 // The last instruction emitted was a jump, either to the default
1235 // clause or the break target, or else to a case body from the loop
1236 // that compiles the tests.
1237 ASSERT(!has_valid_frame());
1238 // Compile case bodies as needed.
1239 for (int i = 0; i < length; i++) {
1240 CaseClause* clause = cases->at(i);
1241
1242 // There are two ways to reach the body: from the corresponding
1243 // test or as the fall through of the previous body.
1244 if (clause->body_target()->is_linked() || has_valid_frame()) {
1245 if (clause->body_target()->is_linked()) {
1246 if (has_valid_frame()) {
1247 // If we have both a jump to the test and a fall through, put
1248 // a jump on the fall through path to avoid the dropping of
1249 // the switch value on the test path. The exception is the
1250 // default which has already had the switch value dropped.
1251 if (clause->is_default()) {
1252 clause->body_target()->Bind();
1253 } else {
1254 JumpTarget body;
1255 body.Jump();
1256 clause->body_target()->Bind();
1257 frame_->Drop();
1258 body.Bind();
1259 }
1260 } else {
1261 // No fall through to worry about.
1262 clause->body_target()->Bind();
1263 if (!clause->is_default()) {
1264 frame_->Drop();
1265 }
1266 }
1267 } else {
1268 // Otherwise, we have only fall through.
1269 ASSERT(has_valid_frame());
1270 }
1271
1272 // We are now prepared to compile the body.
1273 Comment cmnt(masm_, "[ Case body");
1274 VisitStatements(clause->statements());
1275 }
1276 clause->body_target()->Unuse();
1277 }
1278
1279 // We may not have a valid frame here so bind the break target only
1280 // if needed.
1281 if (node->break_target()->is_linked()) {
1282 node->break_target()->Bind();
1283 }
1284 node->break_target()->Unuse();
1285}
1286
1287
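// The loop visitors below use AnalyzeCondition() to classify the loop
// condition as ALWAYS_TRUE, ALWAYS_FALSE, or DONT_KNOW, and only emit the
// condition test (and the jumps around it) when it is actually needed.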
1288void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
1289  ASSERT(!in_spilled_code());
1290  Comment cmnt(masm_, "[ DoWhileStatement");
1291  CodeForStatementPosition(node);
1292 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1293  JumpTarget body(JumpTarget::BIDIRECTIONAL);
1294 IncrementLoopNesting();
1295
1296  ConditionAnalysis info = AnalyzeCondition(node->cond());
1297 // Label the top of the loop for the backward jump if necessary.
1298 switch (info) {
1299 case ALWAYS_TRUE:
1300 // Use the continue target.
1301 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1302 node->continue_target()->Bind();
1303 break;
1304 case ALWAYS_FALSE:
1305 // No need to label it.
1306 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1307 break;
1308 case DONT_KNOW:
1309 // Continue is the test, so use the backward body target.
1310 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1311 body.Bind();
1312 break;
1313  }
1314
1315  CheckStack();  // TODO(1222600): ignore if body contains calls.
1316 Visit(node->body());
1317
1318  // Compile the test.
1319 switch (info) {
1320 case ALWAYS_TRUE:
1321 // If control flow can fall off the end of the body, jump back
1322 // to the top and bind the break target at the exit.
1323 if (has_valid_frame()) {
1324 node->continue_target()->Jump();
1325      }
1326      if (node->break_target()->is_linked()) {
1327 node->break_target()->Bind();
1328 }
1329 break;
1330    case ALWAYS_FALSE:
1331 // We may have had continues or breaks in the body.
1332 if (node->continue_target()->is_linked()) {
1333 node->continue_target()->Bind();
1334      }
1335      if (node->break_target()->is_linked()) {
1336 node->break_target()->Bind();
1337      }
1338      break;
1339 case DONT_KNOW:
1340 // We have to compile the test expression if it can be reached by
1341 // control flow falling out of the body or via continue.
1342 if (node->continue_target()->is_linked()) {
1343 node->continue_target()->Bind();
1344 }
1345 if (has_valid_frame()) {
1346 ControlDestination dest(&body, node->break_target(), false);
1347        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1348      }
1349      if (node->break_target()->is_linked()) {
1350 node->break_target()->Bind();
1351 }
1352 break;
1353 }
1354
1355  DecrementLoopNesting();
1356 node->continue_target()->Unuse();
1357 node->break_target()->Unuse();
1358}
1359
1360
1361void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1362 ASSERT(!in_spilled_code());
1363 Comment cmnt(masm_, "[ WhileStatement");
1364 CodeForStatementPosition(node);
1365
1366 // If the condition is always false and has no side effects, we do not
1367 // need to compile anything.
1368 ConditionAnalysis info = AnalyzeCondition(node->cond());
1369 if (info == ALWAYS_FALSE) return;
1370
1371 // Do not duplicate conditions that may have function literal
1372 // subexpressions. This can cause us to compile the function literal
1373 // twice.
1374 bool test_at_bottom = !node->may_have_function_literal();
1375 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1376 IncrementLoopNesting();
1377 JumpTarget body;
1378 if (test_at_bottom) {
1379 body.set_direction(JumpTarget::BIDIRECTIONAL);
1380 }
1381
1382 // Based on the condition analysis, compile the test as necessary.
1383 switch (info) {
1384 case ALWAYS_TRUE:
1385 // We will not compile the test expression. Label the top of the
1386 // loop with the continue target.
1387 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1388 node->continue_target()->Bind();
1389 break;
1390 case DONT_KNOW: {
1391 if (test_at_bottom) {
1392 // Continue is the test at the bottom, no need to label the test
1393 // at the top. The body is a backward target.
1394 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1395 } else {
1396 // Label the test at the top as the continue target. The body
1397 // is a forward-only target.
1398 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1399 node->continue_target()->Bind();
1400 }
1401 // Compile the test with the body as the true target and preferred
1402 // fall-through and with the break target as the false target.
1403 ControlDestination dest(&body, node->break_target(), true);
1404 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1405
1406 if (dest.false_was_fall_through()) {
1407 // If we got the break target as fall-through, the test may have
1408 // been unconditionally false (if there are no jumps to the
1409 // body).
1410 if (!body.is_linked()) {
1411 DecrementLoopNesting();
1412 return;
1413 }
1414
1415 // Otherwise, jump around the body on the fall through and then
1416 // bind the body target.
1417 node->break_target()->Unuse();
1418 node->break_target()->Jump();
1419 body.Bind();
1420 }
1421 break;
1422 }
1423 case ALWAYS_FALSE:
1424 UNREACHABLE();
1425 break;
1426 }
1427
1428 CheckStack(); // TODO(1222600): ignore if body contains calls.
1429 Visit(node->body());
1430
1431 // Based on the condition analysis, compile the backward jump as
1432 // necessary.
1433 switch (info) {
1434 case ALWAYS_TRUE:
1435 // The loop body has been labeled with the continue target.
1436 if (has_valid_frame()) {
1437 node->continue_target()->Jump();
1438 }
1439 break;
1440 case DONT_KNOW:
1441 if (test_at_bottom) {
1442 // If we have chosen to recompile the test at the bottom,
1443 // then it is the continue target.
1444        if (node->continue_target()->is_linked()) {
1445 node->continue_target()->Bind();
1446 }
1447        if (has_valid_frame()) {
1448          // The break target is the fall-through (body is a backward
1449 // jump from here and thus an invalid fall-through).
1450 ControlDestination dest(&body, node->break_target(), false);
1451 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1452 }
1453 } else {
1454 // If we have chosen not to recompile the test at the
1455 // bottom, jump back to the one at the top.
1456 if (has_valid_frame()) {
1457 node->continue_target()->Jump();
1458        }
1459 }
1460      break;
1461 case ALWAYS_FALSE:
1462 UNREACHABLE();
1463 break;
1464 }
1465
1466  // The break target may be already bound (by the condition), or there
1467 // may not be a valid frame. Bind it only if needed.
1468 if (node->break_target()->is_linked()) {
1469 node->break_target()->Bind();
1470 }
1471 DecrementLoopNesting();
1472}
1473
1474
1475void CodeGenerator::VisitForStatement(ForStatement* node) {
1476 ASSERT(!in_spilled_code());
1477 Comment cmnt(masm_, "[ ForStatement");
1478 CodeForStatementPosition(node);
1479
1480 // Compile the init expression if present.
1481 if (node->init() != NULL) {
1482 Visit(node->init());
1483 }
1484
1485 // If the condition is always false and has no side effects, we do not
1486 // need to compile anything else.
1487 ConditionAnalysis info = AnalyzeCondition(node->cond());
1488 if (info == ALWAYS_FALSE) return;
1489
1490 // Do not duplicate conditions that may have function literal
1491 // subexpressions. This can cause us to compile the function literal
1492 // twice.
1493 bool test_at_bottom = !node->may_have_function_literal();
1494 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1495 IncrementLoopNesting();
1496
1497 // Target for backward edge if no test at the bottom, otherwise
1498 // unused.
1499 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1500
1501 // Target for backward edge if there is a test at the bottom,
1502 // otherwise used as target for test at the top.
1503 JumpTarget body;
1504 if (test_at_bottom) {
1505 body.set_direction(JumpTarget::BIDIRECTIONAL);
1506 }
1507
1508 // Based on the condition analysis, compile the test as necessary.
1509 switch (info) {
1510 case ALWAYS_TRUE:
1511 // We will not compile the test expression. Label the top of the
1512 // loop.
1513 if (node->next() == NULL) {
1514 // Use the continue target if there is no update expression.
1515 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1516 node->continue_target()->Bind();
1517 } else {
1518 // Otherwise use the backward loop target.
1519 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1520 loop.Bind();
1521 }
1522 break;
1523 case DONT_KNOW: {
1524 if (test_at_bottom) {
1525 // Continue is either the update expression or the test at the
1526 // bottom, no need to label the test at the top.
1527 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1528 } else if (node->next() == NULL) {
1529 // We are not recompiling the test at the bottom and there is no
1530 // update expression.
1531 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1532 node->continue_target()->Bind();
1533 } else {
1534 // We are not recompiling the test at the bottom and there is an
1535 // update expression.
1536 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1537 loop.Bind();
1538 }
1539
1540 // Compile the test with the body as the true target and preferred
1541 // fall-through and with the break target as the false target.
1542 ControlDestination dest(&body, node->break_target(), true);
1543 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1544
1545 if (dest.false_was_fall_through()) {
1546 // If we got the break target as fall-through, the test may have
1547 // been unconditionally false (if there are no jumps to the
1548 // body).
1549 if (!body.is_linked()) {
1550 DecrementLoopNesting();
1551 return;
1552 }
1553
1554 // Otherwise, jump around the body on the fall through and then
1555 // bind the body target.
1556 node->break_target()->Unuse();
1557 node->break_target()->Jump();
1558 body.Bind();
1559 }
1560 break;
1561 }
1562 case ALWAYS_FALSE:
1563 UNREACHABLE();
1564 break;
1565 }
1566
1567 CheckStack(); // TODO(1222600): ignore if body contains calls.
1568 Visit(node->body());
1569
1570 // If there is an update expression, compile it if necessary.
1571 if (node->next() != NULL) {
1572 if (node->continue_target()->is_linked()) {
1573 node->continue_target()->Bind();
1574 }
1575
1576 // Control can reach the update by falling out of the body or by a
1577 // continue.
1578 if (has_valid_frame()) {
1579 // Record the source position of the statement as this code which
1580 // is after the code for the body actually belongs to the loop
1581 // statement and not the body.
1582 CodeForStatementPosition(node);
1583 Visit(node->next());
1584 }
1585 }
1586
1587 // Based on the condition analysis, compile the backward jump as
1588 // necessary.
1589 switch (info) {
1590 case ALWAYS_TRUE:
1591 if (has_valid_frame()) {
1592 if (node->next() == NULL) {
1593 node->continue_target()->Jump();
1594 } else {
1595 loop.Jump();
1596 }
1597 }
1598 break;
1599 case DONT_KNOW:
1600 if (test_at_bottom) {
1601 if (node->continue_target()->is_linked()) {
1602 // We can have dangling jumps to the continue target if there
1603 // was no update expression.
1604 node->continue_target()->Bind();
1605 }
1606 // Control can reach the test at the bottom by falling out of
1607 // the body, by a continue in the body, or from the update
1608 // expression.
1609 if (has_valid_frame()) {
1610 // The break target is the fall-through (body is a backward
1611 // jump from here).
1612 ControlDestination dest(&body, node->break_target(), false);
1613 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
1614 }
1615 } else {
1616 // Otherwise, jump back to the test at the top.
 1617 if (has_valid_frame()) {
1618 if (node->next() == NULL) {
1619 node->continue_target()->Jump();
1620 } else {
1621 loop.Jump();
1622 }
1623 }
 1624 }
1625 break;
 1626 case ALWAYS_FALSE:
1627 UNREACHABLE();
1628 break;
 1629 }
1630
 1631 // The break target may be already bound (by the condition), or there
1632 // may not be a valid frame. Bind it only if needed.
1633 if (node->break_target()->is_linked()) {
1634 node->break_target()->Bind();
1635 }
 1636 DecrementLoopNesting();
 1637}
1638
1639
1640void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1641 ASSERT(!in_spilled_code());
1642 VirtualFrame::SpilledScope spilled_scope;
1643 Comment cmnt(masm_, "[ ForInStatement");
1644 CodeForStatementPosition(node);
1645
1646 JumpTarget primitive;
1647 JumpTarget jsobject;
1648 JumpTarget fixed_array;
1649 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1650 JumpTarget end_del_check;
1651 JumpTarget exit;
1652
1653 // Get the object to enumerate over (converted to JSObject).
1654 LoadAndSpill(node->enumerable());
1655
1656 // Both SpiderMonkey and kjs ignore null and undefined in contrast
1657 // to the specification. 12.6.4 mandates a call to ToObject.
1658 frame_->EmitPop(rax);
1659
1660 // rax: value to be iterated over
1661 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1662 exit.Branch(equal);
1663 __ CompareRoot(rax, Heap::kNullValueRootIndex);
1664 exit.Branch(equal);
1665
1666 // Stack layout in body:
1667 // [iteration counter (smi)] <- slot 0
1668 // [length of array] <- slot 1
1669 // [FixedArray] <- slot 2
1670 // [Map or 0] <- slot 3
1671 // [Object] <- slot 4
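  // Slot 0 is the top of the frame; the loop below reads these values
  // back with frame_->ElementAt(0) through frame_->ElementAt(4).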
1672
1673 // Check if enumerable is already a JSObject
1674 // rax: value to be iterated over
1675 Condition is_smi = masm_->CheckSmi(rax);
1676 primitive.Branch(is_smi);
1677 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
1678 jsobject.Branch(above_equal);
1679
1680 primitive.Bind();
1681 frame_->EmitPush(rax);
1682 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
1683 // function call returns the value in rax, which is where we want it below
1684
1685 jsobject.Bind();
1686 // Get the set of properties (as a FixedArray or Map).
1687 // rax: value to be iterated over
1688 frame_->EmitPush(rax); // push the object being iterated over (slot 4)
1689
1690 frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
1691 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1692
1693 // If we got a Map, we can do a fast modification check.
1694 // Otherwise, we got a FixedArray, and we have to do a slow check.
1695 // rax: map or fixed array (result from call to
1696 // Runtime::kGetPropertyNamesFast)
1697 __ movq(rdx, rax);
1698 __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
1699 __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
1700 fixed_array.Branch(not_equal);
1701
1702 // Get enum cache
1703 // rax: map (result from call to Runtime::kGetPropertyNamesFast)
1704 __ movq(rcx, rax);
1705 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
1706 // Get the bridge array held in the enumeration index field.
1707 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
1708 // Get the cache from the bridge array.
1709 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
1710
1711 frame_->EmitPush(rax); // <- slot 3
1712 frame_->EmitPush(rdx); // <- slot 2
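  // The FixedArray length is stored as an int32; convert it to a smi so
  // it can be compared directly against the smi iteration counter below.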
1713 __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
1714 __ Integer32ToSmi(rax, rax);
1715 frame_->EmitPush(rax); // <- slot 1
 1716 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
 1717 entry.Jump();
1718
1719 fixed_array.Bind();
1720 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
 1721 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
 1722 frame_->EmitPush(rax); // <- slot 2
1723
1724 // Push the length of the array and the initial index onto the stack.
1725 __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
1726 __ Integer32ToSmi(rax, rax);
1727 frame_->EmitPush(rax); // <- slot 1
 1728 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
 1729
1730 // Condition.
1731 entry.Bind();
1732 // Grab the current frame's height for the break and continue
1733 // targets only after all the state is pushed on the frame.
1734 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1735 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1736
1737 __ movq(rax, frame_->ElementAt(0)); // load the current count
 1738 __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
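  // Leave the loop once the counter has reached the length of the enum cache.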
1739 node->break_target()->Branch(below_equal);
 1740
1741 // Get the i'th entry of the array.
1742 __ movq(rdx, frame_->ElementAt(2));
1743 SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
1744 __ movq(rbx,
1745 FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
1746
 1747 // Get the expected map from the stack or a zero map in the
 1748 // permanent slow case.
 1749 // rax: current iteration count, rbx: i'th entry of the enum cache
1750 __ movq(rdx, frame_->ElementAt(3));
1751 // Check if the expected map still matches that of the enumerable.
1752 // If not, we have to filter the key.
1753 // rax: current iteration count
1754 // rbx: i'th entry of the enum cache
1755 // rdx: expected map value
1756 __ movq(rcx, frame_->ElementAt(4));
1757 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
1758 __ cmpq(rcx, rdx);
1759 end_del_check.Branch(equal);
1760
1761 // Convert the entry to a string (or null if it isn't a property anymore).
1762 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
1763 frame_->EmitPush(rbx); // push entry
1764 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
1765 __ movq(rbx, rax);
1766
1767 // If the property has been removed while iterating, we just skip it.
1768 __ CompareRoot(rbx, Heap::kNullValueRootIndex);
1769 node->continue_target()->Branch(equal);
1770
1771 end_del_check.Bind();
1772 // Store the entry in the 'each' expression and take another spin in the
 1773 // loop. rbx: i'th entry of the enum cache (or string thereof).
1774 frame_->EmitPush(rbx);
1775 { Reference each(this, node->each());
1776 // Loading a reference may leave the frame in an unspilled state.
1777 frame_->SpillAll();
1778 if (!each.is_illegal()) {
1779 if (each.size() > 0) {
1780 frame_->EmitPush(frame_->ElementAt(each.size()));
1781 }
1782 // If the reference was to a slot we rely on the convenient property
 1783 // that it doesn't matter whether a value (eg, rbx pushed above) is
1784 // right on top of or right underneath a zero-sized reference.
1785 each.SetValue(NOT_CONST_INIT);
1786 if (each.size() > 0) {
1787 // It's safe to pop the value lying on top of the reference before
1788 // unloading the reference itself (which preserves the top of stack,
1789 // ie, now the topmost value of the non-zero sized reference), since
1790 // we will discard the top of stack after unloading the reference
1791 // anyway.
1792 frame_->Drop();
1793 }
1794 }
1795 }
1796 // Unloading a reference may leave the frame in an unspilled state.
1797 frame_->SpillAll();
1798
1799 // Discard the i'th entry pushed above or else the remainder of the
1800 // reference, whichever is currently on top of the stack.
1801 frame_->Drop();
1802
1803 // Body.
1804 CheckStack(); // TODO(1222600): ignore if body contains calls.
1805 VisitAndSpill(node->body());
1806
1807 // Next. Reestablish a spilled frame in case we are coming here via
1808 // a continue in the body.
1809 node->continue_target()->Bind();
1810 frame_->SpillAll();
1811 frame_->EmitPop(rax);
 1812 __ SmiAddConstant(rax, rax, Smi::FromInt(1));
 1813 frame_->EmitPush(rax);
1814 entry.Jump();
1815
1816 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1817 // any frame.
1818 node->break_target()->Bind();
1819 frame_->Drop(5);
1820
1821 // Exit.
1822 exit.Bind();
1823
1824 node->continue_target()->Unuse();
1825 node->break_target()->Unuse();
1826}
1827
1828void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
 1829 ASSERT(!in_spilled_code());
1830 VirtualFrame::SpilledScope spilled_scope;
 1831 Comment cmnt(masm_, "[ TryCatchStatement");
 1832 CodeForStatementPosition(node);
1833
1834 JumpTarget try_block;
1835 JumpTarget exit;
1836
1837 try_block.Call();
1838 // --- Catch block ---
1839 frame_->EmitPush(rax);
1840
1841 // Store the caught exception in the catch variable.
1842 { Reference ref(this, node->catch_var());
1843 ASSERT(ref.is_slot());
1844 // Load the exception to the top of the stack. Here we make use of the
1845 // convenient property that it doesn't matter whether a value is
1846 // immediately on top of or underneath a zero-sized reference.
1847 ref.SetValue(NOT_CONST_INIT);
1848 }
1849
1850 // Remove the exception from the stack.
1851 frame_->Drop();
1852
1853 VisitStatementsAndSpill(node->catch_block()->statements());
1854 if (has_valid_frame()) {
1855 exit.Jump();
1856 }
1857
1858
1859 // --- Try block ---
1860 try_block.Bind();
1861
1862 frame_->PushTryHandler(TRY_CATCH_HANDLER);
1863 int handler_height = frame_->height();
1864
1865 // Shadow the jump targets for all escapes from the try block, including
1866 // returns. During shadowing, the original target is hidden as the
1867 // ShadowTarget and operations on the original actually affect the
1868 // shadowing target.
1869 //
1870 // We should probably try to unify the escaping targets and the return
1871 // target.
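  //
  // A return inside the try block, for example, first jumps to the
  // return shadow target; the unlink code emitted below then removes
  // the try handler before jumping on to the real function return.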
1872 int nof_escapes = node->escaping_targets()->length();
1873 List<ShadowTarget*> shadows(1 + nof_escapes);
1874
1875 // Add the shadow target for the function return.
1876 static const int kReturnShadowIndex = 0;
1877 shadows.Add(new ShadowTarget(&function_return_));
1878 bool function_return_was_shadowed = function_return_is_shadowed_;
1879 function_return_is_shadowed_ = true;
1880 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
1881
1882 // Add the remaining shadow targets.
1883 for (int i = 0; i < nof_escapes; i++) {
1884 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
1885 }
1886
1887 // Generate code for the statements in the try block.
1888 VisitStatementsAndSpill(node->try_block()->statements());
1889
1890 // Stop the introduced shadowing and count the number of required unlinks.
1891 // After shadowing stops, the original targets are unshadowed and the
1892 // ShadowTargets represent the formerly shadowing targets.
1893 bool has_unlinks = false;
1894 for (int i = 0; i < shadows.length(); i++) {
1895 shadows[i]->StopShadowing();
1896 has_unlinks = has_unlinks || shadows[i]->is_linked();
1897 }
1898 function_return_is_shadowed_ = function_return_was_shadowed;
1899
1900 // Get an external reference to the handler address.
1901 ExternalReference handler_address(Top::k_handler_address);
1902
1903 // Make sure that there's nothing left on the stack above the
1904 // handler structure.
1905 if (FLAG_debug_code) {
1906 __ movq(kScratchRegister, handler_address);
1907 __ cmpq(rsp, Operand(kScratchRegister, 0));
1908 __ Assert(equal, "stack pointer should point to top handler");
1909 }
1910
1911 // If we can fall off the end of the try block, unlink from try chain.
1912 if (has_valid_frame()) {
1913 // The next handler address is on top of the frame. Unlink from
1914 // the handler list and drop the rest of this handler from the
1915 // frame.
1916 ASSERT(StackHandlerConstants::kNextOffset == 0);
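    // Popping into the handler-address slot writes the saved next-handler
    // link back, making the enclosing handler the current one again.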
1917 __ movq(kScratchRegister, handler_address);
1918 frame_->EmitPop(Operand(kScratchRegister, 0));
1919 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
1920 if (has_unlinks) {
1921 exit.Jump();
1922 }
1923 }
1924
1925 // Generate unlink code for the (formerly) shadowing targets that
1926 // have been jumped to. Deallocate each shadow target.
1927 Result return_value;
1928 for (int i = 0; i < shadows.length(); i++) {
1929 if (shadows[i]->is_linked()) {
1930 // Unlink from try chain; be careful not to destroy the TOS if
1931 // there is one.
1932 if (i == kReturnShadowIndex) {
1933 shadows[i]->Bind(&return_value);
1934 return_value.ToRegister(rax);
1935 } else {
1936 shadows[i]->Bind();
1937 }
1938 // Because we can be jumping here (to spilled code) from
1939 // unspilled code, we need to reestablish a spilled frame at
1940 // this block.
1941 frame_->SpillAll();
1942
1943 // Reload sp from the top handler, because some statements that we
1944 // break from (eg, for...in) may have left stuff on the stack.
1945 __ movq(kScratchRegister, handler_address);
1946 __ movq(rsp, Operand(kScratchRegister, 0));
1947 frame_->Forget(frame_->height() - handler_height);
1948
1949 ASSERT(StackHandlerConstants::kNextOffset == 0);
1950 __ movq(kScratchRegister, handler_address);
1951 frame_->EmitPop(Operand(kScratchRegister, 0));
1952 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
1953
1954 if (i == kReturnShadowIndex) {
1955 if (!function_return_is_shadowed_) frame_->PrepareForReturn();
1956 shadows[i]->other_target()->Jump(&return_value);
1957 } else {
1958 shadows[i]->other_target()->Jump();
1959 }
1960 }
1961 }
1962
1963 exit.Bind();
1964}
1965
1966
1967void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
 1968 ASSERT(!in_spilled_code());
1969 VirtualFrame::SpilledScope spilled_scope;
 1970 Comment cmnt(masm_, "[ TryFinallyStatement");
 1971 CodeForStatementPosition(node);
1972
1973 // State: Used to keep track of reason for entering the finally
1974 // block. Should probably be extended to hold information for
1975 // break/continue from within the try block.
1976 enum { FALLING, THROWING, JUMPING };
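  // The state is kept as a smi in rcx. JUMPING + i means control is on
  // its way to the i'th shadowed jump target, where index 0 is the
  // function return.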
1977
1978 JumpTarget try_block;
1979 JumpTarget finally_block;
1980
1981 try_block.Call();
1982
1983 frame_->EmitPush(rax);
1984 // In case of thrown exceptions, this is where we continue.
 1985 __ Move(rcx, Smi::FromInt(THROWING));
 1986 finally_block.Jump();
1987
1988 // --- Try block ---
1989 try_block.Bind();
1990
1991 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
1992 int handler_height = frame_->height();
1993
1994 // Shadow the jump targets for all escapes from the try block, including
1995 // returns. During shadowing, the original target is hidden as the
1996 // ShadowTarget and operations on the original actually affect the
1997 // shadowing target.
1998 //
1999 // We should probably try to unify the escaping targets and the return
2000 // target.
2001 int nof_escapes = node->escaping_targets()->length();
2002 List<ShadowTarget*> shadows(1 + nof_escapes);
2003
2004 // Add the shadow target for the function return.
2005 static const int kReturnShadowIndex = 0;
2006 shadows.Add(new ShadowTarget(&function_return_));
2007 bool function_return_was_shadowed = function_return_is_shadowed_;
2008 function_return_is_shadowed_ = true;
2009 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2010
2011 // Add the remaining shadow targets.
2012 for (int i = 0; i < nof_escapes; i++) {
2013 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2014 }
2015
2016 // Generate code for the statements in the try block.
2017 VisitStatementsAndSpill(node->try_block()->statements());
2018
2019 // Stop the introduced shadowing and count the number of required unlinks.
2020 // After shadowing stops, the original targets are unshadowed and the
2021 // ShadowTargets represent the formerly shadowing targets.
2022 int nof_unlinks = 0;
2023 for (int i = 0; i < shadows.length(); i++) {
2024 shadows[i]->StopShadowing();
2025 if (shadows[i]->is_linked()) nof_unlinks++;
2026 }
2027 function_return_is_shadowed_ = function_return_was_shadowed;
2028
2029 // Get an external reference to the handler address.
2030 ExternalReference handler_address(Top::k_handler_address);
2031
2032 // If we can fall off the end of the try block, unlink from the try
2033 // chain and set the state on the frame to FALLING.
2034 if (has_valid_frame()) {
2035 // The next handler address is on top of the frame.
2036 ASSERT(StackHandlerConstants::kNextOffset == 0);
2037 __ movq(kScratchRegister, handler_address);
2038 frame_->EmitPop(Operand(kScratchRegister, 0));
2039 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2040
2041 // Fake a top of stack value (unneeded when FALLING) and set the
 2042 // state in rcx, then jump around the unlink blocks if any.
2043 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
 2044 __ Move(rcx, Smi::FromInt(FALLING));
 2045 if (nof_unlinks > 0) {
2046 finally_block.Jump();
2047 }
2048 }
2049
2050 // Generate code to unlink and set the state for the (formerly)
2051 // shadowing targets that have been jumped to.
2052 for (int i = 0; i < shadows.length(); i++) {
2053 if (shadows[i]->is_linked()) {
2054 // If we have come from the shadowed return, the return value is
2055 // on the virtual frame. We must preserve it until it is
2056 // pushed.
2057 if (i == kReturnShadowIndex) {
2058 Result return_value;
2059 shadows[i]->Bind(&return_value);
2060 return_value.ToRegister(rax);
2061 } else {
2062 shadows[i]->Bind();
2063 }
2064 // Because we can be jumping here (to spilled code) from
2065 // unspilled code, we need to reestablish a spilled frame at
2066 // this block.
2067 frame_->SpillAll();
2068
2069 // Reload sp from the top handler, because some statements that
2070 // we break from (eg, for...in) may have left stuff on the
2071 // stack.
2072 __ movq(kScratchRegister, handler_address);
2073 __ movq(rsp, Operand(kScratchRegister, 0));
2074 frame_->Forget(frame_->height() - handler_height);
2075
2076 // Unlink this handler and drop it from the frame.
2077 ASSERT(StackHandlerConstants::kNextOffset == 0);
2078 __ movq(kScratchRegister, handler_address);
2079 frame_->EmitPop(Operand(kScratchRegister, 0));
2080 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2081
2082 if (i == kReturnShadowIndex) {
2083 // If this target shadowed the function return, materialize
2084 // the return value on the stack.
2085 frame_->EmitPush(rax);
2086 } else {
2087 // Fake TOS for targets that shadowed breaks and continues.
2088 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
2089 }
 2090 __ Move(rcx, Smi::FromInt(JUMPING + i));
 2091 if (--nof_unlinks > 0) {
2092 // If this is not the last unlink block, jump around the next.
2093 finally_block.Jump();
2094 }
2095 }
2096 }
2097
2098 // --- Finally block ---
2099 finally_block.Bind();
2100
2101 // Push the state on the stack.
2102 frame_->EmitPush(rcx);
2103
2104 // We keep two elements on the stack - the (possibly faked) result
2105 // and the state - while evaluating the finally block.
2106 //
2107 // Generate code for the statements in the finally block.
2108 VisitStatementsAndSpill(node->finally_block()->statements());
2109
2110 if (has_valid_frame()) {
2111 // Restore state and return value or faked TOS.
2112 frame_->EmitPop(rcx);
2113 frame_->EmitPop(rax);
2114 }
2115
2116 // Generate code to jump to the right destination for all used
2117 // formerly shadowing targets. Deallocate each shadow target.
2118 for (int i = 0; i < shadows.length(); i++) {
2119 if (has_valid_frame() && shadows[i]->is_bound()) {
2120 BreakTarget* original = shadows[i]->other_target();
 2121 __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
 2122 if (i == kReturnShadowIndex) {
2123 // The return value is (already) in rax.
2124 Result return_value = allocator_->Allocate(rax);
2125 ASSERT(return_value.is_valid());
2126 if (function_return_is_shadowed_) {
2127 original->Branch(equal, &return_value);
2128 } else {
2129 // Branch around the preparation for return which may emit
2130 // code.
2131 JumpTarget skip;
2132 skip.Branch(not_equal);
2133 frame_->PrepareForReturn();
2134 original->Jump(&return_value);
2135 skip.Bind();
2136 }
2137 } else {
2138 original->Branch(equal);
2139 }
2140 }
2141 }
2142
2143 if (has_valid_frame()) {
2144 // Check if we need to rethrow the exception.
2145 JumpTarget exit;
 2146 __ SmiCompare(rcx, Smi::FromInt(THROWING));
 2147 exit.Branch(not_equal);
2148
2149 // Rethrow exception.
2150 frame_->EmitPush(rax); // undo pop from above
2151 frame_->CallRuntime(Runtime::kReThrow, 1);
2152
2153 // Done.
2154 exit.Bind();
2155 }
2156}
2157
2158
2159void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2160 ASSERT(!in_spilled_code());
2161 Comment cmnt(masm_, "[ DebuggerStatement");
2162 CodeForStatementPosition(node);
2163#ifdef ENABLE_DEBUGGER_SUPPORT
2164 // Spill everything, even constants, to the frame.
2165 frame_->SpillAll();
2166 frame_->CallRuntime(Runtime::kDebugBreak, 0);
2167 // Ignore the return value.
2168#endif
2169}
2170
2171
2172void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2173 // Call the runtime to instantiate the function boilerplate object.
2174 // The inevitable call will sync frame elements to memory anyway, so
2175 // we do it eagerly to allow us to push the arguments directly into
2176 // place.
2177 ASSERT(boilerplate->IsBoilerplate());
2178 frame_->SyncRange(0, frame_->element_count() - 1);
2179
 2180 // Create a new closure.
 2181 frame_->EmitPush(rsi);
 2182 __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
 2183 frame_->EmitPush(kScratchRegister);
 2184 Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
2185 frame_->Push(&result);
2186}
2187
2188
2189void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2190 Comment cmnt(masm_, "[ FunctionLiteral");
2191
2192 // Build the function boilerplate and instantiate it.
2193 Handle<JSFunction> boilerplate = BuildBoilerplate(node);
2194 // Check for stack-overflow exception.
2195 if (HasStackOverflow()) return;
2196 InstantiateBoilerplate(boilerplate);
2197}
2198
2199
2200void CodeGenerator::VisitFunctionBoilerplateLiteral(
2201 FunctionBoilerplateLiteral* node) {
2202 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2203 InstantiateBoilerplate(node->boilerplate());
2204}
2205
2206
2207void CodeGenerator::VisitConditional(Conditional* node) {
2208 Comment cmnt(masm_, "[ Conditional");
2209 JumpTarget then;
2210 JumpTarget else_;
2211 JumpTarget exit;
2212 ControlDestination dest(&then, &else_, true);
2213 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
2214
2215 if (dest.false_was_fall_through()) {
2216 // The else target was bound, so we compile the else part first.
2217 Load(node->else_expression(), typeof_state());
2218
2219 if (then.is_linked()) {
2220 exit.Jump();
2221 then.Bind();
2222 Load(node->then_expression(), typeof_state());
2223 }
2224 } else {
2225 // The then target was bound, so we compile the then part first.
2226 Load(node->then_expression(), typeof_state());
2227
2228 if (else_.is_linked()) {
2229 exit.Jump();
2230 else_.Bind();
2231 Load(node->else_expression(), typeof_state());
2232 }
2233 }
2234
2235 exit.Bind();
2236}
2237
2238
2239void CodeGenerator::VisitSlot(Slot* node) {
2240 Comment cmnt(masm_, "[ Slot");
2241 LoadFromSlotCheckForArguments(node, typeof_state());
2242}
2243
2244
2245void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2246 Comment cmnt(masm_, "[ VariableProxy");
2247 Variable* var = node->var();
2248 Expression* expr = var->rewrite();
2249 if (expr != NULL) {
2250 Visit(expr);
2251 } else {
2252 ASSERT(var->is_global());
2253 Reference ref(this, node);
2254 ref.GetValue(typeof_state());
2255 }
2256}
2257
2258
2259void CodeGenerator::VisitLiteral(Literal* node) {
2260 Comment cmnt(masm_, "[ Literal");
2261 frame_->Push(node->handle());
2262}
2263
2264
2265// Materialize the regexp literal 'node' in the literals array
2266// 'literals' of the function. Leave the regexp boilerplate in
2267// 'boilerplate'.
2268class DeferredRegExpLiteral: public DeferredCode {
2269 public:
2270 DeferredRegExpLiteral(Register boilerplate,
2271 Register literals,
2272 RegExpLiteral* node)
2273 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2274 set_comment("[ DeferredRegExpLiteral");
2275 }
2276
2277 void Generate();
2278
2279 private:
2280 Register boilerplate_;
2281 Register literals_;
2282 RegExpLiteral* node_;
2283};
2284
2285
2286void DeferredRegExpLiteral::Generate() {
2287 // Since the entry is undefined we call the runtime system to
2288 // compute the literal.
2289 // Literal array (0).
2290 __ push(literals_);
2291 // Literal index (1).
 2292 __ Push(Smi::FromInt(node_->literal_index()));
 2293 // RegExp pattern (2).
2294 __ Push(node_->pattern());
2295 // RegExp flags (3).
2296 __ Push(node_->flags());
2297 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2298 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2299}
2300
2301
2302void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2303 Comment cmnt(masm_, "[ RegExp Literal");
2304
2305 // Retrieve the literals array and check the allocated entry. Begin
2306 // with a writable copy of the function of this activation in a
2307 // register.
2308 frame_->PushFunction();
2309 Result literals = frame_->Pop();
2310 literals.ToRegister();
2311 frame_->Spill(literals.reg());
2312
2313 // Load the literals array of the function.
2314 __ movq(literals.reg(),
2315 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2316
2317 // Load the literal at the ast saved index.
2318 Result boilerplate = allocator_->Allocate();
2319 ASSERT(boilerplate.is_valid());
2320 int literal_offset =
2321 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
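  // FieldOperand adjusts for the heap object tag, so this loads the
  // literal_index'th element of the literals FixedArray.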
2322 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2323
2324 // Check whether we need to materialize the RegExp object. If so,
2325 // jump to the deferred code passing the literals array.
2326 DeferredRegExpLiteral* deferred =
2327 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
2328 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2329 deferred->Branch(equal);
2330 deferred->BindExit();
2331 literals.Unuse();
2332
2333 // Push the boilerplate object.
2334 frame_->Push(&boilerplate);
2335}
2336
2337
2338// Materialize the object literal 'node' in the literals array
2339// 'literals' of the function. Leave the object boilerplate in
2340// 'boilerplate'.
2341class DeferredObjectLiteral: public DeferredCode {
2342 public:
2343 DeferredObjectLiteral(Register boilerplate,
2344 Register literals,
2345 ObjectLiteral* node)
2346 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2347 set_comment("[ DeferredObjectLiteral");
2348 }
2349
2350 void Generate();
2351
2352 private:
2353 Register boilerplate_;
2354 Register literals_;
2355 ObjectLiteral* node_;
2356};
2357
2358
2359void DeferredObjectLiteral::Generate() {
2360 // Since the entry is undefined we call the runtime system to
2361 // compute the literal.
2362 // Literal array (0).
2363 __ push(literals_);
2364 // Literal index (1).
 2365 __ Push(Smi::FromInt(node_->literal_index()));
 2366 // Constant properties (2).
2367 __ Push(node_->constant_properties());
2368 __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
2369 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2370}
2371
2372
2373void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2374 Comment cmnt(masm_, "[ ObjectLiteral");
2375
2376 // Retrieve the literals array and check the allocated entry. Begin
2377 // with a writable copy of the function of this activation in a
2378 // register.
2379 frame_->PushFunction();
2380 Result literals = frame_->Pop();
2381 literals.ToRegister();
2382 frame_->Spill(literals.reg());
2383
2384 // Load the literals array of the function.
2385 __ movq(literals.reg(),
2386 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2387
2388 // Load the literal at the ast saved index.
2389 Result boilerplate = allocator_->Allocate();
2390 ASSERT(boilerplate.is_valid());
2391 int literal_offset =
2392 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2393 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2394
2395 // Check whether we need to materialize the object literal boilerplate.
2396 // If so, jump to the deferred code passing the literals array.
2397 DeferredObjectLiteral* deferred =
2398 new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
2399 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2400 deferred->Branch(equal);
2401 deferred->BindExit();
2402 literals.Unuse();
2403
2404 // Push the boilerplate object.
2405 frame_->Push(&boilerplate);
2406 // Clone the boilerplate object.
2407 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2408 if (node->depth() == 1) {
2409 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2410 }
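  // A depth of one indicates that the literal contains no nested
  // literals needing their own boilerplates, so a shallow clone of the
  // boilerplate is enough.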
2411 Result clone = frame_->CallRuntime(clone_function_id, 1);
2412 // Push the newly cloned literal object as the result.
2413 frame_->Push(&clone);
2414
2415 for (int i = 0; i < node->properties()->length(); i++) {
2416 ObjectLiteral::Property* property = node->properties()->at(i);
2417 switch (property->kind()) {
2418 case ObjectLiteral::Property::CONSTANT:
2419 break;
2420 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2421 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2422 // else fall through.
2423 case ObjectLiteral::Property::COMPUTED: {
2424 Handle<Object> key(property->key()->handle());
2425 if (key->IsSymbol()) {
2426 // Duplicate the object as the IC receiver.
2427 frame_->Dup();
2428 Load(property->value());
2429 frame_->Push(key);
2430 Result ignored = frame_->CallStoreIC();
2431 // Drop the duplicated receiver and ignore the result.
2432 frame_->Drop();
2433 break;
2434 }
2435 // Fall through
2436 }
2437 case ObjectLiteral::Property::PROTOTYPE: {
2438 // Duplicate the object as an argument to the runtime call.
2439 frame_->Dup();
2440 Load(property->key());
2441 Load(property->value());
2442 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
2443 // Ignore the result.
2444 break;
2445 }
2446 case ObjectLiteral::Property::SETTER: {
2447 // Duplicate the object as an argument to the runtime call.
2448 frame_->Dup();
2449 Load(property->key());
2450 frame_->Push(Smi::FromInt(1));
2451 Load(property->value());
2452 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2453 // Ignore the result.
2454 break;
2455 }
2456 case ObjectLiteral::Property::GETTER: {
2457 // Duplicate the object as an argument to the runtime call.
2458 frame_->Dup();
2459 Load(property->key());
2460 frame_->Push(Smi::FromInt(0));
2461 Load(property->value());
2462 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2463 // Ignore the result.
2464 break;
2465 }
2466 default: UNREACHABLE();
2467 }
2468 }
2469}
2470
2471
2472// Materialize the array literal 'node' in the literals array 'literals'
2473// of the function. Leave the array boilerplate in 'boilerplate'.
2474class DeferredArrayLiteral: public DeferredCode {
2475 public:
2476 DeferredArrayLiteral(Register boilerplate,
2477 Register literals,
2478 ArrayLiteral* node)
2479 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2480 set_comment("[ DeferredArrayLiteral");
2481 }
2482
2483 void Generate();
2484
2485 private:
2486 Register boilerplate_;
2487 Register literals_;
2488 ArrayLiteral* node_;
2489};
2490
2491
2492void DeferredArrayLiteral::Generate() {
2493 // Since the entry is undefined we call the runtime system to
2494 // compute the literal.
2495 // Literal array (0).
2496 __ push(literals_);
2497 // Literal index (1).
 2498 __ Push(Smi::FromInt(node_->literal_index()));
 2499 // Constant elements (2).
2500 __ Push(node_->literals());
2501 __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
2502 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2503}
2504
2505
2506void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2507 Comment cmnt(masm_, "[ ArrayLiteral");
2508
2509 // Retrieve the literals array and check the allocated entry. Begin
2510 // with a writable copy of the function of this activation in a
2511 // register.
2512 frame_->PushFunction();
2513 Result literals = frame_->Pop();
2514 literals.ToRegister();
2515 frame_->Spill(literals.reg());
2516
2517 // Load the literals array of the function.
2518 __ movq(literals.reg(),
2519 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2520
2521 // Load the literal at the ast saved index.
2522 Result boilerplate = allocator_->Allocate();
2523 ASSERT(boilerplate.is_valid());
2524 int literal_offset =
2525 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2526 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2527
2528 // Check whether we need to materialize the object literal boilerplate.
2529 // If so, jump to the deferred code passing the literals array.
2530 DeferredArrayLiteral* deferred =
2531 new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
2532 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2533 deferred->Branch(equal);
2534 deferred->BindExit();
2535 literals.Unuse();
2536
2537 // Push the resulting array literal boilerplate on the stack.
2538 frame_->Push(&boilerplate);
2539 // Clone the boilerplate object.
2540 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2541 if (node->depth() == 1) {
2542 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2543 }
2544 Result clone = frame_->CallRuntime(clone_function_id, 1);
2545 // Push the newly cloned literal object as the result.
2546 frame_->Push(&clone);
2547
2548 // Generate code to set the elements in the array that are not
2549 // literals.
2550 for (int i = 0; i < node->values()->length(); i++) {
2551 Expression* value = node->values()->at(i);
2552
2553 // If value is a literal the property value is already set in the
2554 // boilerplate object.
2555 if (value->AsLiteral() != NULL) continue;
2556 // If value is a materialized literal the property value is already set
2557 // in the boilerplate object if it is simple.
2558 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2559
2560 // The property must be set by generated code.
2561 Load(value);
2562
2563 // Get the property value off the stack.
2564 Result prop_value = frame_->Pop();
2565 prop_value.ToRegister();
2566
2567 // Fetch the array literal while leaving a copy on the stack and
2568 // use it to get the elements array.
2569 frame_->Dup();
2570 Result elements = frame_->Pop();
2571 elements.ToRegister();
2572 frame_->Spill(elements.reg());
2573 // Get the elements FixedArray.
2574 __ movq(elements.reg(),
2575 FieldOperand(elements.reg(), JSObject::kElementsOffset));
2576
2577 // Write to the indexed properties array.
2578 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2579 __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
2580
2581 // Update the write barrier for the array address.
2582 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
2583 Result scratch = allocator_->Allocate();
2584 ASSERT(scratch.is_valid());
2585 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
2586 }
2587}
2588
2589
2590void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2591 ASSERT(!in_spilled_code());
2592 // Call runtime routine to allocate the catch extension object and
2593 // assign the exception value to the catch variable.
2594 Comment cmnt(masm_, "[ CatchExtensionObject");
2595 Load(node->key());
2596 Load(node->value());
2597 Result result =
2598 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2599 frame_->Push(&result);
2600}
2601
2602
2603void CodeGenerator::VisitAssignment(Assignment* node) {
2604 Comment cmnt(masm_, "[ Assignment");
2605
2606 { Reference target(this, node->target());
2607 if (target.is_illegal()) {
2608 // Fool the virtual frame into thinking that we left the assignment's
2609 // value on the frame.
2610 frame_->Push(Smi::FromInt(0));
2611 return;
2612 }
2613 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2614
2615 if (node->starts_initialization_block()) {
2616 ASSERT(target.type() == Reference::NAMED ||
2617 target.type() == Reference::KEYED);
2618 // Change to slow case in the beginning of an initialization
2619 // block to avoid the quadratic behavior of repeatedly adding
2620 // fast properties.
2621
2622 // The receiver is the argument to the runtime call. It is the
2623 // first value pushed when the reference was loaded to the
2624 // frame.
2625 frame_->PushElementAt(target.size() - 1);
2626 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
2627 }
2628 if (node->op() == Token::ASSIGN ||
2629 node->op() == Token::INIT_VAR ||
2630 node->op() == Token::INIT_CONST) {
2631 Load(node->value());
2632
2633 } else {
2634 Literal* literal = node->value()->AsLiteral();
2635 bool overwrite_value =
2636 (node->value()->AsBinaryOperation() != NULL &&
2637 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2638 Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
2639 // There are two cases where the target is not read in the right hand
2640 // side, that are easy to test for: the right hand side is a literal,
2641 // or the right hand side is a different variable. TakeValue invalidates
2642 // the target, with an implicit promise that it will be written to again
2643 // before it is read.
2644 if (literal != NULL || (right_var != NULL && right_var != var)) {
2645 target.TakeValue(NOT_INSIDE_TYPEOF);
2646 } else {
2647 target.GetValue(NOT_INSIDE_TYPEOF);
2648 }
2649 Load(node->value());
2650 GenericBinaryOperation(node->binary_op(),
2651 node->type(),
2652 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
2653 }
2654
2655 if (var != NULL &&
2656 var->mode() == Variable::CONST &&
2657 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2658 // Assignment ignored - leave the value on the stack.
2659 } else {
2660 CodeForSourcePosition(node->position());
2661 if (node->op() == Token::INIT_CONST) {
2662 // Dynamic constant initializations must use the function context
2663 // and initialize the actual constant declared. Dynamic variable
2664 // initializations are simply assignments and use SetValue.
2665 target.SetValue(CONST_INIT);
2666 } else {
2667 target.SetValue(NOT_CONST_INIT);
2668 }
2669 if (node->ends_initialization_block()) {
2670 ASSERT(target.type() == Reference::NAMED ||
2671 target.type() == Reference::KEYED);
2672 // End of initialization block. Revert to fast case. The
2673 // argument to the runtime call is the receiver, which is the
2674 // first value pushed as part of the reference, which is below
2675 // the lhs value.
2676 frame_->PushElementAt(target.size());
2677 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
2678 }
2679 }
2680 }
2681}
2682
2683
2684void CodeGenerator::VisitThrow(Throw* node) {
2685 Comment cmnt(masm_, "[ Throw");
2686 Load(node->exception());
2687 Result result = frame_->CallRuntime(Runtime::kThrow, 1);
2688 frame_->Push(&result);
2689}
2690
2691
2692void CodeGenerator::VisitProperty(Property* node) {
2693 Comment cmnt(masm_, "[ Property");
2694 Reference property(this, node);
2695 property.GetValue(typeof_state());
2696}
2697
2698
2699void CodeGenerator::VisitCall(Call* node) {
2700 Comment cmnt(masm_, "[ Call");
2701
2702 ZoneList<Expression*>* args = node->arguments();
2703
2704 // Check if the function is a variable or a property.
2705 Expression* function = node->expression();
2706 Variable* var = function->AsVariableProxy()->AsVariable();
2707 Property* property = function->AsProperty();
2708
2709 // ------------------------------------------------------------------------
2710 // Fast-case: Use inline caching.
2711 // ---
2712 // According to ECMA-262, section 11.2.3, page 44, the function to call
2713 // must be resolved after the arguments have been evaluated. The IC code
2714 // automatically handles this by loading the arguments before the function
2715 // is resolved in cache misses (this also holds for megamorphic calls).
2716 // ------------------------------------------------------------------------
2717
2718 if (var != NULL && var->is_possibly_eval()) {
2719 // ----------------------------------
2720 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2721 // ----------------------------------
2722
2723 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2724 // resolve the function we need to call and the receiver of the
2725 // call. Then we call the resolved function using the given
2726 // arguments.
2727
2728 // Prepare the stack for the call to the resolved function.
2729 Load(function);
2730
2731 // Allocate a frame slot for the receiver.
2732 frame_->Push(Factory::undefined_value());
2733 int arg_count = args->length();
2734 for (int i = 0; i < arg_count; i++) {
2735 Load(args->at(i));
2736 }
2737
2738 // Prepare the stack for the call to ResolvePossiblyDirectEval.
2739 frame_->PushElementAt(arg_count + 1);
2740 if (arg_count > 0) {
2741 frame_->PushElementAt(arg_count);
2742 } else {
2743 frame_->Push(Factory::undefined_value());
2744 }
2745
2746 // Resolve the call.
2747 Result result =
2748 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
2749
2750 // Touch up the stack with the right values for the function and the
2751 // receiver. Use a scratch register to avoid destroying the result.
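    // The runtime returns a two-element FixedArray: element 0 is the
    // function to call and element 1 is the receiver.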
2752 Result scratch = allocator_->Allocate();
2753 ASSERT(scratch.is_valid());
2754 __ movq(scratch.reg(),
2755 FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
2756 frame_->SetElementAt(arg_count + 1, &scratch);
2757
2758 // We can reuse the result register now.
2759 frame_->Spill(result.reg());
2760 __ movq(result.reg(),
2761 FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
2762 frame_->SetElementAt(arg_count, &result);
2763
2764 // Call the function.
2765 CodeForSourcePosition(node->position());
2766 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
2767 CallFunctionStub call_function(arg_count, in_loop);
2768 result = frame_->CallStub(&call_function, arg_count + 1);
2769
2770 // Restore the context and overwrite the function on the stack with
2771 // the result.
2772 frame_->RestoreContextRegister();
2773 frame_->SetElementAt(0, &result);
2774
2775 } else if (var != NULL && !var->is_this() && var->is_global()) {
2776 // ----------------------------------
2777 // JavaScript example: 'foo(1, 2, 3)' // foo is global
2778 // ----------------------------------
2779
2780 // Push the name of the function and the receiver onto the stack.
2781 frame_->Push(var->name());
2782
2783 // Pass the global object as the receiver and let the IC stub
2784 // patch the stack to use the global proxy as 'this' in the
2785 // invoked function.
2786 LoadGlobal();
2787
2788 // Load the arguments.
2789 int arg_count = args->length();
2790 for (int i = 0; i < arg_count; i++) {
2791 Load(args->at(i));
2792 }
2793
2794 // Call the IC initialization code.
2795 CodeForSourcePosition(node->position());
2796 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
2797 arg_count,
2798 loop_nesting());
2799 frame_->RestoreContextRegister();
2800 // Replace the function on the stack with the result.
2801 frame_->SetElementAt(0, &result);
2802
2803 } else if (var != NULL && var->slot() != NULL &&
2804 var->slot()->type() == Slot::LOOKUP) {
2805 // ----------------------------------
2806 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
2807 // ----------------------------------
2808
2809 // Load the function from the context. Sync the frame so we can
2810 // push the arguments directly into place.
2811 frame_->SyncRange(0, frame_->element_count() - 1);
2812 frame_->EmitPush(rsi);
2813 frame_->EmitPush(var->name());
2814 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2815 // The runtime call returns a pair of values in rax and rdx. The
2816 // looked-up function is in rax and the receiver is in rdx. These
2817 // register references are not ref counted here. We spill them
2818 // eagerly since they are arguments to an inevitable call (and are
2819 // not sharable by the arguments).
2820 ASSERT(!allocator()->is_used(rax));
2821 frame_->EmitPush(rax);
2822
2823 // Load the receiver.
2824 ASSERT(!allocator()->is_used(rdx));
2825 frame_->EmitPush(rdx);
2826
2827 // Call the function.
2828 CallWithArguments(args, node->position());
2829
2830 } else if (property != NULL) {
2831 // Check if the key is a literal string.
2832 Literal* literal = property->key()->AsLiteral();
2833
2834 if (literal != NULL && literal->handle()->IsSymbol()) {
2835 // ------------------------------------------------------------------
2836 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
2837 // ------------------------------------------------------------------
2838
2839 Handle<String> name = Handle<String>::cast(literal->handle());
2840
2841 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
2842 name->IsEqualTo(CStrVector("apply")) &&
2843 args->length() == 2 &&
2844 args->at(1)->AsVariableProxy() != NULL &&
2845 args->at(1)->AsVariableProxy()->IsArguments()) {
2846 // Use the optimized Function.prototype.apply that avoids
2847 // allocating lazily allocated arguments objects.
2848 CallApplyLazy(property,
2849 args->at(0),
2850 args->at(1)->AsVariableProxy(),
2851 node->position());
2852
2853 } else {
2854 // Push the name of the function and the receiver onto the stack.
2855 frame_->Push(name);
2856 Load(property->obj());
2857
2858 // Load the arguments.
2859 int arg_count = args->length();
2860 for (int i = 0; i < arg_count; i++) {
2861 Load(args->at(i));
2862 }
2863
2864 // Call the IC initialization code.
2865 CodeForSourcePosition(node->position());
2866 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
2867 arg_count,
2868 loop_nesting());
2869 frame_->RestoreContextRegister();
2870 // Replace the function on the stack with the result.
2871 frame_->SetElementAt(0, &result);
2872 }
2873
2874 } else {
2875 // -------------------------------------------
2876 // JavaScript example: 'array[index](1, 2, 3)'
2877 // -------------------------------------------
2878
2879 // Load the function to call from the property through a reference.
2880 Reference ref(this, property);
2881 ref.GetValue(NOT_INSIDE_TYPEOF);
2882
2883 // Pass receiver to called function.
2884 if (property->is_synthetic()) {
2885 // Use global object as receiver.
2886 LoadGlobalReceiver();
2887 } else {
2888 // The reference's size is non-negative.
2889 frame_->PushElementAt(ref.size());
2890 }
2891
2892 // Call the function.
2893 CallWithArguments(args, node->position());
2894 }
2895
2896 } else {
2897 // ----------------------------------
2898 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
2899 // ----------------------------------
2900
2901 // Load the function.
2902 Load(function);
2903
2904 // Pass the global proxy as the receiver.
2905 LoadGlobalReceiver();
2906
2907 // Call the function.
2908 CallWithArguments(args, node->position());
2909 }
2910}
2911
2912
2913void CodeGenerator::VisitCallNew(CallNew* node) {
2914 Comment cmnt(masm_, "[ CallNew");
2915
2916 // According to ECMA-262, section 11.2.2, page 44, the function
2917 // expression in new calls must be evaluated before the
2918 // arguments. This is different from ordinary calls, where the
2919 // actual function to call is resolved after the arguments have been
2920 // evaluated.
2921
2922 // Compute function to call and use the global object as the
2923 // receiver. There is no need to use the global proxy here because
2924 // it will always be replaced with a newly allocated object.
2925 Load(node->expression());
2926 LoadGlobal();
2927
2928 // Push the arguments ("left-to-right") on the stack.
2929 ZoneList<Expression*>* args = node->arguments();
2930 int arg_count = args->length();
2931 for (int i = 0; i < arg_count; i++) {
2932 Load(args->at(i));
2933 }
2934
2935 // Call the construct call builtin that handles allocation and
2936 // constructor invocation.
2937 CodeForSourcePosition(node->position());
2938 Result result = frame_->CallConstructor(arg_count);
2939 // Replace the function on the stack with the result.
2940 frame_->SetElementAt(0, &result);
2941}
2942
2943
2944void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
2945 if (CheckForInlineRuntimeCall(node)) {
2946 return;
2947 }
2948
2949 ZoneList<Expression*>* args = node->arguments();
2950 Comment cmnt(masm_, "[ CallRuntime");
2951 Runtime::Function* function = node->function();
2952
2953 if (function == NULL) {
2954 // Prepare stack for calling JS runtime function.
2955 frame_->Push(node->name());
2956 // Push the builtins object found in the current global object.
2957 Result temp = allocator()->Allocate();
2958 ASSERT(temp.is_valid());
2959 __ movq(temp.reg(), GlobalObject());
2960 __ movq(temp.reg(),
2961 FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
2962 frame_->Push(&temp);
2963 }
2964
2965 // Push the arguments ("left-to-right").
2966 int arg_count = args->length();
2967 for (int i = 0; i < arg_count; i++) {
2968 Load(args->at(i));
2969 }
2970
2971 if (function == NULL) {
2972 // Call the JS runtime function.
2973 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
2974 arg_count,
2975 loop_nesting_);
2976 frame_->RestoreContextRegister();
2977 frame_->SetElementAt(0, &answer);
2978 } else {
2979 // Call the C runtime function.
2980 Result answer = frame_->CallRuntime(function, arg_count);
2981 frame_->Push(&answer);
2982 }
2983}
2984
2985
2986void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
2987 // Note that because of NOT and an optimization in comparison of a typeof
2988 // expression to a literal string, this function can fail to leave a value
2989 // on top of the frame or in the cc register.
2990 Comment cmnt(masm_, "[ UnaryOperation");
2991
2992 Token::Value op = node->op();
2993
2994 if (op == Token::NOT) {
2995 // Swap the true and false targets but keep the same actual label
2996 // as the fall through.
2997 destination()->Invert();
2998 LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
2999 // Swap the labels back.
3000 destination()->Invert();
3001
3002 } else if (op == Token::DELETE) {
3003 Property* property = node->expression()->AsProperty();
3004 if (property != NULL) {
3005 Load(property->obj());
3006 Load(property->key());
3007 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
3008 frame_->Push(&answer);
3009 return;
3010 }
3011
3012 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3013 if (variable != NULL) {
3014 Slot* slot = variable->slot();
3015 if (variable->is_global()) {
3016 LoadGlobal();
3017 frame_->Push(variable->name());
3018 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3019 CALL_FUNCTION, 2);
3020 frame_->Push(&answer);
3021 return;
3022
3023 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3024 // Call the runtime to look up the context holding the named
3025 // variable. Sync the virtual frame eagerly so we can push the
3026 // arguments directly into place.
3027 frame_->SyncRange(0, frame_->element_count() - 1);
3028 frame_->EmitPush(rsi);
3029 frame_->EmitPush(variable->name());
3030 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
3031 ASSERT(context.is_register());
3032 frame_->EmitPush(context.reg());
3033 context.Unuse();
3034 frame_->EmitPush(variable->name());
3035 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3036 CALL_FUNCTION, 2);
3037 frame_->Push(&answer);
3038 return;
3039 }
3040
3041 // Default: Result of deleting non-global, not dynamically
3042 // introduced variables is false.
3043 frame_->Push(Factory::false_value());
3044
3045 } else {
3046 // Default: Result of deleting expressions is true.
3047 Load(node->expression()); // may have side-effects
3048 frame_->SetElementAt(0, Factory::true_value());
3049 }
3050
3051 } else if (op == Token::TYPEOF) {
3052 // Special case for loading the typeof expression; see comment on
3053 // LoadTypeofExpression().
3054 LoadTypeofExpression(node->expression());
3055 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
3056 frame_->Push(&answer);
3057
3058 } else if (op == Token::VOID) {
3059 Expression* expression = node->expression();
3060 if (expression && expression->AsLiteral() && (
3061 expression->AsLiteral()->IsTrue() ||
3062 expression->AsLiteral()->IsFalse() ||
3063 expression->AsLiteral()->handle()->IsNumber() ||
3064 expression->AsLiteral()->handle()->IsString() ||
3065 expression->AsLiteral()->handle()->IsJSRegExp() ||
3066 expression->AsLiteral()->IsNull())) {
3067 // Omit evaluating the value of the primitive literal.
3068 // It will be discarded anyway, and can have no side effect.
3069 frame_->Push(Factory::undefined_value());
3070 } else {
3071 Load(node->expression());
3072 frame_->SetElementAt(0, Factory::undefined_value());
3073 }
3074
3075 } else {
3076 Load(node->expression());
3077 switch (op) {
3078 case Token::NOT:
3079 case Token::DELETE:
3080 case Token::TYPEOF:
3081 UNREACHABLE(); // handled above
3082 break;
3083
3084 case Token::SUB: {
3085 bool overwrite =
 3086 (node->expression()->AsBinaryOperation() != NULL &&
3087 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
 3088 UnarySubStub stub(overwrite);
3089 // TODO(1222589): remove dependency of TOS being cached inside stub
3090 Result operand = frame_->Pop();
3091 Result answer = frame_->CallStub(&stub, &operand);
3092 frame_->Push(&answer);
3093 break;
3094 }
3095
3096 case Token::BIT_NOT: {
3097 // Smi check.
3098 JumpTarget smi_label;
3099 JumpTarget continue_label;
3100 Result operand = frame_->Pop();
3101 operand.ToRegister();
3102
3103 Condition is_smi = masm_->CheckSmi(operand.reg());
3104 smi_label.Branch(is_smi, &operand);
3105
3106 frame_->Push(&operand); // undo popping of TOS
3107 Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
3108 CALL_FUNCTION, 1);
3109 continue_label.Jump(&answer);
3110 smi_label.Bind(&answer);
3111 answer.ToRegister();
3112 frame_->Spill(answer.reg());
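        // The operand is known to be a smi here; SmiNot computes the bitwise
        // complement of its integer value and leaves a valid smi in the register.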
3113 __ SmiNot(answer.reg(), answer.reg());
3114 continue_label.Bind(&answer);
3115 frame_->Push(&answer);
3116 break;
3117 }
3118
3119 case Token::ADD: {
3120 // Smi check.
3121 JumpTarget continue_label;
3122 Result operand = frame_->Pop();
3123 operand.ToRegister();
3124 Condition is_smi = masm_->CheckSmi(operand.reg());
3125 continue_label.Branch(is_smi, &operand);
3126 frame_->Push(&operand);
3127 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
3128 CALL_FUNCTION, 1);
3129
3130 continue_label.Bind(&answer);
3131 frame_->Push(&answer);
3132 break;
3133 }
3134
3135 default:
3136 UNREACHABLE();
3137 }
3138 }
3139}
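// NOTE (editorial sketch, not part of the original source): the unary cases
// above correspond to JavaScript forms such as
//   delete o.x;   typeof x;   void 0;   -x;   ~x;   +x;
// where '~x' and '+x' take the inline smi path and fall back to the
// Builtins::BIT_NOT / Builtins::TO_NUMBER calls for non-smi operands.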
3140
3141
3142// The value in dst was optimistically incremented or decremented. The
3143// result overflowed or was not smi tagged. Undo the operation, call
3144// into the runtime to convert the argument to a number, and call the
3145// specialized add or subtract stub. The result is left in dst.
3146class DeferredPrefixCountOperation: public DeferredCode {
3147 public:
3148 DeferredPrefixCountOperation(Register dst, bool is_increment)
3149 : dst_(dst), is_increment_(is_increment) {
3150 set_comment("[ DeferredCountOperation");
3151 }
3152
3153 virtual void Generate();
3154
3155 private:
3156 Register dst_;
3157 bool is_increment_;
3158};
3159
3160
3161void DeferredPrefixCountOperation::Generate() {
3162 __ push(dst_);
3163 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3164 __ push(rax);
3165 __ Push(Smi::FromInt(1));
3166 if (is_increment_) {
3167 __ CallRuntime(Runtime::kNumberAdd, 2);
3168 } else {
3169 __ CallRuntime(Runtime::kNumberSub, 2);
3170 }
3171 if (!dst_.is(rax)) __ movq(dst_, rax);
3172}
3173
3174
3175// The value in dst was optimistically incremented or decremented. The
3176// result overflowed or was not smi tagged. Undo the operation and call
3177// into the runtime to convert the argument to a number. Update the
3178// original value in old. Call the specialized add or subtract stub.
3179// The result is left in dst.
3180class DeferredPostfixCountOperation: public DeferredCode {
3181 public:
3182 DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
3183 : dst_(dst), old_(old), is_increment_(is_increment) {
3184 set_comment("[ DeferredCountOperation");
3185 }
3186
3187 virtual void Generate();
3188
3189 private:
3190 Register dst_;
3191 Register old_;
3192 bool is_increment_;
3193};
3194
3195
3196void DeferredPostfixCountOperation::Generate() {
3197 __ push(dst_);
3198 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3199
3200 // Save the result of ToNumber to use as the old value.
3201 __ push(rax);
3202
3203 // Call the runtime for the addition or subtraction.
3204 __ push(rax);
3205 __ Push(Smi::FromInt(1));
3206 if (is_increment_) {
3207 __ CallRuntime(Runtime::kNumberAdd, 2);
3208 } else {
3209 __ CallRuntime(Runtime::kNumberSub, 2);
3210 }
3211 if (!dst_.is(rax)) __ movq(dst_, rax);
3212 __ pop(old_);
3213}
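// NOTE (editorial sketch, not part of the original source): both deferred
// classes above are the slow path of an optimistic count operation. For a
// non-smi receiver, e.g.
//   var s = "5"; s++;
// the inline smi check fails, and the deferred code converts "5" with
// Builtins::TO_NUMBER before calling Runtime::kNumberAdd with Smi::FromInt(1).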
3214
3215
3216void CodeGenerator::VisitCountOperation(CountOperation* node) {
3217 Comment cmnt(masm_, "[ CountOperation");
3218
3219 bool is_postfix = node->is_postfix();
3220 bool is_increment = node->op() == Token::INC;
3221
3222 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3223 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3224
3225 // Postfix operations need a stack slot under the reference to hold
3226 // the old value while the new value is being stored. This is so that,
3227 // if storing the new value requires a call, the old value is already
3228 // in the frame where it can be spilled.
3229 if (is_postfix) frame_->Push(Smi::FromInt(0));
3230
3231 { Reference target(this, node->expression());
3232 if (target.is_illegal()) {
3233 // Spoof the virtual frame to have the expected height (one higher
3234 // than on entry).
3235 if (!is_postfix) frame_->Push(Smi::FromInt(0));
3236 return;
3237 }
3238 target.TakeValue(NOT_INSIDE_TYPEOF);
3239
3240 Result new_value = frame_->Pop();
3241 new_value.ToRegister();
3242
3243 Result old_value; // Only allocated in the postfix case.
3244 if (is_postfix) {
3245 // Allocate a temporary to preserve the old value.
3246 old_value = allocator_->Allocate();
3247 ASSERT(old_value.is_valid());
3248 __ movq(old_value.reg(), new_value.reg());
3249 }
3250 // Ensure the new value is writable.
3251 frame_->Spill(new_value.reg());
3252
3253 DeferredCode* deferred = NULL;
3254 if (is_postfix) {
3255 deferred = new DeferredPostfixCountOperation(new_value.reg(),
3256 old_value.reg(),
3257 is_increment);
3258 } else {
3259 deferred = new DeferredPrefixCountOperation(new_value.reg(),
3260 is_increment);
3261 }
3262
3263 __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
3264 if (is_increment) {
3265 __ SmiAddConstant(kScratchRegister,
3266 new_value.reg(),
3267 Smi::FromInt(1),
3268 deferred->entry_label());
3269 } else {
3270 __ SmiSubConstant(kScratchRegister,
3271 new_value.reg(),
3272 Smi::FromInt(1),
3273 deferred->entry_label());
3274 }
3275 __ movq(new_value.reg(), kScratchRegister);
3276 deferred->BindExit();
3277
3278 // Postfix: store the old value in the allocated slot under the
3279 // reference.
3280 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
3281
3282 frame_->Push(&new_value);
3283 // Non-constant: update the reference.
3284 if (!is_const) target.SetValue(NOT_CONST_INIT);
3285 }
3286
3287 // Postfix: drop the new value and use the old.
3288 if (is_postfix) frame_->Drop();
3289}
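// NOTE (editorial sketch, not part of the original source): the extra slot
// pushed for postfix operations is what makes, e.g.,
//   var old = a[f()]++;
// work: the old value survives in the frame across a (possibly allocating)
// keyed store and is what remains on top after the new value is dropped.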
3290
3291
3292void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3293 // TODO(X64): This code was copied verbatim from codegen-ia32.
3294 // Either find a reason to change it or move it to a shared location.
3295
3296 // Note that due to an optimization in comparison operations (typeof
3297 // compared to a string literal), we can evaluate a binary expression such
3298 // as AND or OR and not leave a value on the frame or in the cc register.
3299 Comment cmnt(masm_, "[ BinaryOperation");
3300 Token::Value op = node->op();
3301
3302 // According to ECMA-262 section 11.11, page 58, the binary logical
3303 // operators must yield the result of one of the two expressions
3304 // before any ToBoolean() conversions. This means that the value
3305 // produced by a && or || operator is not necessarily a boolean.
3306
3307 // NOTE: If the left hand side produces a materialized value (not
3308 // control flow), we force the right hand side to do the same. This
3309 // is necessary because we assume that if we get control flow on the
3310 // last path out of an expression we got it on all paths.
3311 if (op == Token::AND) {
3312 JumpTarget is_true;
3313 ControlDestination dest(&is_true, destination()->false_target(), true);
3314 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
3315
3316 if (dest.false_was_fall_through()) {
3317 // The current false target was used as the fall-through. If
3318 // there are no dangling jumps to is_true then the left
3319 // subexpression was unconditionally false. Otherwise we have
3320 // paths where we do have to evaluate the right subexpression.
3321 if (is_true.is_linked()) {
3322 // We need to compile the right subexpression. If the jump to
3323 // the current false target was a forward jump then we have a
3324 // valid frame, we have just bound the false target, and we
3325 // have to jump around the code for the right subexpression.
3326 if (has_valid_frame()) {
3327 destination()->false_target()->Unuse();
3328 destination()->false_target()->Jump();
3329 }
3330 is_true.Bind();
3331 // The left subexpression compiled to control flow, so the
3332 // right one is free to do so as well.
3333 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
3334 } else {
3335 // We have actually just jumped to or bound the current false
3336 // target but the current control destination is not marked as
3337 // used.
3338 destination()->Use(false);
3339 }
3340
3341 } else if (dest.is_used()) {
3342 // The left subexpression compiled to control flow (and is_true
3343 // was just bound), so the right is free to do so as well.
3344 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
3345
3346 } else {
3347 // We have a materialized value on the frame, so we exit with
3348 // one on all paths. There are possibly also jumps to is_true
3349 // from nested subexpressions.
3350 JumpTarget pop_and_continue;
3351 JumpTarget exit;
3352
3353 // Avoid popping the result if it converts to 'false' using the
3354 // standard ToBoolean() conversion as described in ECMA-262,
3355 // section 9.2, page 30.
3356 //
3357 // Duplicate the TOS value. The duplicate will be popped by
3358 // ToBoolean.
3359 frame_->Dup();
3360 ControlDestination dest(&pop_and_continue, &exit, true);
3361 ToBoolean(&dest);
3362
3363 // Pop the result of evaluating the first part.
3364 frame_->Drop();
3365
3366 // Compile right side expression.
3367 is_true.Bind();
3368 Load(node->right());
3369
3370 // Exit (always with a materialized value).
3371 exit.Bind();
3372 }
3373
3374 } else if (op == Token::OR) {
3375 JumpTarget is_false;
3376 ControlDestination dest(destination()->true_target(), &is_false, false);
3377 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
3378
3379 if (dest.true_was_fall_through()) {
3380 // The current true target was used as the fall-through. If
3381 // there are no dangling jumps to is_false then the left
3382 // subexpression was unconditionally true. Otherwise we have
3383 // paths where we do have to evaluate the right subexpression.
3384 if (is_false.is_linked()) {
3385 // We need to compile the right subexpression. If the jump to
3386 // the current true target was a forward jump then we have a
3387 // valid frame, we have just bound the true target, and we
3388 // have to jump around the code for the right subexpression.
3389 if (has_valid_frame()) {
3390 destination()->true_target()->Unuse();
3391 destination()->true_target()->Jump();
3392 }
3393 is_false.Bind();
3394 // The left subexpression compiled to control flow, so the
3395 // right one is free to do so as well.
3396 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
3397 } else {
3398 // We have just jumped to or bound the current true target but
3399 // the current control destination is not marked as used.
3400 destination()->Use(true);
3401 }
3402
3403 } else if (dest.is_used()) {
3404 // The left subexpression compiled to control flow (and is_false
3405 // was just bound), so the right is free to do so as well.
3406 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
3407
3408 } else {
3409 // We have a materialized value on the frame, so we exit with
3410 // one on all paths. There are possibly also jumps to is_false
3411 // from nested subexpressions.
3412 JumpTarget pop_and_continue;
3413 JumpTarget exit;
3414
3415 // Avoid popping the result if it converts to 'true' using the
3416 // standard ToBoolean() conversion as described in ECMA-262,
3417 // section 9.2, page 30.
3418 //
3419 // Duplicate the TOS value. The duplicate will be popped by
3420 // ToBoolean.
3421 frame_->Dup();
3422 ControlDestination dest(&exit, &pop_and_continue, false);
3423 ToBoolean(&dest);
3424
3425 // Pop the result of evaluating the first part.
3426 frame_->Drop();
3427
3428 // Compile right side expression.
3429 is_false.Bind();
3430 Load(node->right());
3431
3432 // Exit (always with a materialized value).
3433 exit.Bind();
3434 }
3435
3436 } else {
3437 // NOTE: The code below assumes that the slow cases (calls to runtime)
3438 // never return a constant/immutable object.
3439 OverwriteMode overwrite_mode = NO_OVERWRITE;
3440 if (node->left()->AsBinaryOperation() != NULL &&
3441 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3442 overwrite_mode = OVERWRITE_LEFT;
3443 } else if (node->right()->AsBinaryOperation() != NULL &&
3444 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3445 overwrite_mode = OVERWRITE_RIGHT;
3446 }
3447
3448 Load(node->left());
3449 Load(node->right());
3450 GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
3451 }
3452}
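// NOTE (editorial sketch, not part of the original source): per the
// ECMA-262 section 11.11 comment above, && and || yield an operand value
// rather than a boolean, e.g.
//   ("" && f())   // evaluates to "" without calling f
//   (0 || "x")    // evaluates to "x"
// which is why the code duplicates TOS for ToBoolean instead of consuming it.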
3453
3454
3455
3456void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
3457 Comment cmnt(masm_, "[ CompareOperation");
3458
3459 // Get the expressions from the node.
3460 Expression* left = node->left();
3461 Expression* right = node->right();
3462 Token::Value op = node->op();
3463 // To make typeof testing for natives implemented in JavaScript really
3464 // efficient, we generate special code for expressions of the form:
3465 // 'typeof <expression> == <string>'.
3466 UnaryOperation* operation = left->AsUnaryOperation();
3467 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
3468 (operation != NULL && operation->op() == Token::TYPEOF) &&
3469 (right->AsLiteral() != NULL &&
3470 right->AsLiteral()->handle()->IsString())) {
3471 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
3472
3473 // Load the operand and move it to a register.
3474 LoadTypeofExpression(operation->expression());
3475 Result answer = frame_->Pop();
3476 answer.ToRegister();
3477
3478 if (check->Equals(Heap::number_symbol())) {
3479 Condition is_smi = masm_->CheckSmi(answer.reg());
3480 destination()->true_target()->Branch(is_smi);
3481 frame_->Spill(answer.reg());
3482 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
3483 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
3484 answer.Unuse();
3485 destination()->Split(equal);
3486
3487 } else if (check->Equals(Heap::string_symbol())) {
3488 Condition is_smi = masm_->CheckSmi(answer.reg());
3489 destination()->false_target()->Branch(is_smi);
3490
3491 // It can be an undetectable string object.
3492 __ movq(kScratchRegister,
3493 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3494 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3495 Immediate(1 << Map::kIsUndetectable));
3496 destination()->false_target()->Branch(not_zero);
3497 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
3498 answer.Unuse();
3499 destination()->Split(below); // Unsigned byte comparison needed.
3500
3501 } else if (check->Equals(Heap::boolean_symbol())) {
3502 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
3503 destination()->true_target()->Branch(equal);
3504 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
3505 answer.Unuse();
3506 destination()->Split(equal);
3507
3508 } else if (check->Equals(Heap::undefined_symbol())) {
3509 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
3510 destination()->true_target()->Branch(equal);
3511
3512 Condition is_smi = masm_->CheckSmi(answer.reg());
3513 destination()->false_target()->Branch(is_smi);
3514
3515 // It can be an undetectable object.
3516 __ movq(kScratchRegister,
3517 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3518 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3519 Immediate(1 << Map::kIsUndetectable));
3520 answer.Unuse();
3521 destination()->Split(not_zero);
3522
3523 } else if (check->Equals(Heap::function_symbol())) {
3524 Condition is_smi = masm_->CheckSmi(answer.reg());
3525 destination()->false_target()->Branch(is_smi);
3526 frame_->Spill(answer.reg());
3527 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
3528 answer.Unuse();
3529 destination()->Split(equal);
3530
3531 } else if (check->Equals(Heap::object_symbol())) {
3532 Condition is_smi = masm_->CheckSmi(answer.reg());
3533 destination()->false_target()->Branch(is_smi);
3534 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
3535 destination()->true_target()->Branch(equal);
3536
3537 // It can be an undetectable object.
3538 __ movq(kScratchRegister,
3539 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3540 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3541 Immediate(1 << Map::kIsUndetectable));
3542 destination()->false_target()->Branch(not_zero);
3543 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3544 destination()->false_target()->Branch(below);
3545 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3546 answer.Unuse();
3547 destination()->Split(below_equal);
3548 } else {
3549 // Uncommon case: typeof testing against a string literal that is
3550 // never returned from the typeof operator.
3551 answer.Unuse();
3552 destination()->Goto(false);
3553 }
3554 return;
3555 }
3556
3557 Condition cc = no_condition;
3558 bool strict = false;
3559 switch (op) {
3560 case Token::EQ_STRICT:
3561 strict = true;
3562 // Fall through
3563 case Token::EQ:
3564 cc = equal;
3565 break;
3566 case Token::LT:
3567 cc = less;
3568 break;
3569 case Token::GT:
3570 cc = greater;
3571 break;
3572 case Token::LTE:
3573 cc = less_equal;
3574 break;
3575 case Token::GTE:
3576 cc = greater_equal;
3577 break;
3578 case Token::IN: {
3579 Load(left);
3580 Load(right);
3581 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
3582 frame_->Push(&answer); // push the result
3583 return;
3584 }
3585 case Token::INSTANCEOF: {
3586 Load(left);
3587 Load(right);
3588 InstanceofStub stub;
3589 Result answer = frame_->CallStub(&stub, 2);
3590 answer.ToRegister();
3591 __ testq(answer.reg(), answer.reg());
3592 answer.Unuse();
3593 destination()->Split(zero);
3594 return;
3595 }
3596 default:
3597 UNREACHABLE();
3598 }
3599 Load(left);
3600 Load(right);
3601 Comparison(cc, strict, destination());
3602}
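// NOTE (editorial sketch, not part of the original source): the special case
// above means that a test such as
//   typeof x == "number"
// compiles to the inline smi/map checks against the corresponding symbol
// rather than materializing the typeof string and doing a generic comparison.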
3603
3604
3605void CodeGenerator::VisitThisFunction(ThisFunction* node) {
3606 frame_->PushFunction();
3607}
3608
3609
3610void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3611 ASSERT(args->length() == 1);
3612
3613 // ArgumentsAccessStub expects the key in rdx and the formal
3614 // parameter count in rax.
3615 Load(args->at(0));
3616 Result key = frame_->Pop();
3617 // Explicitly create a constant result.
3618 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
3619 // Call the shared stub to get to arguments[key].
3620 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3621 Result result = frame_->CallStub(&stub, &key, &count);
3622 frame_->Push(&result);
3623}
3624
3625
3626void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3627 ASSERT(args->length() == 1);
3628 Load(args->at(0));
3629 Result value = frame_->Pop();
3630 value.ToRegister();
3631 ASSERT(value.is_valid());
3632 Condition is_smi = masm_->CheckSmi(value.reg());
3633 destination()->false_target()->Branch(is_smi);
3634 // It is a heap object - get map.
3635 // Check if the object is a JS array or not.
3636 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
3637 value.Unuse();
3638 destination()->Split(equal);
3639}
3640
3641
3642void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3643 ASSERT(args->length() == 0);
3644
3645 // Get the frame pointer for the calling frame.
3646 Result fp = allocator()->Allocate();
3647 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3648
3649 // Skip the arguments adaptor frame if it exists.
3650 Label check_frame_marker;
3651 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
3652 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3653 __ j(not_equal, &check_frame_marker);
3654 __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
3655
3656 // Check the marker in the calling frame.
3657 __ bind(&check_frame_marker);
3658 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
3659 Smi::FromInt(StackFrame::CONSTRUCT));
3660 fp.Unuse();
3661 destination()->Split(equal);
3662}
3663
3664
3665void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3666 ASSERT(args->length() == 0);
3667 // ArgumentsAccessStub takes the parameter count as an input argument
3668 // in register rax. Create a constant result for it.
3669 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
3670 // Call the shared stub to get to the arguments.length.
3671 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3672 Result result = frame_->CallStub(&stub, &count);
3673 frame_->Push(&result);
3674}
3675
3676
3677void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3678 Comment(masm_, "[ GenerateFastCharCodeAt");
3679 ASSERT(args->length() == 2);
3680
3681 Label slow_case;
3682 Label end;
3683 Label not_a_flat_string;
3684 Label a_cons_string;
3685 Label try_again_with_new_string;
3686 Label ascii_string;
3687 Label got_char_code;
3688
3689 Load(args->at(0));
3690 Load(args->at(1));
3691 Result index = frame_->Pop();
3692 Result object = frame_->Pop();
3693
3694 // Get register rcx to use as shift amount later.
3695 Result shift_amount;
3696 if (object.is_register() && object.reg().is(rcx)) {
3697 Result fresh = allocator_->Allocate();
3698 shift_amount = object;
3699 object = fresh;
3700 __ movq(object.reg(), rcx);
3701 }
3702 if (index.is_register() && index.reg().is(rcx)) {
3703 Result fresh = allocator_->Allocate();
3704 shift_amount = index;
3705 index = fresh;
3706 __ movq(index.reg(), rcx);
3707 }
3708 // There could be references to rcx in the frame. Allocating will
3709 // spill them, otherwise spill explicitly.
3710 if (shift_amount.is_valid()) {
3711 frame_->Spill(rcx);
3712 } else {
3713 shift_amount = allocator()->Allocate(rcx);
3714 }
3715 ASSERT(shift_amount.is_register());
3716 ASSERT(shift_amount.reg().is(rcx));
3717 ASSERT(allocator_->count(rcx) == 1);
3718
3719 // We will mutate the index register and possibly the object register.
3720 // The case where they are somehow the same register is handled
3721 // because we only mutate them in the case where the receiver is a
3722 // heap object and the index is not.
3723 object.ToRegister();
3724 index.ToRegister();
3725 frame_->Spill(object.reg());
3726 frame_->Spill(index.reg());
3727
3728 // We need a single extra temporary register.
3729 Result temp = allocator()->Allocate();
3730 ASSERT(temp.is_valid());
3731
3732 // There is no virtual frame effect from here up to the final result
3733 // push.
3734
3735 // If the receiver is a smi trigger the slow case.
3736 __ JumpIfSmi(object.reg(), &slow_case);
3737
3738 // If the index is negative or non-smi trigger the slow case.
3739 __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
3740
3741 // Untag the index.
3742 __ SmiToInteger32(index.reg(), index.reg());
3743
3744 __ bind(&try_again_with_new_string);
3745 // Fetch the instance type of the receiver into rcx.
3746 __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
3747 __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
3748 // If the receiver is not a string trigger the slow case.
3749 __ testb(rcx, Immediate(kIsNotStringMask));
3750 __ j(not_zero, &slow_case);
3751
3752 // Here we make assumptions about the tag values and the shifts needed.
3753 // See the comment in objects.h.
3754 ASSERT(kLongStringTag == 0);
3755 ASSERT(kMediumStringTag + String::kLongLengthShift ==
3756 String::kMediumLengthShift);
3757 ASSERT(kShortStringTag + String::kLongLengthShift ==
3758 String::kShortLengthShift);
3759 __ and_(rcx, Immediate(kStringSizeMask));
3760 __ addq(rcx, Immediate(String::kLongLengthShift));
3761 // Fetch the length field into the temporary register.
3762 __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
3763 __ shrl(temp.reg()); // The shift amount in rcx is an implicit operand.
3764 // Check for index out of range.
3765 __ cmpl(index.reg(), temp.reg());
3766 __ j(greater_equal, &slow_case);
3767 // Reload the instance type (into the temp register this time).
3768 __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
3769 __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
3770
3771 // We need special handling for non-flat strings.
3772 ASSERT(kSeqStringTag == 0);
3773 __ testb(temp.reg(), Immediate(kStringRepresentationMask));
3774 __ j(not_zero, &not_a_flat_string);
3775 // Check for 1-byte or 2-byte string.
3776 __ testb(temp.reg(), Immediate(kStringEncodingMask));
3777 __ j(not_zero, &ascii_string);
3778
3779 // 2-byte string.
3780 // Load the 2-byte character code into the temp register.
3781 __ movzxwl(temp.reg(), FieldOperand(object.reg(),
3782 index.reg(),
3783 times_2,
3784 SeqTwoByteString::kHeaderSize));
3785 __ jmp(&got_char_code);
3786
3787 // ASCII string.
3788 __ bind(&ascii_string);
3789 // Load the byte into the temp register.
3790 __ movzxbl(temp.reg(), FieldOperand(object.reg(),
3791 index.reg(),
3792 times_1,
3793 SeqAsciiString::kHeaderSize));
3794 __ bind(&got_char_code);
3795 __ Integer32ToSmi(temp.reg(), temp.reg());
3796 __ jmp(&end);
3797
3798 // Handle non-flat strings.
3799 __ bind(&not_a_flat_string);
3800 __ and_(temp.reg(), Immediate(kStringRepresentationMask));
3801 __ cmpb(temp.reg(), Immediate(kConsStringTag));
3802 __ j(equal, &a_cons_string);
3803 __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
3804 __ j(not_equal, &slow_case);
3805
3806 // SlicedString.
3807 // Add the offset to the index and trigger the slow case on overflow.
3808 __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
3809 __ j(overflow, &slow_case);
3810 // Getting the underlying string is done by running the cons string code.
3811
3812 // ConsString.
3813 __ bind(&a_cons_string);
3814 // Get the first of the two strings. Both sliced and cons strings
3815 // store their source string at the same offset.
3816 ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
3817 __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
3818 __ jmp(&try_again_with_new_string);
3819
3820 __ bind(&slow_case);
3821 // Move the undefined value into the result register, which will
3822 // trigger the slow case.
3823 __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
3824
3825 __ bind(&end);
3826 frame_->Push(&temp);
3827}
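// NOTE (editorial sketch, not part of the original source): this is the fast
// path presumably reached via the %_FastCharCodeAt inline runtime call used
// by String.prototype.charCodeAt. Flat ASCII and two-byte strings are read
// directly, cons and sliced strings retry on their underlying string, and
// anything else (or an out-of-range index) yields the undefined sentinel so
// the caller falls back to the slow case.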
3828
3829
3830void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3831 ASSERT(args->length() == 1);
3832 Load(args->at(0));
3833 Result value = frame_->Pop();
3834 value.ToRegister();
3835 ASSERT(value.is_valid());
3836 Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
3837 value.Unuse();
3838 destination()->Split(positive_smi);
3839}
3840
3841
3842void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3843 ASSERT(args->length() == 1);
3844 Load(args->at(0));
3845 Result value = frame_->Pop();
3846 value.ToRegister();
3847 ASSERT(value.is_valid());
3848 Condition is_smi = masm_->CheckSmi(value.reg());
3849 value.Unuse();
3850 destination()->Split(is_smi);
3851}
3852
3853
3854void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3855 // Conditionally generate a log call.
3856 // Args:
3857 // 0 (literal string): The type of logging (corresponds to the flags).
3858 // This is used to determine whether or not to generate the log call.
3859 // 1 (string): Format string. Access the string at argument index 2
3860 // with '%2s' (see Logger::LogRuntime for all the formats).
3861 // 2 (array): Arguments to the format string.
3862 ASSERT_EQ(args->length(), 3);
3863#ifdef ENABLE_LOGGING_AND_PROFILING
3864 if (ShouldGenerateLog(args->at(0))) {
3865 Load(args->at(1));
3866 Load(args->at(2));
3867 frame_->CallRuntime(Runtime::kLog, 2);
3868 }
3869#endif
3870 // Finally, we're expected to leave a value on the top of the stack.
3871 frame_->Push(Factory::undefined_value());
3872}
3873
3874
3875void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3876 ASSERT(args->length() == 2);
3877
3878 // Load the two objects into registers and perform the comparison.
3879 Load(args->at(0));
3880 Load(args->at(1));
3881 Result right = frame_->Pop();
3882 Result left = frame_->Pop();
3883 right.ToRegister();
3884 left.ToRegister();
3885 __ cmpq(right.reg(), left.reg());
3886 right.Unuse();
3887 left.Unuse();
3888 destination()->Split(equal);
3889}
3890
3891
3892void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
3893 ASSERT(args->length() == 0);
3894 // The rbp value is aligned, so its low bits already form a smi tag (it is
3895 // not necessarily a proper smi value, so it must not be treated as one).
3896 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3897 Result rbp_as_smi = allocator_->Allocate();
3898 ASSERT(rbp_as_smi.is_valid());
3899 __ movq(rbp_as_smi.reg(), rbp);
3900 frame_->Push(&rbp_as_smi);
3901}
3902
3903
3904void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3905 ASSERT(args->length() == 0);
3906 frame_->SpillAll();
3907 __ push(rsi);
3908
3909 // Make sure the frame is aligned like the OS expects.
3910 static const int kFrameAlignment = OS::ActivationFrameAlignment();
3911 if (kFrameAlignment > 0) {
3912 ASSERT(IsPowerOf2(kFrameAlignment));
3913 __ movq(rbx, rsp); // Save in AMD-64 abi callee-saved register.
3914 __ and_(rsp, Immediate(-kFrameAlignment));
3915 }
3916
3917 // Call V8::RandomPositiveSmi().
3918 __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
3919
3920 // Restore stack pointer from callee-saved register.
3921 if (kFrameAlignment > 0) {
3922 __ movq(rsp, rbx);
3923 }
3924
3925 __ pop(rsi);
3926 Result result = allocator_->Allocate(rax);
3927 frame_->Push(&result);
3928}
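// NOTE (editorial sketch, not part of the original source): the runtime call
// above follows the native calling convention, which expects the stack
// aligned to OS::ActivationFrameAlignment(); with an alignment of 16,
// 'and rsp, -16' simply rounds the stack pointer down, e.g.
//   0x7fff5fbf7f18 & ~15 == 0x7fff5fbf7f10
// while the original rsp is preserved in the callee-saved rbx.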
3929
3930
3931void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
3932 JumpTarget done;
3933 JumpTarget call_runtime;
3934 ASSERT(args->length() == 1);
3935
3936 // Load number and duplicate it.
3937 Load(args->at(0));
3938 frame_->Dup();
3939
3940 // Get the number into an unaliased register and load it onto the
3941 // floating point stack still leaving one copy on the frame.
3942 Result number = frame_->Pop();
3943 number.ToRegister();
3944 frame_->Spill(number.reg());
3945 FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
3946 number.Unuse();
3947
3948 // Perform the operation on the number.
3949 switch (op) {
3950 case SIN:
3951 __ fsin();
3952 break;
3953 case COS:
3954 __ fcos();
3955 break;
3956 }
3957
3958 // Go slow case if argument to operation is out of range.
3959 Result eax_reg = allocator()->Allocate(rax);
3960 ASSERT(eax_reg.is_valid());
3961 __ fnstsw_ax();
3962 __ testl(rax, Immediate(0x0400)); // Bit 10 is condition flag C2.
3963 eax_reg.Unuse();
3964 call_runtime.Branch(not_zero);
3965
3966 // Allocate heap number for result if possible.
3967 Result scratch = allocator()->Allocate();
3968 Result heap_number = allocator()->Allocate();
3969 __ AllocateHeapNumber(heap_number.reg(),
3970 scratch.reg(),
3971 call_runtime.entry_label());
3972 scratch.Unuse();
3973
3974 // Store the result in the allocated heap number.
3975 __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
3976 // Replace the extra copy of the argument with the result.
3977 frame_->SetElementAt(0, &heap_number);
3978 done.Jump();
3979
3980 call_runtime.Bind();
3981 // Free ST(0) which was not popped before calling into the runtime.
3982 __ ffree(0);
3983 Result answer;
3984 switch (op) {
3985 case SIN:
3986 answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
3987 break;
3988 case COS:
3989 answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
3990 break;
3991 }
3992 frame_->Push(&answer);
3993 done.Bind();
3994}
3995
3996
3997void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
3998 ASSERT(args->length() == 1);
3999 JumpTarget leave, null, function, non_function_constructor;
4000 Load(args->at(0)); // Load the object.
4001 Result obj = frame_->Pop();
4002 obj.ToRegister();
4003 frame_->Spill(obj.reg());
4004
4005 // If the object is a smi, we return null.
4006 Condition is_smi = masm_->CheckSmi(obj.reg());
4007 null.Branch(is_smi);
4008
4009 // Check that the object is a JS object but take special care of JS
4010 // functions to make sure they have 'Function' as their class.
4011
4012 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
4013 null.Branch(below);
4014
4015 // As long as JS_FUNCTION_TYPE is the last instance type and it is
4016 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4017 // LAST_JS_OBJECT_TYPE.
4018 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4019 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4020 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
4021 function.Branch(equal);
4022
4023 // Check if the constructor in the map is a function.
4024 __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
4025 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
4026 non_function_constructor.Branch(not_equal);
4027
4028 // The obj register now contains the constructor function. Grab the
4029 // instance class name from there.
4030 __ movq(obj.reg(),
4031 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
4032 __ movq(obj.reg(),
4033 FieldOperand(obj.reg(),
4034 SharedFunctionInfo::kInstanceClassNameOffset));
4035 frame_->Push(&obj);
4036 leave.Jump();
4037
4038 // Functions have class 'Function'.
4039 function.Bind();
4040 frame_->Push(Factory::function_class_symbol());
4041 leave.Jump();
4042
4043 // Objects with a non-function constructor have class 'Object'.
4044 non_function_constructor.Bind();
4045 frame_->Push(Factory::Object_symbol());
4046 leave.Jump();
4047
4048 // Non-JS objects have class null.
4049 null.Bind();
4050 frame_->Push(Factory::null_value());
4051
4052 // All done.
4053 leave.Bind();
4054}
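// NOTE (editorial sketch, not part of the original source): this implements
// the class-of query (presumably the %_ClassOf inline runtime call): a smi
// or other non-JS object yields null, a function yields "Function", an
// object whose map's constructor is not a function yields "Object", and a
// plain user object yields its constructor's instance class name.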
4055
4056
4057void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4058 ASSERT(args->length() == 2);
4059 JumpTarget leave;
4060 Load(args->at(0)); // Load the object.
4061 Load(args->at(1)); // Load the value.
4062 Result value = frame_->Pop();
4063 Result object = frame_->Pop();
4064 value.ToRegister();
4065 object.ToRegister();
4066
4067 // if (object->IsSmi()) return value.
4068 Condition is_smi = masm_->CheckSmi(object.reg());
4069 leave.Branch(is_smi, &value);
4070
4071 // It is a heap object - get its map.
4072 Result scratch = allocator_->Allocate();
4073 ASSERT(scratch.is_valid());
4074 // if (!object->IsJSValue()) return value.
4075 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
4076 leave.Branch(not_equal, &value);
4077
4078 // Store the value.
4079 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
4080 // Update the write barrier. Save the value as it will be
4081 // overwritten by the write barrier code and is needed afterward.
4082 Result duplicate_value = allocator_->Allocate();
4083 ASSERT(duplicate_value.is_valid());
4084 __ movq(duplicate_value.reg(), value.reg());
4085 // The object register is also overwritten by the write barrier and
4086 // possibly aliased in the frame.
4087 frame_->Spill(object.reg());
4088 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
4089 scratch.reg());
4090 object.Unuse();
4091 scratch.Unuse();
4092 duplicate_value.Unuse();
4093
4094 // Leave.
4095 leave.Bind(&value);
4096 frame_->Push(&value);
4097}
4098
4099
4100void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4101 ASSERT(args->length() == 1);
4102 JumpTarget leave;
4103 Load(args->at(0)); // Load the object.
4104 frame_->Dup();
4105 Result object = frame_->Pop();
4106 object.ToRegister();
4107 ASSERT(object.is_valid());
4108 // if (object->IsSmi()) return object.
4109 Condition is_smi = masm_->CheckSmi(object.reg());
4110 leave.Branch(is_smi);
4111 // It is a heap object - get map.
4112 Result temp = allocator()->Allocate();
4113 ASSERT(temp.is_valid());
4114 // if (!object->IsJSValue()) return object.
4115 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
4116 leave.Branch(not_equal);
4117 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
4118 object.Unuse();
4119 frame_->SetElementAt(0, &temp);
4120 leave.Bind();
4121}
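// NOTE (editorial sketch, not part of the original source): for a wrapper
// such as
//   var n = new Number(42);
// the code above loads 42 out of the JSValue's value slot, while a smi or
// any non-JSValue receiver is returned unchanged.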
4122
4123
4124// -----------------------------------------------------------------------------
4125// CodeGenerator implementation of Expressions
4126
4127void CodeGenerator::LoadAndSpill(Expression* expression,
4128 TypeofState typeof_state) {
4129 // TODO(x64): No architecture specific code. Move to shared location.
4130 ASSERT(in_spilled_code());
4131 set_in_spilled_code(false);
4132 Load(expression, typeof_state);
4133 frame_->SpillAll();
4134 set_in_spilled_code(true);
4135}
4136
4137
4138void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
4139#ifdef DEBUG
4140 int original_height = frame_->height();
4141#endif
4142 ASSERT(!in_spilled_code());
4143 JumpTarget true_target;
4144 JumpTarget false_target;
4145 ControlDestination dest(&true_target, &false_target, true);
4146 LoadCondition(x, typeof_state, &dest, false);
4147
4148 if (dest.false_was_fall_through()) {
4149 // The false target was just bound.
4150 JumpTarget loaded;
4151 frame_->Push(Factory::false_value());
4152 // There may be dangling jumps to the true target.
4153 if (true_target.is_linked()) {
4154 loaded.Jump();
4155 true_target.Bind();
4156 frame_->Push(Factory::true_value());
4157 loaded.Bind();
4158 }
4159
4160 } else if (dest.is_used()) {
4161 // There is true, and possibly false, control flow (with true as
4162 // the fall through).
4163 JumpTarget loaded;
4164 frame_->Push(Factory::true_value());
4165 if (false_target.is_linked()) {
4166 loaded.Jump();
4167 false_target.Bind();
4168 frame_->Push(Factory::false_value());
4169 loaded.Bind();
4170 }
4171
4172 } else {
4173 // We have a valid value on top of the frame, but we still may
4174 // have dangling jumps to the true and false targets from nested
4175 // subexpressions (eg, the left subexpressions of the
4176 // short-circuited boolean operators).
4177 ASSERT(has_valid_frame());
4178 if (true_target.is_linked() || false_target.is_linked()) {
4179 JumpTarget loaded;
4180 loaded.Jump(); // Don't lose the current TOS.
4181 if (true_target.is_linked()) {
4182 true_target.Bind();
4183 frame_->Push(Factory::true_value());
4184 if (false_target.is_linked()) {
4185 loaded.Jump();
4186 }
4187 }
4188 if (false_target.is_linked()) {
4189 false_target.Bind();
4190 frame_->Push(Factory::false_value());
4191 }
4192 loaded.Bind();
4193 }
4194 }
4195
4196 ASSERT(has_valid_frame());
4197 ASSERT(frame_->height() == original_height + 1);
4198}
4199
4200
4201// Emit code to load the value of an expression to the top of the
4202// frame. If the expression is boolean-valued it may be compiled (or
4203// partially compiled) into control flow to the control destination.
4204// If force_control is true, control flow is forced.
4205void CodeGenerator::LoadCondition(Expression* x,
4206 TypeofState typeof_state,
4207 ControlDestination* dest,
4208 bool force_control) {
4209 ASSERT(!in_spilled_code());
4210 int original_height = frame_->height();
4211
4212 { CodeGenState new_state(this, typeof_state, dest);
4213 Visit(x);
4214
4215 // If we hit a stack overflow, we may not have actually visited
4216 // the expression. In that case, we ensure that we have a
4217 // valid-looking frame state because we will continue to generate
4218 // code as we unwind the C++ stack.
4219 //
4220 // It's possible to have both a stack overflow and a valid frame
4221 // state (eg, a subexpression overflowed, visiting it returned
4222 // with a dummied frame state, and visiting this expression
4223 // returned with a normal-looking state).
4224 if (HasStackOverflow() &&
4225 !dest->is_used() &&
4226 frame_->height() == original_height) {
4227 dest->Goto(true);
4228 }
4229 }
4230
4231 if (force_control && !dest->is_used()) {
4232 // Convert the TOS value into flow to the control destination.
4233 // TODO(X64): Make control flow to control destinations work.
4234 ToBoolean(dest);
4235 }
4236
4237 ASSERT(!(force_control && !dest->is_used()));
4238 ASSERT(dest->is_used() || frame_->height() == original_height + 1);
4239}
4240
4241
4242// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
4243// convert it to a boolean in the condition code register or jump to
4244// 'false_target'/'true_target' as appropriate.
4245void CodeGenerator::ToBoolean(ControlDestination* dest) {
4246 Comment cmnt(masm_, "[ ToBoolean");
4247
4248 // The value to convert should be popped from the frame.
4249 Result value = frame_->Pop();
4250 value.ToRegister();
4251 // Fast case checks.
4252
4253 // 'false' => false.
4254 __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
4255 dest->false_target()->Branch(equal);
4256
4257 // 'true' => true.
4258 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
4259 dest->true_target()->Branch(equal);
4260
4261 // 'undefined' => false.
4262 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4263 dest->false_target()->Branch(equal);
4264
4265 // Smi => false iff zero.
4266 __ SmiCompare(value.reg(), Smi::FromInt(0));
4267 dest->false_target()->Branch(equal);
4268 Condition is_smi = masm_->CheckSmi(value.reg());
4269 dest->true_target()->Branch(is_smi);
4270
4271 // Call the stub for all other cases.
4272 frame_->Push(&value); // Undo the Pop() from above.
4273 ToBooleanStub stub;
4274 Result temp = frame_->CallStub(&stub, 1);
4275 // Convert the result to a condition code.
4276 __ testq(temp.reg(), temp.reg());
4277 temp.Unuse();
4278 dest->Split(not_equal);
4279}
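// NOTE (editorial sketch, not part of the original source): the fast checks
// above mirror ECMA-262 section 9.2: false, undefined and the smi 0 branch
// to the false target, any other smi branches to the true target, and
// heap numbers, strings and other objects fall through to ToBooleanStub.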
4280
4281
4282void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
4283 UNIMPLEMENTED();
4284 // TODO(X64): Implement security policy for loads of smis.
4285}
4286
4287
4288bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
4289 return false;
4290}
4291
4292//------------------------------------------------------------------------------
4293// CodeGenerator implementation of variables, lookups, and stores.
4294
4295Reference::Reference(CodeGenerator* cgen, Expression* expression)
4296 : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
4297 cgen->LoadReference(this);
4298}
4299
4300
4301Reference::~Reference() {
4302 cgen_->UnloadReference(this);
4303}
4304
4305
4306void CodeGenerator::LoadReference(Reference* ref) {
4307 // References are loaded from both spilled and unspilled code. Set the
4308 // state to unspilled to allow that (and explicitly spill after
4309 // construction at the construction sites).
4310 bool was_in_spilled_code = in_spilled_code_;
4311 in_spilled_code_ = false;
4312
4313 Comment cmnt(masm_, "[ LoadReference");
4314 Expression* e = ref->expression();
4315 Property* property = e->AsProperty();
4316 Variable* var = e->AsVariableProxy()->AsVariable();
4317
4318 if (property != NULL) {
4319 // The expression is either a property or a variable proxy that rewrites
4320 // to a property.
4321 Load(property->obj());
4322 // We use a named reference if the key is a literal symbol, unless it is
4323 // a string that can be legally parsed as an integer. This is because
4324 // otherwise we will not get into the slow case code that handles [] on
4325 // String objects.
4326 Literal* literal = property->key()->AsLiteral();
4327 uint32_t dummy;
4328 if (literal != NULL &&
4329 literal->handle()->IsSymbol() &&
4330 !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
4331 ref->set_type(Reference::NAMED);
4332 } else {
4333 Load(property->key());
4334 ref->set_type(Reference::KEYED);
4335 }
4336 } else if (var != NULL) {
4337 // The expression is a variable proxy that does not rewrite to a
4338 // property. Global variables are treated as named property references.
4339 if (var->is_global()) {
4340 LoadGlobal();
4341 ref->set_type(Reference::NAMED);
4342 } else {
4343 ASSERT(var->slot() != NULL);
4344 ref->set_type(Reference::SLOT);
4345 }
4346 } else {
4347 // Anything else is a runtime error.
4348 Load(e);
4349 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
4350 }
4351
4352 in_spilled_code_ = was_in_spilled_code;
4353}
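// NOTE (editorial sketch, not part of the original source): the reference
// kinds chosen above correspond roughly to these source forms:
//   o.name or a global x  -> NAMED        o[key] or o["0"] -> KEYED
//   local/context x       -> SLOT         anything else    -> reference error
// with symbol keys that parse as array indices deliberately kept KEYED.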
4354
4355
4356void CodeGenerator::UnloadReference(Reference* ref) {
4357 // Pop a reference from the stack while preserving TOS.
4358 Comment cmnt(masm_, "[ UnloadReference");
4359 frame_->Nip(ref->size());
4360}
4361
4362
4363Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
4364 // Currently, this assertion will fail if we try to assign to
4365 // a constant variable that is constant because it is read-only
4366 // (such as the variable referring to a named function expression).
4367 // We need to implement assignments to read-only variables.
4368 // Ideally, we should do this during AST generation (by converting
4369 // such assignments into expression statements); however, in general
4370 // we may not be able to make the decision until past AST generation,
4371 // that is when the entire program is known.
4372 ASSERT(slot != NULL);
4373 int index = slot->index();
4374 switch (slot->type()) {
4375 case Slot::PARAMETER:
4376 return frame_->ParameterAt(index);
4377
4378 case Slot::LOCAL:
4379 return frame_->LocalAt(index);
4380
4381 case Slot::CONTEXT: {
4382 // Follow the context chain if necessary.
4383 ASSERT(!tmp.is(rsi)); // do not overwrite context register
4384 Register context = rsi;
4385 int chain_length = scope()->ContextChainLength(slot->var()->scope());
4386 for (int i = 0; i < chain_length; i++) {
4387 // Load the closure.
4388 // (All contexts, even 'with' contexts, have a closure,
4389 // and it is the same for all contexts inside a function.
4390 // There is no need to go to the function context first.)
4391 __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
4392 // Load the function context (which is the incoming, outer context).
4393 __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
4394 context = tmp;
4395 }
4396 // We may have a 'with' context now. Get the function context.
4397 // (In fact this mov may never be needed, since the scope analysis
4398 // may not permit a direct context access in this case and thus we are
4399 // always at a function context. However, it is safe to dereference
4400 // because the function context of a function context is itself. Before
4401 // deleting this mov we should try to create a counter-example first,
4402 // though...)
4403 __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
4404 return ContextOperand(tmp, index);
4405 }
4406
4407 default:
4408 UNREACHABLE();
4409 return Operand(rsp, 0);
4410 }
4411}
4412
4413
4414Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
4415 Result tmp,
4416 JumpTarget* slow) {
4417 ASSERT(slot->type() == Slot::CONTEXT);
4418 ASSERT(tmp.is_register());
4419 Register context = rsi;
4420
4421 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
4422 if (s->num_heap_slots() > 0) {
4423 if (s->calls_eval()) {
4424 // Check that extension is NULL.
4425 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4426 Immediate(0));
4427 slow->Branch(not_equal, not_taken);
4428 }
4429 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4430 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4431 context = tmp.reg();
4432 }
4433 }
4434 // Check that last extension is NULL.
4435 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
4436 slow->Branch(not_equal, not_taken);
4437 __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
4438 return ContextOperand(tmp.reg(), slot->index());
4439}
4440
4441
4442void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
4443 if (slot->type() == Slot::LOOKUP) {
4444 ASSERT(slot->var()->is_dynamic());
4445
4446 JumpTarget slow;
4447 JumpTarget done;
4448 Result value;
4449
4450 // Generate fast-case code for variables that might be shadowed by
4451 // eval-introduced variables. Eval is used a lot without
4452 // introducing variables. In those cases, we do not want to
4453 // perform a runtime call for all variables in the scope
4454 // containing the eval.
4455 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
4456 value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
4457 // If there was no control flow to slow, we can exit early.
4458 if (!slow.is_linked()) {
4459 frame_->Push(&value);
4460 return;
4461 }
4462
4463 done.Jump(&value);
4464
4465 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
4466 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
4467 // Only generate the fast case for locals that rewrite to slots.
4468 // This rules out argument loads.
4469 if (potential_slot != NULL) {
4470 // Allocate a fresh register to use as a temp in
4471 // ContextSlotOperandCheckExtensions and to hold the result
4472 // value.
4473 value = allocator_->Allocate();
4474 ASSERT(value.is_valid());
4475 __ movq(value.reg(),
4476 ContextSlotOperandCheckExtensions(potential_slot,
4477 value,
4478 &slow));
4479 if (potential_slot->var()->mode() == Variable::CONST) {
4480 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4481 done.Branch(not_equal, &value);
4482 __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4483 }
4484 // There is always control flow to slow from
4485 // ContextSlotOperandCheckExtensions so we have to jump around
4486 // it.
4487 done.Jump(&value);
4488 }
4489 }
4490
4491 slow.Bind();
4492 // A runtime call is inevitable. We eagerly sync frame elements
4493 // to memory so that we can push the arguments directly into place
4494 // on top of the frame.
4495 frame_->SyncRange(0, frame_->element_count() - 1);
4496 frame_->EmitPush(rsi);
4497 __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
4498 frame_->EmitPush(kScratchRegister);
4499 if (typeof_state == INSIDE_TYPEOF) {
4500 value =
4501 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
4502 } else {
4503 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4504 }
4505
4506 done.Bind(&value);
4507 frame_->Push(&value);
4508
4509 } else if (slot->var()->mode() == Variable::CONST) {
4510 // Const slots may contain 'the hole' value (the constant hasn't been
4511 // initialized yet) which needs to be converted into the 'undefined'
4512 // value.
4513 //
4514 // We currently spill the virtual frame because constants use the
4515 // potentially unsafe direct-frame access of SlotOperand.
4516 VirtualFrame::SpilledScope spilled_scope;
4517 Comment cmnt(masm_, "[ Load const");
4518 JumpTarget exit;
4519 __ movq(rcx, SlotOperand(slot, rcx));
4520 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4521 exit.Branch(not_equal);
4522 __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
4523 exit.Bind();
4524 frame_->EmitPush(rcx);
4525
4526 } else if (slot->type() == Slot::PARAMETER) {
4527 frame_->PushParameterAt(slot->index());
4528
4529 } else if (slot->type() == Slot::LOCAL) {
4530 frame_->PushLocalAt(slot->index());
4531
4532 } else {
4533 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
4534 // here.
4535 //
4536 // The use of SlotOperand below is safe for an unspilled frame
4537 // because it will always be a context slot.
4538 ASSERT(slot->type() == Slot::CONTEXT);
4539 Result temp = allocator_->Allocate();
4540 ASSERT(temp.is_valid());
4541 __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
4542 frame_->Push(&temp);
4543 }
4544}
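// NOTE (editorial sketch, not part of the original source): the hole check in
// the const case above is what makes reading a const before its initializer
// has run, e.g.
//   f();  const c = 1;  function f() { return c; }
// produce undefined instead of exposing the hole sentinel.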
4545
4546
4547void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
4548 TypeofState state) {
4549 LoadFromSlot(slot, state);
4550
4551 // Bail out quickly if we're not using lazy arguments allocation.
4552 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
4553
4554 // ... or if the slot isn't a non-parameter arguments slot.
4555 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
4556
4557 // Pop the loaded value from the stack.
4558 Result value = frame_->Pop();
4559
4560 // If the loaded value is a constant, we know if the arguments
4561 // object has been lazily loaded yet.
4562 if (value.is_constant()) {
4563 if (value.handle()->IsTheHole()) {
4564 Result arguments = StoreArgumentsObject(false);
4565 frame_->Push(&arguments);
4566 } else {
4567 frame_->Push(&value);
4568 }
4569 return;
4570 }
4571
4572 // The loaded value is in a register. If it is the sentinel that
4573 // indicates that we haven't loaded the arguments object yet, we
4574 // need to do it now.
4575 JumpTarget exit;
4576 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4577 frame_->Push(&value);
4578 exit.Branch(not_equal);
4579 Result arguments = StoreArgumentsObject(false);
4580 frame_->SetElementAt(0, &arguments);
4581 exit.Bind();
4582}
4583
4584
4585void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
4586 if (slot->type() == Slot::LOOKUP) {
4587 ASSERT(slot->var()->is_dynamic());
4588
4589 // For now, just do a runtime call. Since the call is inevitable,
4590 // we eagerly sync the virtual frame so we can directly push the
4591 // arguments into place.
4592 frame_->SyncRange(0, frame_->element_count() - 1);
4593
4594 frame_->EmitPush(rsi);
4595 frame_->EmitPush(slot->var()->name());
4596
4597 Result value;
4598 if (init_state == CONST_INIT) {
4599 // Same as the case for a normal store, but ignores attribute
4600 // (e.g. READ_ONLY) of context slot so that we can initialize const
4601 // properties (introduced via eval("const foo = (some expr);")). Also,
4602 // uses the current function context instead of the top context.
4603 //
4604 // Note that we must declare the foo upon entry of eval(), via a
4605 // context slot declaration, but we cannot initialize it at the same
4606 // time, because the const declaration may be at the end of the eval
4607 // code (sigh...) and the const variable may have been used before
4608 // (where its value is 'undefined'). Thus, we can only do the
4609 // initialization when we actually encounter the expression and when
4610 // the expression operands are defined and valid, and thus we need the
4611 // split into 2 operations: declaration of the context slot followed
4612 // by initialization.
4613 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4614 } else {
4615 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
4616 }
4617 // Storing a variable must keep the (new) value on the expression
4618 // stack. This is necessary for compiling chained assignment
4619 // expressions.
4620 frame_->Push(&value);
4621 } else {
4622 ASSERT(!slot->var()->is_dynamic());
4623
4624 JumpTarget exit;
4625 if (init_state == CONST_INIT) {
4626 ASSERT(slot->var()->mode() == Variable::CONST);
4627 // Only the first const initialization must be executed (the slot
4628 // still contains 'the hole' value). When the assignment is executed,
4629 // the code is identical to a normal store (see below).
4630 //
4631 // We spill the frame in the code below because the direct-frame
4632 // access of SlotOperand is potentially unsafe with an unspilled
4633 // frame.
4634 VirtualFrame::SpilledScope spilled_scope;
4635 Comment cmnt(masm_, "[ Init const");
4636 __ movq(rcx, SlotOperand(slot, rcx));
4637 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4638 exit.Branch(not_equal);
4639 }
4640
4641 // We must execute the store. Storing a variable must keep the (new)
4642 // value on the stack. This is necessary for compiling assignment
4643 // expressions.
4644 //
4645 // Note: We will reach here even with slot->var()->mode() ==
4646 // Variable::CONST because of const declarations which will initialize
4647 // consts to 'the hole' value and by doing so, end up calling this code.
4648 if (slot->type() == Slot::PARAMETER) {
4649 frame_->StoreToParameterAt(slot->index());
4650 } else if (slot->type() == Slot::LOCAL) {
4651 frame_->StoreToLocalAt(slot->index());
4652 } else {
4653 // The other slot types (LOOKUP and GLOBAL) cannot reach here.
4654 //
4655 // The use of SlotOperand below is safe for an unspilled frame
4656 // because the slot is a context slot.
4657 ASSERT(slot->type() == Slot::CONTEXT);
4658 frame_->Dup();
4659 Result value = frame_->Pop();
4660 value.ToRegister();
4661 Result start = allocator_->Allocate();
4662 ASSERT(start.is_valid());
4663 __ movq(SlotOperand(slot, start.reg()), value.reg());
4664 // RecordWrite may destroy the value registers.
4665 //
4666 // TODO(204): Avoid actually spilling when the value is not
4667 // needed (probably the common case).
4668 frame_->Spill(value.reg());
4669 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4670 Result temp = allocator_->Allocate();
4671 ASSERT(temp.is_valid());
4672 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
4673 // The results start, value, and temp are unused by going out of
4674 // scope.
4675 }
4676
4677 exit.Bind();
4678 }
4679}
4680
4681
4682Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
4683 Slot* slot,
4684 TypeofState typeof_state,
4685 JumpTarget* slow) {
4686 // Check that no extension objects have been created by calls to
4687 // eval from the current scope to the global scope.
4688 Register context = rsi;
4689 Result tmp = allocator_->Allocate();
4690 ASSERT(tmp.is_valid()); // All non-reserved registers were available.
4691
4692 Scope* s = scope();
4693 while (s != NULL) {
4694 if (s->num_heap_slots() > 0) {
4695 if (s->calls_eval()) {
4696 // Check that extension is NULL.
4697 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4698 Immediate(0));
4699 slow->Branch(not_equal, not_taken);
4700 }
4701 // Load next context in chain.
4702 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4703 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4704 context = tmp.reg();
4705 }
4706 // If no outer scope calls eval, we do not need to check more
4707 // context extensions. If we have reached an eval scope, we check
4708 // all extensions from this point.
4709 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
4710 s = s->outer_scope();
4711 }
4712
4713 if (s->is_eval_scope()) {
4714 // Loop up the context chain. There is no frame effect so it is
4715 // safe to use raw labels here.
4716 Label next, fast;
4717 if (!context.is(tmp.reg())) {
4718 __ movq(tmp.reg(), context);
4719 }
4720 // Load map for comparison into register, outside loop.
4721 __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
4722 __ bind(&next);
4723 // Terminate at global context.
4724 __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
4725 __ j(equal, &fast);
4726 // Check that extension is NULL.
4727 __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
4728 slow->Branch(not_equal);
4729 // Load next context in chain.
4730 __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
4731 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4732 __ jmp(&next);
4733 __ bind(&fast);
4734 }
4735 tmp.Unuse();
4736
4737 // All extension objects were empty and it is safe to use a global
4738 // load IC call.
4739 LoadGlobal();
4740 frame_->Push(slot->var()->name());
4741 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
4742 ? RelocInfo::CODE_TARGET
4743 : RelocInfo::CODE_TARGET_CONTEXT;
4744 Result answer = frame_->CallLoadIC(mode);
4745 // A test rax instruction following the call signals that the inobject
4746 // property case was inlined. Ensure that there is not a test rax
4747 // instruction here.
4748 masm_->nop();
4749 // Discard the global object. The result is in answer.
4750 frame_->Drop();
4751 return answer;
4752}
4753
4754
4755void CodeGenerator::LoadGlobal() {
4756 if (in_spilled_code()) {
4757 frame_->EmitPush(GlobalObject());
4758 } else {
4759 Result temp = allocator_->Allocate();
4760 __ movq(temp.reg(), GlobalObject());
4761 frame_->Push(&temp);
4762 }
4763}
4764
4765
4766void CodeGenerator::LoadGlobalReceiver() {
4767 Result temp = allocator_->Allocate();
4768 Register reg = temp.reg();
4769 __ movq(reg, GlobalObject());
4770 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
4771 frame_->Push(&temp);
4772}
4773
4774
4775ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
4776 if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
4777 ASSERT(scope_->arguments_shadow() != NULL);
4778 // We don't want to do lazy arguments allocation for functions that
4779 // have heap-allocated contexts, because it interfers with the
4780 // uninitialized const tracking in the context objects.
4781 return (scope_->num_heap_slots() > 0)
4782 ? EAGER_ARGUMENTS_ALLOCATION
4783 : LAZY_ARGUMENTS_ALLOCATION;
4784}
4785
4786
4787Result CodeGenerator::StoreArgumentsObject(bool initial) {
4788 ArgumentsAllocationMode mode = ArgumentsMode();
4789 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
4790
4791 Comment cmnt(masm_, "[ store arguments object");
4792 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
4793 // When using lazy arguments allocation, we store the hole value
4794 // as a sentinel indicating that the arguments object hasn't been
4795 // allocated yet.
4796 frame_->Push(Factory::the_hole_value());
4797 } else {
4798 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
4799 frame_->PushFunction();
4800 frame_->PushReceiverSlotAddress();
4801 frame_->Push(Smi::FromInt(scope_->num_parameters()));
4802 Result result = frame_->CallStub(&stub, 3);
4803 frame_->Push(&result);
4804 }
4805
4806 { Reference shadow_ref(this, scope_->arguments_shadow());
4807 Reference arguments_ref(this, scope_->arguments());
4808 ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
4809 // Here we rely on the convenient property that references to slots
4810 // take up zero space in the frame (i.e., it doesn't matter that the
4811 // stored value is actually below the reference on the frame).
4812 JumpTarget done;
4813 bool skip_arguments = false;
4814 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
4815 // We have to skip storing into the arguments slot if it has
4816 // already been written to. This can happen if a function
4817 // has a local variable named 'arguments'.
4818 LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
4819 Result arguments = frame_->Pop();
4820 if (arguments.is_constant()) {
4821 // We have to skip updating the arguments object if it has
4822 // been assigned a proper value.
4823 skip_arguments = !arguments.handle()->IsTheHole();
4824 } else {
4825 __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex);
4826 arguments.Unuse();
4827 done.Branch(not_equal);
4828 }
4829 }
4830 if (!skip_arguments) {
4831 arguments_ref.SetValue(NOT_CONST_INIT);
4832 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
4833 }
4834 shadow_ref.SetValue(NOT_CONST_INIT);
4835 }
4836 return frame_->Pop();
4837}
4838
4839
4840// TODO(1241834): Get rid of this function in favor of just using Load, now
4841// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
4842// variables w/o reference errors elsewhere.
4843void CodeGenerator::LoadTypeofExpression(Expression* x) {
4844 Variable* variable = x->AsVariableProxy()->AsVariable();
4845 if (variable != NULL && !variable->is_this() && variable->is_global()) {
4846 // NOTE: This is somewhat nasty. We force the compiler to load
4847 // the variable as if through '<global>.<variable>' to make sure we
4848 // do not get reference errors.
4849 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
4850 Literal key(variable->name());
4851 // TODO(1241834): Fetch the position from the variable instead of using
4852 // no position.
4853 Property property(&global, &key, RelocInfo::kNoPosition);
4854 Load(&property);
4855 } else {
4856 Load(x, INSIDE_TYPEOF);
4857 }
4858}
4859
4860
4861void CodeGenerator::Comparison(Condition cc,
4862 bool strict,
4863 ControlDestination* dest) {
4864 // Strict only makes sense for equality comparisons.
4865 ASSERT(!strict || cc == equal);
4866
4867 Result left_side;
4868 Result right_side;
4869 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
4870 if (cc == greater || cc == less_equal) {
4871 cc = ReverseCondition(cc);
4872 left_side = frame_->Pop();
4873 right_side = frame_->Pop();
4874 } else {
4875 right_side = frame_->Pop();
4876 left_side = frame_->Pop();
4877 }
4878 ASSERT(cc == less || cc == equal || cc == greater_equal);
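  // For example, for 'a > b' the frame holds a below b, so the first Pop()
  // yields b as left_side and the second yields a as right_side; the
  // reversed condition then tests 'b < a', which is equivalent to 'a > b'.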
4879
4880 // If either side is a constant smi, optimize the comparison.
4881 bool left_side_constant_smi =
4882 left_side.is_constant() && left_side.handle()->IsSmi();
4883 bool right_side_constant_smi =
4884 right_side.is_constant() && right_side.handle()->IsSmi();
4885 bool left_side_constant_null =
4886 left_side.is_constant() && left_side.handle()->IsNull();
4887 bool right_side_constant_null =
4888 right_side.is_constant() && right_side.handle()->IsNull();
4889
4890 if (left_side_constant_smi || right_side_constant_smi) {
4891 if (left_side_constant_smi && right_side_constant_smi) {
4892 // Trivial case, comparing two constants.
4893 int left_value = Smi::cast(*left_side.handle())->value();
4894 int right_value = Smi::cast(*right_side.handle())->value();
4895 switch (cc) {
4896 case less:
4897 dest->Goto(left_value < right_value);
4898 break;
4899 case equal:
4900 dest->Goto(left_value == right_value);
4901 break;
4902 case greater_equal:
4903 dest->Goto(left_value >= right_value);
4904 break;
4905 default:
4906 UNREACHABLE();
4907 }
4908 } else { // Only one side is a constant Smi.
4909 // If left side is a constant Smi, reverse the operands.
4910 // Since one side is a constant Smi, conversion order does not matter.
4911 if (left_side_constant_smi) {
4912 Result temp = left_side;
4913 left_side = right_side;
4914 right_side = temp;
4915 cc = ReverseCondition(cc);
4916 // This may reintroduce greater or less_equal as the value of cc.
4917 // CompareStub and the inline code both support all values of cc.
4918 }
4919 // Implement comparison against a constant Smi, inlining the case
4920 // where both sides are Smis.
4921 left_side.ToRegister();
4922
4923 // Here we split control flow to the stub call and inlined cases
4924 // before finally splitting it to the control destination. We use
4925 // a jump target and branching to duplicate the virtual frame at
4926 // the first split. We manually handle the off-frame references
4927 // by reconstituting them on the non-fall-through path.
4928 JumpTarget is_smi;
4929 Register left_reg = left_side.reg();
4930 Handle<Object> right_val = right_side.handle();
4931
4932 Condition left_is_smi = masm_->CheckSmi(left_side.reg());
4933 is_smi.Branch(left_is_smi);
4934
4935 // Set up and call the compare stub.
4936 CompareStub stub(cc, strict);
4937 Result result = frame_->CallStub(&stub, &left_side, &right_side);
4938 result.ToRegister();
4939 __ testq(result.reg(), result.reg());
4940 result.Unuse();
4941 dest->true_target()->Branch(cc);
4942 dest->false_target()->Jump();
4943
4944 is_smi.Bind();
4945 left_side = Result(left_reg);
4946 right_side = Result(right_val);
4947 // Test smi equality and comparison by signed int comparison.
4948 // Both sides are smis, so we can use an Immediate.
Steve Block3ce2e202009-11-05 08:53:23 +00004949 __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004950 left_side.Unuse();
4951 right_side.Unuse();
4952 dest->Split(cc);
4953 }
4954 } else if (cc == equal &&
4955 (left_side_constant_null || right_side_constant_null)) {
4956 // To make null checks efficient, we check if either the left side or
4957 // the right side is the constant 'null'.
4958 // If so, we optimize the code by inlining a null check instead of
4959 // calling the (very) general runtime routine for checking equality.
4960 Result operand = left_side_constant_null ? right_side : left_side;
4961 right_side.Unuse();
4962 left_side.Unuse();
4963 operand.ToRegister();
4964 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
4965 if (strict) {
4966 operand.Unuse();
4967 dest->Split(equal);
4968 } else {
4969 // The 'null' value is only equal to 'undefined' if using non-strict
4970 // comparisons.
4971 dest->true_target()->Branch(equal);
4972 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
4973 dest->true_target()->Branch(equal);
4974 Condition is_smi = masm_->CheckSmi(operand.reg());
4975 dest->false_target()->Branch(is_smi);
4976
4977 // It can be an undetectable object.
4978 // Use a scratch register in preference to spilling operand.reg().
4979 Result temp = allocator()->Allocate();
4980 ASSERT(temp.is_valid());
4981 __ movq(temp.reg(),
Steve Block3ce2e202009-11-05 08:53:23 +00004982 FieldOperand(operand.reg(), HeapObject::kMapOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00004983 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
4984 Immediate(1 << Map::kIsUndetectable));
4985 temp.Unuse();
4986 operand.Unuse();
4987 dest->Split(not_zero);
4988 }
4989 } else { // Neither side is a constant Smi or null.
4990 // If either side is a non-smi constant, skip the smi check.
4991 bool known_non_smi =
4992 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
4993 (right_side.is_constant() && !right_side.handle()->IsSmi());
4994 left_side.ToRegister();
4995 right_side.ToRegister();
4996
4997 if (known_non_smi) {
4998 // When non-smi, call out to the compare stub.
4999 CompareStub stub(cc, strict);
5000 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5001 // The result is a Smi, which is negative, zero, or positive.
Steve Block3ce2e202009-11-05 08:53:23 +00005002 __ SmiTest(answer.reg()); // Sets both zero and sign flag.
Steve Blocka7e24c12009-10-30 11:49:00 +00005003 answer.Unuse();
5004 dest->Split(cc);
5005 } else {
5006 // Here we split control flow to the stub call and inlined cases
5007 // before finally splitting it to the control destination. We use
5008 // a jump target and branching to duplicate the virtual frame at
5009 // the first split. We manually handle the off-frame references
5010 // by reconstituting them on the non-fall-through path.
5011 JumpTarget is_smi;
5012 Register left_reg = left_side.reg();
5013 Register right_reg = right_side.reg();
5014
5015 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
5016 is_smi.Branch(both_smi);
5017 // When non-smi, call out to the compare stub.
5018 CompareStub stub(cc, strict);
5019 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
Steve Block3ce2e202009-11-05 08:53:23 +00005020 __ SmiTest(answer.reg()); // Sets both zero and sign flags.
Steve Blocka7e24c12009-10-30 11:49:00 +00005021 answer.Unuse();
5022 dest->true_target()->Branch(cc);
5023 dest->false_target()->Jump();
5024
5025 is_smi.Bind();
5026 left_side = Result(left_reg);
5027 right_side = Result(right_reg);
Steve Block3ce2e202009-11-05 08:53:23 +00005028 __ SmiCompare(left_side.reg(), right_side.reg());
Steve Blocka7e24c12009-10-30 11:49:00 +00005029 right_side.Unuse();
5030 left_side.Unuse();
5031 dest->Split(cc);
5032 }
5033 }
5034}
5035
5036
5037class DeferredInlineBinaryOperation: public DeferredCode {
5038 public:
5039 DeferredInlineBinaryOperation(Token::Value op,
5040 Register dst,
5041 Register left,
5042 Register right,
5043 OverwriteMode mode)
5044 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
5045 set_comment("[ DeferredInlineBinaryOperation");
5046 }
5047
5048 virtual void Generate();
5049
5050 private:
5051 Token::Value op_;
5052 Register dst_;
5053 Register left_;
5054 Register right_;
5055 OverwriteMode mode_;
5056};
5057
5058
5059void DeferredInlineBinaryOperation::Generate() {
5060 __ push(left_);
5061 __ push(right_);
5062 GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
5063 __ CallStub(&stub);
5064 if (!dst_.is(rax)) __ movq(dst_, rax);
5065}
5066
5067
5068void CodeGenerator::GenericBinaryOperation(Token::Value op,
5069 SmiAnalysis* type,
5070 OverwriteMode overwrite_mode) {
5071 Comment cmnt(masm_, "[ BinaryOperation");
5072 Comment cmnt_token(masm_, Token::String(op));
5073
5074 if (op == Token::COMMA) {
5075 // Simply discard left value.
5076 frame_->Nip(1);
5077 return;
5078 }
5079
5080 // Set the flags based on the operation, type and loop nesting level.
5081 GenericBinaryFlags flags;
5082 switch (op) {
5083 case Token::BIT_OR:
5084 case Token::BIT_AND:
5085 case Token::BIT_XOR:
5086 case Token::SHL:
5087 case Token::SHR:
5088 case Token::SAR:
5089 // Bit operations always assume they likely operate on Smis. Still only
5090 // generate the inline Smi check code if this operation is part of a loop.
5091 flags = (loop_nesting() > 0)
5092 ? SMI_CODE_INLINED
5093 : SMI_CODE_IN_STUB;
5094 break;
5095
5096 default:
5097 // By default only inline the Smi check code for likely smis if this
5098 // operation is part of a loop.
5099 flags = ((loop_nesting() > 0) && type->IsLikelySmi())
5100 ? SMI_CODE_INLINED
5101 : SMI_CODE_IN_STUB;
5102 break;
5103 }
5104
5105 Result right = frame_->Pop();
5106 Result left = frame_->Pop();
5107
5108 if (op == Token::ADD) {
5109 bool left_is_string = left.is_constant() && left.handle()->IsString();
5110 bool right_is_string = right.is_constant() && right.handle()->IsString();
5111 if (left_is_string || right_is_string) {
5112 frame_->Push(&left);
5113 frame_->Push(&right);
5114 Result answer;
5115 if (left_is_string) {
5116 if (right_is_string) {
5117 // TODO(lrn): if both are constant strings
5118 // -- do a compile time cons, if allocation during codegen is allowed.
5119 answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
5120 } else {
5121 answer =
5122 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
5123 }
5124 } else if (right_is_string) {
5125 answer =
5126 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
5127 }
5128 frame_->Push(&answer);
5129 return;
5130 }
5131 // Neither operand is known to be a string.
5132 }
5133
5134 bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
5135 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
5136 bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
5137 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
5138 bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
5139
5140 if (left_is_smi && right_is_smi) {
5141 // Compute the constant result at compile time, and leave it on the frame.
5142 int left_int = Smi::cast(*left.handle())->value();
5143 int right_int = Smi::cast(*right.handle())->value();
5144 if (FoldConstantSmis(op, left_int, right_int)) return;
5145 }
5146
5147 if (left_is_non_smi || right_is_non_smi) {
5148 // Set flag so that we go straight to the slow case, with no smi code.
5149 generate_no_smi_code = true;
5150 } else if (right_is_smi) {
5151 ConstantSmiBinaryOperation(op, &left, right.handle(),
5152 type, false, overwrite_mode);
5153 return;
5154 } else if (left_is_smi) {
5155 ConstantSmiBinaryOperation(op, &right, left.handle(),
5156 type, true, overwrite_mode);
5157 return;
5158 }
5159
5160 if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
5161 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
5162 } else {
5163 frame_->Push(&left);
5164 frame_->Push(&right);
5165 // If we know the arguments aren't smis, use the binary operation stub
5166 // that does not check for the fast smi case.
5167 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
5168 if (generate_no_smi_code) {
5169 flags = SMI_CODE_INLINED;
5170 }
5171 GenericBinaryOpStub stub(op, overwrite_mode, flags);
5172 Result answer = frame_->CallStub(&stub, 2);
5173 frame_->Push(&answer);
5174 }
5175}
5176
5177
5178// Emit a LoadIC call to get the value from receiver and leave it in
5179// dst. The receiver register is restored after the call.
5180class DeferredReferenceGetNamedValue: public DeferredCode {
5181 public:
5182 DeferredReferenceGetNamedValue(Register dst,
5183 Register receiver,
5184 Handle<String> name)
5185 : dst_(dst), receiver_(receiver), name_(name) {
5186 set_comment("[ DeferredReferenceGetNamedValue");
5187 }
5188
5189 virtual void Generate();
5190
5191 Label* patch_site() { return &patch_site_; }
5192
5193 private:
5194 Label patch_site_;
5195 Register dst_;
5196 Register receiver_;
5197 Handle<String> name_;
5198};
5199
5200
5201void DeferredReferenceGetNamedValue::Generate() {
5202 __ push(receiver_);
5203 __ Move(rcx, name_);
5204 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5205 __ Call(ic, RelocInfo::CODE_TARGET);
5206 // The call must be followed by a test rax instruction to indicate
5207 // that the inobject property case was inlined.
5208 //
5209 // Store the delta to the map check instruction here in the test
5210 // instruction. Use masm_-> instead of the __ macro since the
5211 // latter can't return a value.
5212 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5213 // Here we use masm_-> instead of the __ macro because this is the
5214 // instruction that gets patched and coverage code gets in the way.
5215 masm_->testl(rax, Immediate(-delta_to_patch_site));
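  // The 32-bit immediate of the test instruction above thus encodes the
  // (negated) distance back to the patch site, which presumably is how the
  // IC locates the inlined map-check instruction it needs to patch.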
5216 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5217
5218 if (!dst_.is(rax)) __ movq(dst_, rax);
5219 __ pop(receiver_);
5220}
5221
5222
5223void DeferredInlineSmiAdd::Generate() {
5224 __ push(dst_);
Steve Block3ce2e202009-11-05 08:53:23 +00005225 __ Push(value_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005226 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
5227 __ CallStub(&igostub);
5228 if (!dst_.is(rax)) __ movq(dst_, rax);
5229}
5230
5231
5232void DeferredInlineSmiAddReversed::Generate() {
Steve Block3ce2e202009-11-05 08:53:23 +00005233 __ Push(value_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005234 __ push(dst_);
5235 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
5236 __ CallStub(&igostub);
5237 if (!dst_.is(rax)) __ movq(dst_, rax);
5238}
5239
5240
5241void DeferredInlineSmiSub::Generate() {
5242 __ push(dst_);
Steve Block3ce2e202009-11-05 08:53:23 +00005243 __ Push(value_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005244 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
5245 __ CallStub(&igostub);
5246 if (!dst_.is(rax)) __ movq(dst_, rax);
5247}
5248
5249
5250void DeferredInlineSmiOperation::Generate() {
5251 __ push(src_);
Steve Block3ce2e202009-11-05 08:53:23 +00005252 __ Push(value_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005253 // For mod we don't generate all the Smi code inline.
5254 GenericBinaryOpStub stub(
5255 op_,
5256 overwrite_mode_,
5257 (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
5258 __ CallStub(&stub);
5259 if (!dst_.is(rax)) __ movq(dst_, rax);
5260}
5261
5262
5263void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
5264 Result* operand,
5265 Handle<Object> value,
5266 SmiAnalysis* type,
5267 bool reversed,
5268 OverwriteMode overwrite_mode) {
5269 // NOTE: This is an attempt to inline (a bit) more of the code for
5270 // some possible smi operations (like + and -) when (at least) one
5271 // of the operands is a constant smi.
5272 // Consumes the argument "operand".
5273
5274 // TODO(199): Optimize some special cases of operations involving a
5275 // smi literal (multiply by 2, shift by 0, etc.).
5276 if (IsUnsafeSmi(value)) {
5277 Result unsafe_operand(value);
5278 if (reversed) {
5279 LikelySmiBinaryOperation(op, &unsafe_operand, operand,
5280 overwrite_mode);
5281 } else {
5282 LikelySmiBinaryOperation(op, operand, &unsafe_operand,
5283 overwrite_mode);
5284 }
5285 ASSERT(!operand->is_valid());
5286 return;
5287 }
5288
5289 // Get the literal value.
5290 Smi* smi_value = Smi::cast(*value);
5291 int int_value = smi_value->value();
5292
5293 switch (op) {
5294 case Token::ADD: {
5295 operand->ToRegister();
5296 frame_->Spill(operand->reg());
5297 DeferredCode* deferred = NULL;
5298 if (reversed) {
5299 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
5300 smi_value,
5301 overwrite_mode);
5302 } else {
5303 deferred = new DeferredInlineSmiAdd(operand->reg(),
5304 smi_value,
5305 overwrite_mode);
5306 }
5307 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5308 __ SmiAddConstant(operand->reg(),
5309 operand->reg(),
Steve Block3ce2e202009-11-05 08:53:23 +00005310 smi_value,
Steve Blocka7e24c12009-10-30 11:49:00 +00005311 deferred->entry_label());
5312 deferred->BindExit();
5313 frame_->Push(operand);
5314 break;
5315 }
5316
5317 case Token::SUB: {
5318 if (reversed) {
5319 Result constant_operand(value);
5320 LikelySmiBinaryOperation(op, &constant_operand, operand,
5321 overwrite_mode);
5322 } else {
5323 operand->ToRegister();
5324 frame_->Spill(operand->reg());
5325 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
5326 smi_value,
5327 overwrite_mode);
5328 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5329 // A smi currently fits in a 32-bit Immediate.
5330 __ SmiSubConstant(operand->reg(),
5331 operand->reg(),
Steve Block3ce2e202009-11-05 08:53:23 +00005332 smi_value,
Steve Blocka7e24c12009-10-30 11:49:00 +00005333 deferred->entry_label());
5334 deferred->BindExit();
5335 frame_->Push(operand);
5336 }
5337 break;
5338 }
5339
5340 case Token::SAR:
5341 if (reversed) {
5342 Result constant_operand(value);
5343 LikelySmiBinaryOperation(op, &constant_operand, operand,
5344 overwrite_mode);
5345 } else {
5346 // Only the least significant 5 bits of the shift value are used.
5347 // In the slow case, this masking is done inside the runtime call.
5348 int shift_value = int_value & 0x1f;
5349 operand->ToRegister();
5350 frame_->Spill(operand->reg());
5351 DeferredInlineSmiOperation* deferred =
5352 new DeferredInlineSmiOperation(op,
5353 operand->reg(),
5354 operand->reg(),
5355 smi_value,
5356 overwrite_mode);
5357 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5358 __ SmiShiftArithmeticRightConstant(operand->reg(),
5359 operand->reg(),
5360 shift_value);
5361 deferred->BindExit();
5362 frame_->Push(operand);
5363 }
5364 break;
5365
5366 case Token::SHR:
5367 if (reversed) {
5368 Result constant_operand(value);
5369 LikelySmiBinaryOperation(op, &constant_operand, operand,
5370 overwrite_mode);
5371 } else {
5372 // Only the least significant 5 bits of the shift value are used.
5373 // In the slow case, this masking is done inside the runtime call.
5374 int shift_value = int_value & 0x1f;
5375 operand->ToRegister();
5376 Result answer = allocator()->Allocate();
5377 ASSERT(answer.is_valid());
5378 DeferredInlineSmiOperation* deferred =
5379 new DeferredInlineSmiOperation(op,
5380 answer.reg(),
5381 operand->reg(),
5382 smi_value,
5383 overwrite_mode);
5384 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5385 __ SmiShiftLogicalRightConstant(answer.reg(),
Steve Block3ce2e202009-11-05 08:53:23 +00005386 operand->reg(),
5387 shift_value,
5388 deferred->entry_label());
Steve Blocka7e24c12009-10-30 11:49:00 +00005389 deferred->BindExit();
5390 operand->Unuse();
5391 frame_->Push(&answer);
5392 }
5393 break;
5394
5395 case Token::SHL:
5396 if (reversed) {
5397 Result constant_operand(value);
5398 LikelySmiBinaryOperation(op, &constant_operand, operand,
5399 overwrite_mode);
5400 } else {
5401 // Only the least significant 5 bits of the shift value are used.
5402 // In the slow case, this masking is done inside the runtime call.
5403 int shift_value = int_value & 0x1f;
5404 operand->ToRegister();
5405 if (shift_value == 0) {
5406 // Spill operand so it can be overwritten in the slow case.
5407 frame_->Spill(operand->reg());
5408 DeferredInlineSmiOperation* deferred =
5409 new DeferredInlineSmiOperation(op,
5410 operand->reg(),
5411 operand->reg(),
5412 smi_value,
5413 overwrite_mode);
5414 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5415 deferred->BindExit();
5416 frame_->Push(operand);
5417 } else {
5418 // Use a fresh temporary for nonzero shift values.
5419 Result answer = allocator()->Allocate();
5420 ASSERT(answer.is_valid());
5421 DeferredInlineSmiOperation* deferred =
5422 new DeferredInlineSmiOperation(op,
5423 answer.reg(),
5424 operand->reg(),
5425 smi_value,
5426 overwrite_mode);
5427 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5428 __ SmiShiftLeftConstant(answer.reg(),
5429 operand->reg(),
5430 shift_value,
5431 deferred->entry_label());
5432 deferred->BindExit();
5433 operand->Unuse();
5434 frame_->Push(&answer);
5435 }
5436 }
5437 break;
5438
5439 case Token::BIT_OR:
5440 case Token::BIT_XOR:
5441 case Token::BIT_AND: {
5442 operand->ToRegister();
5443 frame_->Spill(operand->reg());
5444 if (reversed) {
5445 // Bit operations with a constant smi are commutative.
5446 // We can swap left and right operands with no problem.
5447 // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
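        // Assuming the usual enum layout (NO_OVERWRITE == 0,
        // OVERWRITE_LEFT == 1, OVERWRITE_RIGHT == 2), the expression
        // (2 * mode) % 3 maps 0->0, 1->2 and 2->1 as required.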
5448 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
5449 }
5450 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
5451 operand->reg(),
5452 operand->reg(),
5453 smi_value,
5454 overwrite_mode);
5455 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5456 if (op == Token::BIT_AND) {
Steve Block3ce2e202009-11-05 08:53:23 +00005457 __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
Steve Blocka7e24c12009-10-30 11:49:00 +00005458 } else if (op == Token::BIT_XOR) {
5459 if (int_value != 0) {
Steve Block3ce2e202009-11-05 08:53:23 +00005460 __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
Steve Blocka7e24c12009-10-30 11:49:00 +00005461 }
5462 } else {
5463 ASSERT(op == Token::BIT_OR);
5464 if (int_value != 0) {
Steve Block3ce2e202009-11-05 08:53:23 +00005465 __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
Steve Blocka7e24c12009-10-30 11:49:00 +00005466 }
5467 }
5468 deferred->BindExit();
5469 frame_->Push(operand);
5470 break;
5471 }
5472
5473 // Generate inline code for mod of powers of 2 and negative powers of 2.
5474 case Token::MOD:
5475 if (!reversed &&
5476 int_value != 0 &&
5477 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
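        // For a modulus whose absolute value m is a power of two,
        // x % m == x & (m - 1) for any non-negative x (and x % -m gives the
        // same result), so a positive smi left operand can be reduced with a
        // single mask; other operands take the deferred path below.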
5478 operand->ToRegister();
5479 frame_->Spill(operand->reg());
Steve Block3ce2e202009-11-05 08:53:23 +00005480 DeferredCode* deferred =
5481 new DeferredInlineSmiOperation(op,
5482 operand->reg(),
5483 operand->reg(),
5484 smi_value,
5485 overwrite_mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00005486 // Check for negative or non-Smi left hand side.
5487 __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
5488 if (int_value < 0) int_value = -int_value;
5489 if (int_value == 1) {
Steve Block3ce2e202009-11-05 08:53:23 +00005490 __ Move(operand->reg(), Smi::FromInt(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00005491 } else {
Steve Block3ce2e202009-11-05 08:53:23 +00005492 __ SmiAndConstant(operand->reg(),
5493 operand->reg(),
5494 Smi::FromInt(int_value - 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00005495 }
5496 deferred->BindExit();
5497 frame_->Push(operand);
5498 break; // This break only applies if we generated code for MOD.
5499 }
5500 // Fall through if we did not find a power of 2 on the right hand side!
5501 // The next case must be the default.
5502
5503 default: {
5504 Result constant_operand(value);
5505 if (reversed) {
5506 LikelySmiBinaryOperation(op, &constant_operand, operand,
5507 overwrite_mode);
5508 } else {
5509 LikelySmiBinaryOperation(op, operand, &constant_operand,
5510 overwrite_mode);
5511 }
5512 break;
5513 }
5514 }
5515 ASSERT(!operand->is_valid());
5516}
5517
5518void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
5519 Result* left,
5520 Result* right,
5521 OverwriteMode overwrite_mode) {
5522 // Special handling of div and mod because they use fixed registers.
5523 if (op == Token::DIV || op == Token::MOD) {
5524 // We need rax as the quotient register, rdx as the remainder
5525 // register, neither left nor right in rax or rdx, and left copied
5526 // to rax.
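    // This matches the x86-64 idiv convention (dividend in rdx:rax, quotient
    // in rax, remainder in rdx), which SmiDiv and SmiMod presumably rely on.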
5527 Result quotient;
5528 Result remainder;
5529 bool left_is_in_rax = false;
5530 // Step 1: get rax for quotient.
5531 if ((left->is_register() && left->reg().is(rax)) ||
5532 (right->is_register() && right->reg().is(rax))) {
5533 // One or both is in rax. Use a fresh non-rdx register for
5534 // them.
5535 Result fresh = allocator_->Allocate();
5536 ASSERT(fresh.is_valid());
5537 if (fresh.reg().is(rdx)) {
5538 remainder = fresh;
5539 fresh = allocator_->Allocate();
5540 ASSERT(fresh.is_valid());
5541 }
5542 if (left->is_register() && left->reg().is(rax)) {
5543 quotient = *left;
5544 *left = fresh;
5545 left_is_in_rax = true;
5546 }
5547 if (right->is_register() && right->reg().is(rax)) {
5548 quotient = *right;
5549 *right = fresh;
5550 }
5551 __ movq(fresh.reg(), rax);
5552 } else {
5553 // Neither left nor right is in rax.
5554 quotient = allocator_->Allocate(rax);
5555 }
5556 ASSERT(quotient.is_register() && quotient.reg().is(rax));
5557 ASSERT(!(left->is_register() && left->reg().is(rax)));
5558 ASSERT(!(right->is_register() && right->reg().is(rax)));
5559
5560 // Step 2: get rdx for remainder if necessary.
5561 if (!remainder.is_valid()) {
5562 if ((left->is_register() && left->reg().is(rdx)) ||
5563 (right->is_register() && right->reg().is(rdx))) {
5564 Result fresh = allocator_->Allocate();
5565 ASSERT(fresh.is_valid());
5566 if (left->is_register() && left->reg().is(rdx)) {
5567 remainder = *left;
5568 *left = fresh;
5569 }
5570 if (right->is_register() && right->reg().is(rdx)) {
5571 remainder = *right;
5572 *right = fresh;
5573 }
5574 __ movq(fresh.reg(), rdx);
5575 } else {
5576 // Neither left nor right is in rdx.
5577 remainder = allocator_->Allocate(rdx);
5578 }
5579 }
5580 ASSERT(remainder.is_register() && remainder.reg().is(rdx));
5581 ASSERT(!(left->is_register() && left->reg().is(rdx)));
5582 ASSERT(!(right->is_register() && right->reg().is(rdx)));
5583
5584 left->ToRegister();
5585 right->ToRegister();
5586 frame_->Spill(rax);
5587 frame_->Spill(rdx);
5588
5589 // Check that left and right are smi tagged.
5590 DeferredInlineBinaryOperation* deferred =
5591 new DeferredInlineBinaryOperation(op,
5592 (op == Token::DIV) ? rax : rdx,
5593 left->reg(),
5594 right->reg(),
5595 overwrite_mode);
5596 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5597
5598 if (op == Token::DIV) {
5599 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
5600 deferred->BindExit();
5601 left->Unuse();
5602 right->Unuse();
5603 frame_->Push(&quotient);
5604 } else {
5605 ASSERT(op == Token::MOD);
5606 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
5607 deferred->BindExit();
5608 left->Unuse();
5609 right->Unuse();
5610 frame_->Push(&remainder);
5611 }
5612 return;
5613 }
5614
5615 // Special handling of shift operations because they use fixed
5616 // registers.
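  // (Most likely because the variable-count shift instructions on x86-64
  // take their count in cl, the low byte of rcx.)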
5617 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
5618 // Move left out of rcx if necessary.
5619 if (left->is_register() && left->reg().is(rcx)) {
5620 *left = allocator_->Allocate();
5621 ASSERT(left->is_valid());
5622 __ movq(left->reg(), rcx);
5623 }
5624 right->ToRegister(rcx);
5625 left->ToRegister();
5626 ASSERT(left->is_register() && !left->reg().is(rcx));
5627 ASSERT(right->is_register() && right->reg().is(rcx));
5628
5629 // We will modify right, it must be spilled.
5630 frame_->Spill(rcx);
5631
5632 // Use a fresh answer register to avoid spilling the left operand.
5633 Result answer = allocator_->Allocate();
5634 ASSERT(answer.is_valid());
5635 // Check that both operands are smis using the answer register as a
5636 // temporary.
5637 DeferredInlineBinaryOperation* deferred =
5638 new DeferredInlineBinaryOperation(op,
5639 answer.reg(),
5640 left->reg(),
5641 rcx,
5642 overwrite_mode);
5643 __ movq(answer.reg(), left->reg());
5644 __ or_(answer.reg(), rcx);
5645 __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
5646
5647 // Perform the operation.
5648 switch (op) {
5649 case Token::SAR:
5650 __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
5651 break;
5652 case Token::SHR: {
5653 __ SmiShiftLogicalRight(answer.reg(),
5654 left->reg(),
5655 rcx,
5656 deferred->entry_label());
5657 break;
5658 }
5659 case Token::SHL: {
5660 __ SmiShiftLeft(answer.reg(),
5661 left->reg(),
5662 rcx,
5663 deferred->entry_label());
5664 break;
5665 }
5666 default:
5667 UNREACHABLE();
5668 }
5669 deferred->BindExit();
5670 left->Unuse();
5671 right->Unuse();
5672 frame_->Push(&answer);
5673 return;
5674 }
5675
5676 // Handle the other binary operations.
5677 left->ToRegister();
5678 right->ToRegister();
5679 // A newly allocated register answer is used to hold the answer. The
5680 // registers containing left and right are not modified so they don't
5681 // need to be spilled in the fast case.
5682 Result answer = allocator_->Allocate();
5683 ASSERT(answer.is_valid());
5684
5685 // Perform the smi tag check.
5686 DeferredInlineBinaryOperation* deferred =
5687 new DeferredInlineBinaryOperation(op,
5688 answer.reg(),
5689 left->reg(),
5690 right->reg(),
5691 overwrite_mode);
5692 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5693
5694 switch (op) {
5695 case Token::ADD:
5696 __ SmiAdd(answer.reg(),
5697 left->reg(),
5698 right->reg(),
5699 deferred->entry_label());
5700 break;
5701
5702 case Token::SUB:
5703 __ SmiSub(answer.reg(),
5704 left->reg(),
5705 right->reg(),
5706 deferred->entry_label());
5707 break;
5708
5709 case Token::MUL: {
5710 __ SmiMul(answer.reg(),
5711 left->reg(),
5712 right->reg(),
5713 deferred->entry_label());
5714 break;
5715 }
5716
5717 case Token::BIT_OR:
5718 __ SmiOr(answer.reg(), left->reg(), right->reg());
5719 break;
5720
5721 case Token::BIT_AND:
5722 __ SmiAnd(answer.reg(), left->reg(), right->reg());
5723 break;
5724
5725 case Token::BIT_XOR:
5726 __ SmiXor(answer.reg(), left->reg(), right->reg());
5727 break;
5728
5729 default:
5730 UNREACHABLE();
5731 break;
5732 }
5733 deferred->BindExit();
5734 left->Unuse();
5735 right->Unuse();
5736 frame_->Push(&answer);
5737}
5738
5739
5740#undef __
5741#define __ ACCESS_MASM(masm)
5742
5743
5744Handle<String> Reference::GetName() {
5745 ASSERT(type_ == NAMED);
5746 Property* property = expression_->AsProperty();
5747 if (property == NULL) {
5748 // Global variable reference treated as a named property reference.
5749 VariableProxy* proxy = expression_->AsVariableProxy();
5750 ASSERT(proxy->AsVariable() != NULL);
5751 ASSERT(proxy->AsVariable()->is_global());
5752 return proxy->name();
5753 } else {
5754 Literal* raw_name = property->key()->AsLiteral();
5755 ASSERT(raw_name != NULL);
5756 return Handle<String>(String::cast(*raw_name->handle()));
5757 }
5758}
5759
5760
5761void Reference::GetValue(TypeofState typeof_state) {
5762 ASSERT(!cgen_->in_spilled_code());
5763 ASSERT(cgen_->HasValidEntryRegisters());
5764 ASSERT(!is_illegal());
5765 MacroAssembler* masm = cgen_->masm();
5766
5767 // Record the source position for the property load.
5768 Property* property = expression_->AsProperty();
5769 if (property != NULL) {
5770 cgen_->CodeForSourcePosition(property->position());
5771 }
5772
5773 switch (type_) {
5774 case SLOT: {
5775 Comment cmnt(masm, "[ Load from Slot");
5776 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
5777 ASSERT(slot != NULL);
5778 cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
5779 break;
5780 }
5781
5782 case NAMED: {
5783 // TODO(1241834): Make sure that it is safe to ignore the
5784 // distinction between expressions in a typeof and not in a
5785 // typeof. If there is a chance that reference errors can be
5786 // thrown below, we must distinguish between the two kinds of
5787 // loads (typeof expression loads must not throw a reference
5788 // error).
5789 Variable* var = expression_->AsVariableProxy()->AsVariable();
5790 bool is_global = var != NULL;
5791 ASSERT(!is_global || var->is_global());
5792
5793 // Do not inline the inobject property case for loads from the global
5794 // object. Also do not inline for unoptimized code. This saves time
5795 // in the code generator. Unoptimized code is toplevel code or code
5796 // that is not in a loop.
5797 if (is_global ||
5798 cgen_->scope()->is_global_scope() ||
5799 cgen_->loop_nesting() == 0) {
5800 Comment cmnt(masm, "[ Load from named Property");
5801 cgen_->frame()->Push(GetName());
5802
5803 RelocInfo::Mode mode = is_global
5804 ? RelocInfo::CODE_TARGET_CONTEXT
5805 : RelocInfo::CODE_TARGET;
5806 Result answer = cgen_->frame()->CallLoadIC(mode);
5807 // A test rax instruction following the call signals that the
5808 // inobject property case was inlined. Ensure that there is not
5809 // a test rax instruction here.
5810 __ nop();
5811 cgen_->frame()->Push(&answer);
5812 } else {
5813 // Inline the inobject property case.
5814 Comment cmnt(masm, "[ Inlined named property load");
5815 Result receiver = cgen_->frame()->Pop();
5816 receiver.ToRegister();
5817 Result value = cgen_->allocator()->Allocate();
5818 ASSERT(value.is_valid());
5819 // Cannot use r12 for receiver, because that changes
5820 // the distance between a call and a fixup location,
5821 // due to a special encoding of r12 as r/m in a ModR/M byte.
5822 if (receiver.reg().is(r12)) {
5823 // Swap receiver and value.
5824 __ movq(value.reg(), receiver.reg());
5825 Result temp = receiver;
5826 receiver = value;
5827 value = temp;
5828 cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
5829 }
5830
5831 DeferredReferenceGetNamedValue* deferred =
5832 new DeferredReferenceGetNamedValue(value.reg(),
5833 receiver.reg(),
5834 GetName());
5835
5836 // Check that the receiver is a heap object.
5837 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5838
5839 __ bind(deferred->patch_site());
5840 // This is the map check instruction that will be patched (so we can't
5841 // use the double underscore macro that may insert instructions).
5842 // Initially use an invalid map to force a failure.
5843 masm->Move(kScratchRegister, Factory::null_value());
5844 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5845 kScratchRegister);
5846 // This branch is always a forwards branch so it's always a fixed
5847 // size which allows the assert below to succeed and patching to work.
5848 // Don't use deferred->Branch(...), since that might add coverage code.
5849 masm->j(not_equal, deferred->entry_label());
5850
5851 // The delta from the patch label to the load offset must be
5852 // statically known.
5853 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
5854 LoadIC::kOffsetToLoadInstruction);
5855 // The initial (invalid) offset has to be large enough to force
5856 // a 32-bit instruction encoding to allow patching with an
5857 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
5858 int offset = kMaxInt;
5859 masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
5860
5861 __ IncrementCounter(&Counters::named_load_inline, 1);
5862 deferred->BindExit();
5863 cgen_->frame()->Push(&receiver);
5864 cgen_->frame()->Push(&value);
5865 }
5866 break;
5867 }
5868
5869 case KEYED: {
5870 // TODO(1241834): Make sure that it is safe to ignore the
5871 // distinction between expressions in a typeof and not in a typeof.
5872 Comment cmnt(masm, "[ Load from keyed Property");
5873 Variable* var = expression_->AsVariableProxy()->AsVariable();
5874 bool is_global = var != NULL;
5875 ASSERT(!is_global || var->is_global());
5876
5877 // Inline array load code if inside of a loop. We do not know
5878 // the receiver map yet, so we initially generate the code with
5879 // a check against an invalid map. In the inline cache code, we
5880 // patch the map check if appropriate.
5881 if (cgen_->loop_nesting() > 0) {
5882 Comment cmnt(masm, "[ Inlined load from keyed Property");
5883
5884 Result key = cgen_->frame()->Pop();
5885 Result receiver = cgen_->frame()->Pop();
5886 key.ToRegister();
5887 receiver.ToRegister();
5888
5889 // Use a fresh temporary to load the elements without destroying
5890 // the receiver which is needed for the deferred slow case.
5891 Result elements = cgen_->allocator()->Allocate();
5892 ASSERT(elements.is_valid());
5893
5894 // Use a fresh temporary for the index and later the loaded
5895 // value.
5896 Result index = cgen_->allocator()->Allocate();
5897 ASSERT(index.is_valid());
5898
5899 DeferredReferenceGetKeyedValue* deferred =
5900 new DeferredReferenceGetKeyedValue(index.reg(),
5901 receiver.reg(),
5902 key.reg(),
5903 is_global);
5904
5905 // Check that the receiver is not a smi (only needed if this
5906 // is not a load from the global context) and that it has the
5907 // expected map.
5908 if (!is_global) {
5909 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5910 }
5911
5912 // Initially, use an invalid map. The map is patched in the IC
5913 // initialization code.
5914 __ bind(deferred->patch_site());
5915 // Use masm-> here instead of the double underscore macro since extra
5916 // coverage code can interfere with the patching.
5917 masm->movq(kScratchRegister, Factory::null_value(),
5918 RelocInfo::EMBEDDED_OBJECT);
5919 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5920 kScratchRegister);
5921 deferred->Branch(not_equal);
5922
5923 // Check that the key is a non-negative smi.
5924 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
5925
5926 // Get the elements array from the receiver and check that it
5927 // is not a dictionary.
5928 __ movq(elements.reg(),
5929 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
5930 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
5931 Factory::fixed_array_map());
5932 deferred->Branch(not_equal);
5933
5934 // Shift the key to get the actual index value and check that
5935 // it is within bounds.
5936 __ SmiToInteger32(index.reg(), key.reg());
5937 __ cmpl(index.reg(),
5938 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
5939 deferred->Branch(above_equal);
5940
5941 // The index register holds the un-smi-tagged key. It has been
5942 // zero-extended to 64-bits, so it can be used directly as index in the
5943 // operand below.
5944 // Load and check that the result is not the hole. We could
5945 // reuse the index or elements register for the value.
5946 //
5947 // TODO(206): Consider whether it makes sense to try some
5948 // heuristic about which register to reuse. For example, if
5949 // one is rax, then we can reuse that one because the value
5950 // coming from the deferred code will be in rax.
5951 Result value = index;
5952 __ movq(value.reg(),
5953 Operand(elements.reg(),
5954 index.reg(),
5955 times_pointer_size,
5956 FixedArray::kHeaderSize - kHeapObjectTag));
5957 elements.Unuse();
5958 index.Unuse();
5959 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
5960 deferred->Branch(equal);
5961 __ IncrementCounter(&Counters::keyed_load_inline, 1);
5962
5963 deferred->BindExit();
5964 // Restore the receiver and key to the frame and push the
5965 // result on top of it.
5966 cgen_->frame()->Push(&receiver);
5967 cgen_->frame()->Push(&key);
5968 cgen_->frame()->Push(&value);
5969
5970 } else {
5971 Comment cmnt(masm, "[ Load from keyed Property");
5972 RelocInfo::Mode mode = is_global
5973 ? RelocInfo::CODE_TARGET_CONTEXT
5974 : RelocInfo::CODE_TARGET;
5975 Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
5976 // Make sure that we do not have a test instruction after the
5977 // call. A test instruction after the call is used to
5978 // indicate that we have generated an inline version of the
5979 // keyed load. The explicit nop instruction is here because
5980 // the push that follows might be peep-hole optimized away.
5981 __ nop();
5982 cgen_->frame()->Push(&answer);
5983 }
5984 break;
5985 }
5986
5987 default:
5988 UNREACHABLE();
5989 }
5990}
5991
5992
5993void Reference::TakeValue(TypeofState typeof_state) {
5994 // TODO(X64): This function is completely architecture independent. Move
5995 // it somewhere shared.
5996
5997 // For non-constant frame-allocated slots, we invalidate the value in the
5998 // slot. For all others, we fall back on GetValue.
5999 ASSERT(!cgen_->in_spilled_code());
6000 ASSERT(!is_illegal());
6001 if (type_ != SLOT) {
6002 GetValue(typeof_state);
6003 return;
6004 }
6005
6006 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6007 ASSERT(slot != NULL);
6008 if (slot->type() == Slot::LOOKUP ||
6009 slot->type() == Slot::CONTEXT ||
6010 slot->var()->mode() == Variable::CONST ||
6011 slot->is_arguments()) {
6012 GetValue(typeof_state);
6013 return;
6014 }
6015
6016 // Only non-constant, frame-allocated parameters and locals can reach
6017 // here. Be careful not to use the optimizations for arguments
6018 // object access since it may not have been initialized yet.
6019 ASSERT(!slot->is_arguments());
6020 if (slot->type() == Slot::PARAMETER) {
6021 cgen_->frame()->TakeParameterAt(slot->index());
6022 } else {
6023 ASSERT(slot->type() == Slot::LOCAL);
6024 cgen_->frame()->TakeLocalAt(slot->index());
6025 }
6026}
6027
6028
6029void Reference::SetValue(InitState init_state) {
6030 ASSERT(cgen_->HasValidEntryRegisters());
6031 ASSERT(!is_illegal());
6032 MacroAssembler* masm = cgen_->masm();
6033 switch (type_) {
6034 case SLOT: {
6035 Comment cmnt(masm, "[ Store to Slot");
6036 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6037 ASSERT(slot != NULL);
6038 cgen_->StoreToSlot(slot, init_state);
6039 break;
6040 }
6041
6042 case NAMED: {
6043 Comment cmnt(masm, "[ Store to named Property");
6044 cgen_->frame()->Push(GetName());
6045 Result answer = cgen_->frame()->CallStoreIC();
6046 cgen_->frame()->Push(&answer);
6047 break;
6048 }
6049
6050 case KEYED: {
6051 Comment cmnt(masm, "[ Store to keyed Property");
6052
6053 // Generate inlined version of the keyed store if the code is in
6054 // a loop and the key is likely to be a smi.
6055 Property* property = expression()->AsProperty();
6056 ASSERT(property != NULL);
6057 SmiAnalysis* key_smi_analysis = property->key()->type();
6058
6059 if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
6060 Comment cmnt(masm, "[ Inlined store to keyed Property");
6061
6062 // Get the receiver, key and value into registers.
6063 Result value = cgen_->frame()->Pop();
6064 Result key = cgen_->frame()->Pop();
6065 Result receiver = cgen_->frame()->Pop();
6066
6067 Result tmp = cgen_->allocator_->Allocate();
6068 ASSERT(tmp.is_valid());
6069
6070 // Determine whether the value is a constant before putting it
6071 // in a register.
6072 bool value_is_constant = value.is_constant();
6073
6074 // Make sure that value, key and receiver are in registers.
6075 value.ToRegister();
6076 key.ToRegister();
6077 receiver.ToRegister();
6078
6079 DeferredReferenceSetKeyedValue* deferred =
6080 new DeferredReferenceSetKeyedValue(value.reg(),
6081 key.reg(),
6082 receiver.reg());
6083
6084 // Check that the value is a smi if it is not a constant.
6085 // We can skip the write barrier for smis and constants.
6086 if (!value_is_constant) {
6087 __ JumpIfNotSmi(value.reg(), deferred->entry_label());
6088 }
6089
6090 // Check that the key is a non-negative smi.
6091 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
Steve Blocka7e24c12009-10-30 11:49:00 +00006092
6093 // Check that the receiver is not a smi.
6094 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6095
6096 // Check that the receiver is a JSArray.
6097 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
6098 deferred->Branch(not_equal);
6099
6100 // Check that the key is within bounds. Both the key and the
Steve Block3ce2e202009-11-05 08:53:23 +00006101 // length of the JSArray are smis.
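        // The SmiCompare below computes 'length - key', so branching on
        // less_equal sends the case length <= key (key out of bounds) to the
        // deferred code.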
6102 __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
6103 key.reg());
6104 deferred->Branch(less_equal);
Steve Blocka7e24c12009-10-30 11:49:00 +00006105
6106 // Get the elements array from the receiver and check that it
6107 // is a flat array (not a dictionary).
6108 __ movq(tmp.reg(),
6109 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6110 // Bind the deferred code patch site to be able to locate the
6111 // fixed array map comparison. When debugging, we patch this
6112 // comparison to always fail so that we will hit the IC call
6113 // in the deferred code which will allow the debugger to
6114 // break for fast case stores.
6115 __ bind(deferred->patch_site());
6116 // Avoid using __ to ensure the distance from patch_site
6117 // to the map address is always the same.
6118 masm->movq(kScratchRegister, Factory::fixed_array_map(),
6119 RelocInfo::EMBEDDED_OBJECT);
6120 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6121 kScratchRegister);
6122 deferred->Branch(not_equal);
6123
6124 // Store the value.
6125 SmiIndex index =
6126 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
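        // SmiToIndex presumably untags the smi key into a register/scale pair
        // that can be used directly in the scaled memory operand below.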
6127 __ movq(Operand(tmp.reg(),
6128 index.reg,
6129 index.scale,
6130 FixedArray::kHeaderSize - kHeapObjectTag),
6131 value.reg());
6132 __ IncrementCounter(&Counters::keyed_store_inline, 1);
6133
6134 deferred->BindExit();
6135
6136 cgen_->frame()->Push(&receiver);
6137 cgen_->frame()->Push(&key);
6138 cgen_->frame()->Push(&value);
6139 } else {
6140 Result answer = cgen_->frame()->CallKeyedStoreIC();
6141 // Make sure that we do not have a test instruction after the
6142 // call. A test instruction after the call is used to
6143 // indicate that we have generated an inline version of the
6144 // keyed store.
6145 masm->nop();
6146 cgen_->frame()->Push(&answer);
6147 }
6148 break;
6149 }
6150
6151 default:
6152 UNREACHABLE();
6153 }
6154}
6155
6156
6157void ToBooleanStub::Generate(MacroAssembler* masm) {
6158 Label false_result, true_result, not_string;
6159 __ movq(rax, Operand(rsp, 1 * kPointerSize));
6160
6161 // 'null' => false.
6162 __ CompareRoot(rax, Heap::kNullValueRootIndex);
6163 __ j(equal, &false_result);
6164
6165 // Get the map and type of the heap object.
6166 // We don't use CmpObjectType because we manipulate the type field.
6167 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6168 __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
6169
6170 // Undetectable => false.
6171 __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
6172 __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
6173 __ j(not_zero, &false_result);
6174
6175 // JavaScript object => true.
6176 __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
6177 __ j(above_equal, &true_result);
6178
6179 // String value => false iff empty.
6180 __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
6181 __ j(above_equal, &not_string);
6182 __ and_(rcx, Immediate(kStringSizeMask));
6183 __ cmpq(rcx, Immediate(kShortStringTag));
6184 __ j(not_equal, &true_result); // Empty string is always short.
6185 __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
6186 __ shr(rdx, Immediate(String::kShortLengthShift));
6187 __ j(zero, &false_result);
6188 __ jmp(&true_result);
6189
6190 __ bind(&not_string);
6191 // HeapNumber => false iff +0, -0, or NaN.
6192 // These three cases set C3 when compared to zero in the FPU.
6193 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6194 __ j(not_equal, &true_result);
Steve Blocka7e24c12009-10-30 11:49:00 +00006195 __ fldz(); // Load zero onto fp stack
6196 // Load heap-number double value onto fp stack
6197 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
Steve Block3ce2e202009-11-05 08:53:23 +00006198 __ FCmp();
6199 __ j(zero, &false_result);
Steve Blocka7e24c12009-10-30 11:49:00 +00006200 // Fall through to |true_result|.
6201
6202 // Return 1/0 for true/false in rax.
6203 __ bind(&true_result);
6204 __ movq(rax, Immediate(1));
6205 __ ret(1 * kPointerSize);
6206 __ bind(&false_result);
6207 __ xor_(rax, rax);
6208 __ ret(1 * kPointerSize);
6209}
6210
6211
6212bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
6213 // TODO(X64): This method is identical to the ia32 version.
6214 // Either find a reason to change it, or move it somewhere where it can be
6215 // shared. (Notice: It assumes that a Smi can fit in an int).
6216
6217 Object* answer_object = Heap::undefined_value();
6218 switch (op) {
6219 case Token::ADD:
6220 if (Smi::IsValid(left + right)) {
6221 answer_object = Smi::FromInt(left + right);
6222 }
6223 break;
6224 case Token::SUB:
6225 if (Smi::IsValid(left - right)) {
6226 answer_object = Smi::FromInt(left - right);
6227 }
6228 break;
6229 case Token::MUL: {
6230 double answer = static_cast<double>(left) * right;
6231 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
6232 // If the product is zero and the non-zero factor is negative,
6233 // the spec requires us to return floating point negative zero.
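      // If answer == 0 then one factor is zero, so left + right equals the
      // other factor; a negative sum means the result must be -0.0, which is
      // not representable as a smi, so answer_object stays undefined and we
      // fall through to the generic code.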
6234 if (answer != 0 || (left + right) >= 0) {
6235 answer_object = Smi::FromInt(static_cast<int>(answer));
6236 }
6237 }
6238 }
6239 break;
6240 case Token::DIV:
6241 case Token::MOD:
6242 break;
6243 case Token::BIT_OR:
6244 answer_object = Smi::FromInt(left | right);
6245 break;
6246 case Token::BIT_AND:
6247 answer_object = Smi::FromInt(left & right);
6248 break;
6249 case Token::BIT_XOR:
6250 answer_object = Smi::FromInt(left ^ right);
6251 break;
6252
6253 case Token::SHL: {
6254 int shift_amount = right & 0x1F;
6255 if (Smi::IsValid(left << shift_amount)) {
6256 answer_object = Smi::FromInt(left << shift_amount);
6257 }
6258 break;
6259 }
6260 case Token::SHR: {
6261 int shift_amount = right & 0x1F;
6262 unsigned int unsigned_left = left;
6263 unsigned_left >>= shift_amount;
6264 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
6265 answer_object = Smi::FromInt(unsigned_left);
6266 }
6267 break;
6268 }
6269 case Token::SAR: {
6270 int shift_amount = right & 0x1F;
6271 unsigned int unsigned_left = left;
6272 if (left < 0) {
6273 // Perform arithmetic shift of a negative number by
6274 // complementing number, logical shifting, complementing again.
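  // E.g. left = -5 (0xFFFFFFFB), shift_amount = 1:
  // ~0xFFFFFFFB = 4, 4 >> 1 = 2, ~2 = -3, which equals -5 >> 1 arithmetically.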
6275 unsigned_left = ~unsigned_left;
6276 unsigned_left >>= shift_amount;
6277 unsigned_left = ~unsigned_left;
6278 } else {
6279 unsigned_left >>= shift_amount;
6280 }
6281 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
6282 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
6283 break;
6284 }
6285 default:
6286 UNREACHABLE();
6287 break;
6288 }
6289 if (answer_object == Heap::undefined_value()) {
6290 return false;
6291 }
6292 frame_->Push(Handle<Object>(answer_object));
6293 return true;
6294}
6295
6296
6297// End of CodeGenerator implementation.
6298
6299void UnarySubStub::Generate(MacroAssembler* masm) {
6300 Label slow;
6301 Label done;
6302 Label try_float;
6303 // Check whether the value is a smi.
6304 __ JumpIfNotSmi(rax, &try_float);
6305
6306 // Enter runtime system if the value of the smi is zero
6307 // to make sure that we switch between 0 and -0.
6308 // Also enter it if the value of the smi is Smi::kMinValue.
6309 __ SmiNeg(rax, rax, &done);
6310
6311 // Either zero or Smi::kMinValue, neither of which becomes a smi when negated.
6312 __ SmiCompare(rax, Smi::FromInt(0));
6313 __ j(not_equal, &slow);
6314 __ Move(rax, Factory::minus_zero_value());
6315 __ jmp(&done);
6316
6317 // Enter runtime system.
6318 __ bind(&slow);
6319 __ pop(rcx); // pop return address
6320 __ push(rax);
6321 __ push(rcx); // push return address
6322 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
6323 __ jmp(&done);
6324
6325 // Try floating point case.
6326 __ bind(&try_float);
6327 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6328 __ Cmp(rdx, Factory::heap_number_map());
6329 __ j(not_equal, &slow);
6330 // Operand is a float, negate its value by flipping sign bit.
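  // The sign of an IEEE 754 double is bit 63, so XOR-ing with 1 << 63 negates
  // the value without touching exponent or mantissa, e.g. 3.5
  // (0x400C000000000000) becomes -3.5 (0xC00C000000000000).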
6331 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
6332 __ movq(kScratchRegister, Immediate(0x01));
6333 __ shl(kScratchRegister, Immediate(63));
6334 __ xor_(rdx, kScratchRegister); // Flip sign.
6335 // rdx is value to store.
6336 if (overwrite_) {
6337 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
6338 } else {
6339 __ AllocateHeapNumber(rcx, rbx, &slow);
6340 // rcx: allocated 'empty' number
6341 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
6342 __ movq(rax, rcx);
6343 }
6344
6345 __ bind(&done);
6346 __ StubReturn(1);
6347}
6348
6349
6350void CompareStub::Generate(MacroAssembler* masm) {
6351 Label call_builtin, done;
6352
6353 // NOTICE! This code is only reached after a smi-fast-case check, so
6354 // it is certain that at least one operand isn't a smi.
6355
6356 if (cc_ == equal) { // Both strict and non-strict.
6357 Label slow; // Fallthrough label.
6358 // Equality is almost reflexive (everything but NaN), so start by testing
6359 // for "identity and not NaN".
6360 {
6361 Label not_identical;
6362 __ cmpq(rax, rdx);
6363 __ j(not_equal, &not_identical);
6364 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
6365 // so we do the second best thing - test it ourselves.
6366
6367 Label return_equal;
6368 Label heap_number;
6369 // If it's not a heap number, then return equal.
6370 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
6371 Factory::heap_number_map());
6372 __ j(equal, &heap_number);
6373 __ bind(&return_equal);
6374 __ xor_(rax, rax);
6375 __ ret(0);
6376
6377 __ bind(&heap_number);
6378 // It is a heap number, so return non-equal if it's NaN and equal if it's
6379 // not NaN.
6380 // The representation of NaN values has all exponent bits (52..62) set,
6381 // and not all mantissa bits (0..51) clear.
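  // E.g. 0x7FF0000000000000 (+Infinity) has a zero mantissa, while
  // 0x7FF8000000000000 (a quiet NaN) does not.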
6382 // Read double representation into rax.
6383 __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE);
6384 __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
6385 // Test that exponent bits are all set.
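  // (rbx | rax) can only equal rax if rax already has every exponent bit set.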
6386 __ or_(rbx, rax);
6387 __ cmpq(rbx, rax);
6388 __ j(not_equal, &return_equal);
6389 // Shift out the sign bit and all exponent bits, retaining only the mantissa.
6390 __ shl(rax, Immediate(12));
6391 // If all bits in the mantissa are zero the number is Infinity, and
6392 // we return zero. Otherwise it is a NaN, and we return non-zero.
6393 // We cannot just return rax because only eax is tested on return.
6394 __ setcc(not_zero, rax);
6395 __ ret(0);
6396
6397 __ bind(&not_identical);
6398 }
6399
6400 // If we're doing a strict equality comparison, we don't have to do
6401 // type conversion, so we generate code to do fast comparison for objects
6402 // and oddballs. Non-smi numbers and strings still go through the usual
6403 // slow-case code.
6404 if (strict_) {
6405 // If either is a Smi (we know that not both are), then they can only
6406 // be equal if the other is a HeapNumber. If so, use the slow case.
6407 {
6408 Label not_smis;
6409 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
6410
6411 // Check if the non-smi operand is a heap number.
6412 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
6413 Factory::heap_number_map());
6414 // If heap number, handle it in the slow case.
6415 __ j(equal, &slow);
6416 // Return non-equal. ebx (the lower half of rbx) is not zero.
6417 __ movq(rax, rbx);
6418 __ ret(0);
6419
6420 __ bind(&not_smis);
6421 }
6422
6423 // If either operand is a JSObject or an oddball value, then they are not
6424 // equal since their pointers are different
6425 // There is no test for undetectability in strict equality.
6426
6427 // If the first object is a JS object, we have done pointer comparison.
6428 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
6429 Label first_non_object;
6430 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
6431 __ j(below, &first_non_object);
6432 // Return non-zero; eax (the low 32 bits of rax, which is what callers test) is not zero.
6433 Label return_not_equal;
6434 ASSERT(kHeapObjectTag != 0);
6435 __ bind(&return_not_equal);
6436 __ ret(0);
6437
6438 __ bind(&first_non_object);
6439 // Check for oddballs: true, false, null, undefined.
6440 __ CmpInstanceType(rcx, ODDBALL_TYPE);
6441 __ j(equal, &return_not_equal);
6442
6443 __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
6444 __ j(above_equal, &return_not_equal);
6445
6446 // Check for oddballs: true, false, null, undefined.
6447 __ CmpInstanceType(rcx, ODDBALL_TYPE);
6448 __ j(equal, &return_not_equal);
6449
6450 // Fall through to the general case.
6451 }
6452 __ bind(&slow);
6453 }
6454
6455 // Push arguments below the return address to prepare jump to builtin.
6456 __ pop(rcx);
6457 __ push(rax);
6458 __ push(rdx);
6459 __ push(rcx);
6460
6461 // Inlined floating point compare.
6462 // Call builtin if operands are not floating point or smi.
6463 Label check_for_symbols;
6464 // Push arguments on stack, for helper functions.
6465 FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
6466 FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
6467 __ FCmp();
6468
6469 // Jump to builtin for NaN.
6470 __ j(parity_even, &call_builtin);
6471
6472 // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
6473 Label below_lbl, above_lbl;
6474 // use rdx, rax to convert unsigned to signed comparison
6475 __ j(below, &below_lbl);
6476 __ j(above, &above_lbl);
6477
6478 __ xor_(rax, rax); // equal
6479 __ ret(2 * kPointerSize);
6480
6481 __ bind(&below_lbl);
6482 __ movq(rax, Immediate(-1));
6483 __ ret(2 * kPointerSize);
6484
6485 __ bind(&above_lbl);
6486 __ movq(rax, Immediate(1));
6487 __ ret(2 * kPointerSize); // rax, rdx were pushed
6488
6489 // Fast negative check for symbol-to-symbol equality.
6490 __ bind(&check_for_symbols);
6491 if (cc_ == equal) {
6492 BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister);
6493 BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister);
6494
6495 // We've already checked for object identity, so if both operands
6496 // are symbols they aren't equal. Register eax (not rax) already holds a
6497 // non-zero value, which indicates not equal, so just return.
6498 __ ret(2 * kPointerSize);
6499 }
6500
6501 __ bind(&call_builtin);
6502 // must swap argument order
6503 __ pop(rcx);
6504 __ pop(rdx);
6505 __ pop(rax);
6506 __ push(rdx);
6507 __ push(rax);
6508
6509 // Figure out which native to call and setup the arguments.
6510 Builtins::JavaScript builtin;
6511 if (cc_ == equal) {
6512 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
6513 } else {
6514 builtin = Builtins::COMPARE;
6515 int ncr; // NaN compare result
6516 if (cc_ == less || cc_ == less_equal) {
6517 ncr = GREATER;
6518 } else {
6519 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
6520 ncr = LESS;
6521 }
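  // This pushed value is the result used when either operand is NaN: GREATER
  // for < and <=, LESS for > and >=, so any comparison involving NaN comes
  // out false, as the spec requires.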
6522 __ Push(Smi::FromInt(ncr));
6523 }
6524
6525 // Restore return address on the stack.
6526 __ push(rcx);
6527
6528 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
6529 // tagged as a small integer.
6530 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
6531}
6532
6533
6534void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
6535 Label* label,
6536 Register object,
6537 Register scratch) {
6538 __ JumpIfSmi(object, label);
6539 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
6540 __ movzxbq(scratch,
6541 FieldOperand(scratch, Map::kInstanceTypeOffset));
6542 __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
6543 __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
6544 __ j(not_equal, label);
6545}
6546
6547
6548// Call the function just below TOS on the stack with the given
6549// arguments. The receiver is the TOS.
6550void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
6551 int position) {
6552 // Push the arguments ("left-to-right") on the stack.
6553 int arg_count = args->length();
6554 for (int i = 0; i < arg_count; i++) {
6555 Load(args->at(i));
6556 }
6557
6558 // Record the position for debugging purposes.
6559 CodeForSourcePosition(position);
6560
6561 // Use the shared code stub to call the function.
6562 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
6563 CallFunctionStub call_function(arg_count, in_loop);
6564 Result answer = frame_->CallStub(&call_function, arg_count + 1);
6565 // Restore context and replace function on the stack with the
6566 // result of the stub invocation.
6567 frame_->RestoreContextRegister();
6568 frame_->SetElementAt(0, &answer);
6569}
6570
6571
6572void InstanceofStub::Generate(MacroAssembler* masm) {
6573 // Implements "value instanceof function" operator.
6574 // Expected input state:
6575 // rsp[0] : return address
6576 // rsp[1] : function pointer
6577 // rsp[2] : value
6578
6579 // Get the object - go slow case if it's a smi.
6580 Label slow;
6581 __ movq(rax, Operand(rsp, 2 * kPointerSize));
6582 __ JumpIfSmi(rax, &slow);
6583
6584 // Check that the left hand is a JS object. Leave its map in rax.
6585 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
6586 __ j(below, &slow);
6587 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
6588 __ j(above, &slow);
6589
6590 // Get the prototype of the function.
6591 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6592 __ TryGetFunctionPrototype(rdx, rbx, &slow);
6593
6594 // Check that the function prototype is a JS object.
6595 __ JumpIfSmi(rbx, &slow);
6596 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
6597 __ j(below, &slow);
6598 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
6599 __ j(above, &slow);
6600
6601 // Register mapping: rax is object map and rbx is function prototype.
6602 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
6603
6604 // Loop through the prototype chain looking for the function prototype.
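  // I.e. repeatedly follow rcx = rcx.map.prototype until rcx is either the
  // function prototype (instance) or null (not an instance).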
6605 Label loop, is_instance, is_not_instance;
6606 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
6607 __ bind(&loop);
6608 __ cmpq(rcx, rbx);
6609 __ j(equal, &is_instance);
6610 __ cmpq(rcx, kScratchRegister);
6611 __ j(equal, &is_not_instance);
6612 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
6613 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
6614 __ jmp(&loop);
6615
6616 __ bind(&is_instance);
6617 __ xor_(rax, rax);
6618 __ ret(2 * kPointerSize);
6619
6620 __ bind(&is_not_instance);
6621 __ Move(rax, Smi::FromInt(1));
6622 __ ret(2 * kPointerSize);
6623
6624 // Slow-case: Go through the JavaScript implementation.
6625 __ bind(&slow);
6626 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
6627}
6628
6629
6630void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
6631 // The displacement is used for skipping the return address and the
6632 // frame pointer on the stack. It is the offset of the last
6633 // parameter (if any) relative to the frame pointer.
6634 static const int kDisplacement = 2 * kPointerSize;
6635
6636 // Check if the calling frame is an arguments adaptor frame.
6637 Label runtime;
6638 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6639 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
6640 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
6641 __ j(not_equal, &runtime);
6642 // Value in rcx is Smi encoded.
6643
6644 // Patch the arguments.length and the parameters pointer.
6645 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6646 __ movq(Operand(rsp, 1 * kPointerSize), rcx);
6647 SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
6648 __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
6649 __ movq(Operand(rsp, 2 * kPointerSize), rdx);
6650
6651 // Do the runtime call to allocate the arguments object.
6652 __ bind(&runtime);
6653 Runtime::Function* f = Runtime::FunctionForId(Runtime::kNewArgumentsFast);
6654 __ TailCallRuntime(ExternalReference(f), 3, f->result_size);
6655}
6656
6657
6658void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6659 // The key is in rdx and the parameter count is in rax.
6660
6661 // The displacement is used for skipping the frame pointer on the
6662 // stack. It is the offset of the last parameter (if any) relative
6663 // to the frame pointer.
6664 static const int kDisplacement = 1 * kPointerSize;
6665
6666 // Check that the key is a smi.
6667 Label slow;
6668 __ JumpIfNotSmi(rdx, &slow);
6669
6670 // Check if the calling frame is an arguments adaptor frame.
6671 Label adaptor;
6672 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6673 __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
6674 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
6675 __ j(equal, &adaptor);
6676
6677 // Check index against formal parameters count limit passed in
6678 // through register rax. Use unsigned comparison to get negative
6679 // check for free.
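  // A negative smi reinterpreted as an unsigned value is huge, so the single
  // above_equal test below rejects both out-of-range and negative keys.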
6680 __ cmpq(rdx, rax);
6681 __ j(above_equal, &slow);
6682
6683 // Read the argument from the stack and return it.
6684 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
6685 __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
6686 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
6687 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
6688 __ Ret();
6689
6690 // Arguments adaptor case: Check index against actual arguments
6691 // limit found in the arguments adaptor frame. Use unsigned
6692 // comparison to get negative check for free.
6693 __ bind(&adaptor);
6694 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6695 __ cmpq(rdx, rcx);
6696 __ j(above_equal, &slow);
6697
6698 // Read the argument from the stack and return it.
6699 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
6700 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
6701 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
6702 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
6703 __ Ret();
6704
6705 // Slow-case: Handle non-smi or out-of-bounds access to arguments
6706 // by calling the runtime system.
6707 __ bind(&slow);
6708 __ pop(rbx); // Return address.
6709 __ push(rdx);
6710 __ push(rbx);
6711 Runtime::Function* f =
6712 Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
6713 __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
6714}
6715
6716
6717void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
6718 // Check if the calling frame is an arguments adaptor frame.
6719 Label adaptor;
6720 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6721 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
6722 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
6723 __ j(equal, &adaptor);
6724
6725 // Nothing to do: The formal number of parameters has already been
6726 // passed in register rax by calling function. Just return it.
6727 __ ret(0);
6728
6729 // Arguments adaptor case: Read the arguments length from the
6730 // adaptor frame and return it.
6731 __ bind(&adaptor);
6732 __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6733 __ ret(0);
6734}
6735
6736
6737int CEntryStub::MinorKey() {
6738 ASSERT(result_size_ <= 2);
6739#ifdef _WIN64
6740 // Simple results returned in rax (using default code).
6741 // Complex results must be written to address passed as first argument.
6742 // Use even numbers for minor keys, reserving the odd numbers for
6743 // CEntryDebugBreakStub.
6744 return (result_size_ < 2) ? 0 : result_size_ * 2;
6745#else
6746 // Single results returned in rax (both AMD64 and Win64 calling conventions)
6747 // and a struct of two pointers in rax+rdx (AMD64 calling convention only)
6748 // by default.
6749 return 0;
6750#endif
6751}
6752
6753
6754void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
6755 // Check that stack should contain next handler, frame pointer, state and
6756 // return address in that order.
6757 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
6758 StackHandlerConstants::kStateOffset);
6759 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
6760 StackHandlerConstants::kPCOffset);
6761
6762 ExternalReference handler_address(Top::k_handler_address);
6763 __ movq(kScratchRegister, handler_address);
6764 __ movq(rsp, Operand(kScratchRegister, 0));
6765 // get next in chain
6766 __ pop(rcx);
6767 __ movq(Operand(kScratchRegister, 0), rcx);
6768 __ pop(rbp); // pop frame pointer
6769 __ pop(rdx); // remove state
6770
6771 // Before returning we restore the context from the frame pointer if not NULL.
6772 // The frame pointer is NULL in the exception handler of a JS entry frame.
6773 __ xor_(rsi, rsi); // tentatively set context pointer to NULL
6774 Label skip;
6775 __ cmpq(rbp, Immediate(0));
6776 __ j(equal, &skip);
6777 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
6778 __ bind(&skip);
6779 __ ret(0);
6780}
6781
6782
6783void CEntryStub::GenerateCore(MacroAssembler* masm,
6784 Label* throw_normal_exception,
6785 Label* throw_termination_exception,
6786 Label* throw_out_of_memory_exception,
6787 StackFrame::Type frame_type,
6788 bool do_gc,
6789 bool always_allocate_scope) {
6790 // rax: result parameter for PerformGC, if any.
6791 // rbx: pointer to C function (C callee-saved).
6792 // rbp: frame pointer (restored after C call).
6793 // rsp: stack pointer (restored after C call).
6794 // r14: number of arguments including receiver (C callee-saved).
6795 // r15: pointer to the first argument (C callee-saved).
6796 // This pointer is reused in LeaveExitFrame(), so it is stored in a
6797 // callee-saved register.
6798
6799 if (do_gc) {
6800 // Pass failure code returned from last attempt as first argument to GC.
6801#ifdef _WIN64
6802 __ movq(rcx, rax);
6803#else // ! defined(_WIN64)
6804 __ movq(rdi, rax);
6805#endif
6806 __ movq(kScratchRegister,
6807 FUNCTION_ADDR(Runtime::PerformGC),
6808 RelocInfo::RUNTIME_ENTRY);
6809 __ call(kScratchRegister);
6810 }
6811
6812 ExternalReference scope_depth =
6813 ExternalReference::heap_always_allocate_scope_depth();
6814 if (always_allocate_scope) {
6815 __ movq(kScratchRegister, scope_depth);
6816 __ incl(Operand(kScratchRegister, 0));
6817 }
6818
6819 // Call C function.
6820#ifdef _WIN64
6821 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
6822 // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
6823 __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
6824 __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
6825 if (result_size_ < 2) {
6826 // Pass a pointer to the Arguments object as the first argument.
6827 // Return result in single register (rax).
6828 __ lea(rcx, Operand(rsp, 4 * kPointerSize));
6829 } else {
6830 ASSERT_EQ(2, result_size_);
6831 // Pass a pointer to the result location as the first argument.
6832 __ lea(rcx, Operand(rsp, 6 * kPointerSize));
6833 // Pass a pointer to the Arguments object as the second argument.
6834 __ lea(rdx, Operand(rsp, 4 * kPointerSize));
6835 }
6836
6837#else // ! defined(_WIN64)
6838 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
6839 __ movq(rdi, r14); // argc.
6840 __ movq(rsi, r15); // argv.
6841#endif
6842 __ call(rbx);
6843 // Result is in rax - do not destroy this register!
6844
6845 if (always_allocate_scope) {
6846 __ movq(kScratchRegister, scope_depth);
6847 __ decl(Operand(kScratchRegister, 0));
6848 }
6849
6850 // Check for failure result.
6851 Label failure_returned;
6852 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
6853#ifdef _WIN64
6854 // If return value is on the stack, pop it to registers.
6855 if (result_size_ > 1) {
6856 ASSERT_EQ(2, result_size_);
6857 // Position above 4 argument mirrors and arguments object.
6858 __ movq(rax, Operand(rsp, 6 * kPointerSize));
6859 __ movq(rdx, Operand(rsp, 7 * kPointerSize));
6860 }
6861#endif
6862 __ lea(rcx, Operand(rax, 1));
6863 // Lower 2 bits of rcx are 0 iff rax has failure tag.
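  // The ASSERT above guarantees ((kFailureTag + 1) & kFailureTagMask) == 0,
  // so adding 1 clears the tag bits exactly when rax carries a failure tag.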
6864 __ testl(rcx, Immediate(kFailureTagMask));
6865 __ j(zero, &failure_returned);
6866
6867 // Exit the JavaScript to C++ exit frame.
6868 __ LeaveExitFrame(frame_type, result_size_);
6869 __ ret(0);
6870
6871 // Handling of failure.
6872 __ bind(&failure_returned);
6873
6874 Label retry;
6875 // If the returned exception is RETRY_AFTER_GC continue at retry label
6876 ASSERT(Failure::RETRY_AFTER_GC == 0);
6877 __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
6878 __ j(zero, &retry);
6879
6880 // Special handling of out of memory exceptions.
6881 __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
6882 __ cmpq(rax, kScratchRegister);
6883 __ j(equal, throw_out_of_memory_exception);
6884
6885 // Retrieve the pending exception and clear the variable.
6886 ExternalReference pending_exception_address(Top::k_pending_exception_address);
6887 __ movq(kScratchRegister, pending_exception_address);
6888 __ movq(rax, Operand(kScratchRegister, 0));
6889 __ movq(rdx, ExternalReference::the_hole_value_location());
6890 __ movq(rdx, Operand(rdx, 0));
6891 __ movq(Operand(kScratchRegister, 0), rdx);
6892
6893 // Special handling of termination exceptions which are uncatchable
6894 // by javascript code.
6895 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
6896 __ j(equal, throw_termination_exception);
6897
6898 // Handle normal exception.
6899 __ jmp(throw_normal_exception);
6900
6901 // Retry.
6902 __ bind(&retry);
6903}
6904
6905
6906void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
6907 UncatchableExceptionType type) {
6908 // Fetch top stack handler.
6909 ExternalReference handler_address(Top::k_handler_address);
6910 __ movq(kScratchRegister, handler_address);
6911 __ movq(rsp, Operand(kScratchRegister, 0));
6912
6913 // Unwind the handlers until the ENTRY handler is found.
6914 Label loop, done;
6915 __ bind(&loop);
6916 // Load the type of the current stack handler.
6917 const int kStateOffset = StackHandlerConstants::kStateOffset;
6918 __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
6919 __ j(equal, &done);
6920 // Fetch the next handler in the list.
6921 const int kNextOffset = StackHandlerConstants::kNextOffset;
6922 __ movq(rsp, Operand(rsp, kNextOffset));
6923 __ jmp(&loop);
6924 __ bind(&done);
6925
6926 // Set the top handler address to next handler past the current ENTRY handler.
6927 __ movq(kScratchRegister, handler_address);
6928 __ pop(Operand(kScratchRegister, 0));
6929
6930 if (type == OUT_OF_MEMORY) {
6931 // Set external caught exception to false.
6932 ExternalReference external_caught(Top::k_external_caught_exception_address);
6933 __ movq(rax, Immediate(false));
6934 __ store_rax(external_caught);
6935
6936 // Set pending exception and rax to out of memory exception.
6937 ExternalReference pending_exception(Top::k_pending_exception_address);
6938 __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
6939 __ store_rax(pending_exception);
6940 }
6941
6942 // Clear the context pointer.
6943 __ xor_(rsi, rsi);
6944
6945 // Restore registers from handler.
6946 ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
6947 StackHandlerConstants::kFPOffset);
6948 __ pop(rbp); // FP
6949 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
6950 StackHandlerConstants::kStateOffset);
6951 __ pop(rdx); // State
6952
6953 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
6954 StackHandlerConstants::kPCOffset);
6955 __ ret(0);
6956}
6957
6958
6959void CallFunctionStub::Generate(MacroAssembler* masm) {
6960 Label slow;
6961
6962 // Get the function to call from the stack.
6963 // +2 ~ receiver, return address
6964 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
6965
6966 // Check that the function really is a JavaScript function.
6967 __ JumpIfSmi(rdi, &slow);
6968 // Goto slow case if we do not have a function.
6969 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
6970 __ j(not_equal, &slow);
6971
6972 // Fast-case: Just invoke the function.
6973 ParameterCount actual(argc_);
6974 __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
6975
6976 // Slow-case: Non-function called.
6977 __ bind(&slow);
6978 __ Set(rax, argc_);
6979 __ Set(rbx, 0);
6980 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
6981 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
6982 __ Jump(adaptor, RelocInfo::CODE_TARGET);
6983}
6984
6985
6986void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
6987 // rax: number of arguments including receiver
6988 // rbx: pointer to C function (C callee-saved)
6989 // rbp: frame pointer of calling JS frame (restored after C call)
6990 // rsp: stack pointer (restored after C call)
6991 // rsi: current context (restored)
6992
6993 // NOTE: Invocations of builtins may return failure objects
6994 // instead of a proper result. The builtin entry handles
6995 // this by performing a garbage collection and retrying the
6996 // builtin once.
6997
6998 StackFrame::Type frame_type = is_debug_break ?
6999 StackFrame::EXIT_DEBUG :
7000 StackFrame::EXIT;
7001
7002 // Enter the exit frame that transitions from JavaScript to C++.
7003 __ EnterExitFrame(frame_type, result_size_);
7004
7005 // rax: Holds the context at this point, but should not be used.
7006 // On entry to code generated by GenerateCore, it must hold
7007 // a failure result if the collect_garbage argument to GenerateCore
7008 // is true. This failure result can be the result of code
7009 // generated by a previous call to GenerateCore. The value
7010 // of rax is then passed to Runtime::PerformGC.
7011 // rbx: pointer to builtin function (C callee-saved).
7012 // rbp: frame pointer of exit frame (restored after C call).
7013 // rsp: stack pointer (restored after C call).
7014 // r14: number of arguments including receiver (C callee-saved).
7015 // r15: argv pointer (C callee-saved).
7016
7017 Label throw_normal_exception;
7018 Label throw_termination_exception;
7019 Label throw_out_of_memory_exception;
7020
7021 // Call into the runtime system.
7022 GenerateCore(masm,
7023 &throw_normal_exception,
7024 &throw_termination_exception,
7025 &throw_out_of_memory_exception,
7026 frame_type,
7027 false,
7028 false);
7029
7030 // Do space-specific GC and retry runtime call.
7031 GenerateCore(masm,
7032 &throw_normal_exception,
7033 &throw_termination_exception,
7034 &throw_out_of_memory_exception,
7035 frame_type,
7036 true,
7037 false);
7038
7039 // Do full GC and retry runtime call one final time.
7040 Failure* failure = Failure::InternalError();
7041 __ movq(rax, failure, RelocInfo::NONE);
7042 GenerateCore(masm,
7043 &throw_normal_exception,
7044 &throw_termination_exception,
7045 &throw_out_of_memory_exception,
7046 frame_type,
7047 true,
7048 true);
7049
7050 __ bind(&throw_out_of_memory_exception);
7051 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
7052
7053 __ bind(&throw_termination_exception);
7054 GenerateThrowUncatchable(masm, TERMINATION);
7055
7056 __ bind(&throw_normal_exception);
7057 GenerateThrowTOS(masm);
7058}
7059
7060
7061void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
7062 Label invoke, exit;
7063#ifdef ENABLE_LOGGING_AND_PROFILING
7064 Label not_outermost_js, not_outermost_js_2;
7065#endif
7066
7067 // Setup frame.
7068 __ push(rbp);
7069 __ movq(rbp, rsp);
7070
7071 // Push the stack frame type marker twice.
7072 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
7073 __ Push(Smi::FromInt(marker)); // context slot
7074 __ Push(Smi::FromInt(marker)); // function slot
7075 // Save callee-saved registers (X64 calling conventions).
7076 __ push(r12);
7077 __ push(r13);
7078 __ push(r14);
7079 __ push(r15);
7080 __ push(rdi);
7081 __ push(rsi);
7082 __ push(rbx);
7083 // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
7084 // callee-save in JS code as well.
7085
7086 // Save copies of the top frame descriptor on the stack.
7087 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
7088 __ load_rax(c_entry_fp);
7089 __ push(rax);
7090
7091#ifdef ENABLE_LOGGING_AND_PROFILING
7092 // If this is the outermost JS call, set js_entry_sp value.
7093 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
7094 __ load_rax(js_entry_sp);
7095 __ testq(rax, rax);
7096 __ j(not_zero, &not_outermost_js);
7097 __ movq(rax, rbp);
7098 __ store_rax(js_entry_sp);
7099 __ bind(&not_outermost_js);
7100#endif
7101
7102 // Call a faked try-block that does the invoke.
7103 __ call(&invoke);
7104
7105 // Caught exception: Store result (exception) in the pending
7106 // exception field in the JSEnv and return a failure sentinel.
7107 ExternalReference pending_exception(Top::k_pending_exception_address);
7108 __ store_rax(pending_exception);
7109 __ movq(rax, Failure::Exception(), RelocInfo::NONE);
7110 __ jmp(&exit);
7111
7112 // Invoke: Link this frame into the handler chain.
7113 __ bind(&invoke);
7114 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
7115
7116 // Clear any pending exceptions.
7117 __ load_rax(ExternalReference::the_hole_value_location());
7118 __ store_rax(pending_exception);
7119
7120 // Fake a receiver (NULL).
7121 __ push(Immediate(0)); // receiver
7122
7123 // Invoke the function by calling through JS entry trampoline
7124 // builtin and pop the faked function when we return. We load the address
7125 // from an external reference instead of inlining the call target address
7126 // directly in the code, because the builtin stubs may not have been
7127 // generated yet at the time this code is generated.
7128 if (is_construct) {
7129 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
7130 __ load_rax(construct_entry);
7131 } else {
7132 ExternalReference entry(Builtins::JSEntryTrampoline);
7133 __ load_rax(entry);
7134 }
7135 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
7136 __ call(kScratchRegister);
7137
7138 // Unlink this frame from the handler chain.
7139 __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
7140 __ pop(Operand(kScratchRegister, 0));
7141 // Pop next_sp.
7142 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
7143
7144#ifdef ENABLE_LOGGING_AND_PROFILING
7145 // If current EBP value is the same as js_entry_sp value, it means that
7146 // the current function is the outermost.
7147 __ movq(kScratchRegister, js_entry_sp);
7148 __ cmpq(rbp, Operand(kScratchRegister, 0));
7149 __ j(not_equal, &not_outermost_js_2);
7150 __ movq(Operand(kScratchRegister, 0), Immediate(0));
7151 __ bind(&not_outermost_js_2);
7152#endif
7153
7154 // Restore the top frame descriptor from the stack.
7155 __ bind(&exit);
7156 __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
7157 __ pop(Operand(kScratchRegister, 0));
7158
7159 // Restore callee-saved registers (X64 conventions).
7160 __ pop(rbx);
7161 __ pop(rsi);
7162 __ pop(rdi);
7163 __ pop(r15);
7164 __ pop(r14);
7165 __ pop(r13);
7166 __ pop(r12);
7167 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
7168
7169 // Restore frame pointer and return.
7170 __ pop(rbp);
7171 __ ret(0);
7172}
7173
7174
7175// -----------------------------------------------------------------------------
7176// Implementation of stubs.
7177
7178// Stub classes have public member named masm, not masm_.
7179
7180void StackCheckStub::Generate(MacroAssembler* masm) {
7181 // Because builtins always remove the receiver from the stack, we
7182 // have to fake one to avoid underflowing the stack. The receiver
7183 // must be inserted below the return address on the stack so we
7184 // temporarily store that in a register.
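  // After the pop/push pair below the stack is: return address, fake receiver
  // (Smi 0), the rest of the original stack contents.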
7185 __ pop(rax);
7186 __ Push(Smi::FromInt(0));
7187 __ push(rax);
7188
7189 // Do tail-call to runtime routine.
7190 Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
7191 __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
7192}
7193
7194
7195void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7196 Register number) {
7197 Label load_smi, done;
7198
7199 __ JumpIfSmi(number, &load_smi);
7200 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
7201 __ jmp(&done);
7202
7203 __ bind(&load_smi);
7204 __ SmiToInteger32(number, number);
7205 __ push(number);
7206 __ fild_s(Operand(rsp, 0));
7207 __ pop(number);
7208
7209 __ bind(&done);
7210}
7211
7212
7213void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7214 Register src,
7215 XMMRegister dst) {
7216 Label load_smi, done;
7217
7218 __ JumpIfSmi(src, &load_smi);
7219 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
7220 __ jmp(&done);
7221
7222 __ bind(&load_smi);
7223 __ SmiToInteger32(src, src);
7224 __ cvtlsi2sd(dst, src);
7225
7226 __ bind(&done);
7227}
7228
7229
7230void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7231 XMMRegister dst1,
7232 XMMRegister dst2) {
7233 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7234 LoadFloatOperand(masm, kScratchRegister, dst1);
7235 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7236 LoadFloatOperand(masm, kScratchRegister, dst2);
7237}
7238
7239
7240void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
7241 const Operand& src,
7242 Register dst) {
7243 // TODO(X64): Convert number operands to int32 values.
7244 // Don't convert a Smi to a double first.
7245 UNIMPLEMENTED();
7246}
7247
7248
7249void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
7250 Label load_smi_1, load_smi_2, done_load_1, done;
7251 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7252 __ JumpIfSmi(kScratchRegister, &load_smi_1);
7253 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7254 __ bind(&done_load_1);
7255
7256 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7257 __ JumpIfSmi(kScratchRegister, &load_smi_2);
7258 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7259 __ jmp(&done);
7260
7261 __ bind(&load_smi_1);
7262 __ SmiToInteger32(kScratchRegister, kScratchRegister);
7263 __ push(kScratchRegister);
7264 __ fild_s(Operand(rsp, 0));
7265 __ pop(kScratchRegister);
7266 __ jmp(&done_load_1);
7267
7268 __ bind(&load_smi_2);
7269 __ SmiToInteger32(kScratchRegister, kScratchRegister);
7270 __ push(kScratchRegister);
7271 __ fild_s(Operand(rsp, 0));
7272 __ pop(kScratchRegister);
7273
7274 __ bind(&done);
7275}
7276
7277
7278void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7279 Register lhs,
7280 Register rhs) {
7281 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
7282 __ JumpIfSmi(lhs, &load_smi_lhs);
7283 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
7284 __ bind(&done_load_lhs);
7285
7286 __ JumpIfSmi(rhs, &load_smi_rhs);
7287 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
7288 __ jmp(&done);
7289
7290 __ bind(&load_smi_lhs);
7291 __ SmiToInteger64(kScratchRegister, lhs);
7292 __ push(kScratchRegister);
7293 __ fild_d(Operand(rsp, 0));
7294 __ pop(kScratchRegister);
7295 __ jmp(&done_load_lhs);
7296
7297 __ bind(&load_smi_rhs);
7298 __ SmiToInteger64(kScratchRegister, rhs);
7299 __ push(kScratchRegister);
7300 __ fild_d(Operand(rsp, 0));
7301 __ pop(kScratchRegister);
7302
7303 __ bind(&done);
7304}
7305
7306
7307void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
7308 Label* non_float) {
7309 Label test_other, done;
7310 // Test if both operands are numbers (heap_numbers or smis).
7311 // If not, jump to label non_float.
7312 __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
7313 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
7314 __ j(not_equal, non_float); // The argument in rdx is not a number.
7315
7316 __ bind(&test_other);
7317 __ JumpIfSmi(rax, &done); // argument in rax is OK
7318 __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
7319 __ j(not_equal, non_float); // The argument in rax is not a number.
7320
7321 // Fall-through: Both operands are numbers.
7322 __ bind(&done);
7323}
7324
7325
7326const char* GenericBinaryOpStub::GetName() {
7327 switch (op_) {
7328 case Token::ADD: return "GenericBinaryOpStub_ADD";
7329 case Token::SUB: return "GenericBinaryOpStub_SUB";
7330 case Token::MUL: return "GenericBinaryOpStub_MUL";
7331 case Token::DIV: return "GenericBinaryOpStub_DIV";
7332 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
7333 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
7334 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
7335 case Token::SAR: return "GenericBinaryOpStub_SAR";
7336 case Token::SHL: return "GenericBinaryOpStub_SHL";
7337 case Token::SHR: return "GenericBinaryOpStub_SHR";
7338 default: return "GenericBinaryOpStub";
7339 }
7340}
7341
7342
7343void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
7344 // Perform fast-case smi code for the operation (rax <op> rbx) and
7345 // leave result in register rax.
7346
7347 // Smi check both operands.
7348 __ JumpIfNotBothSmi(rax, rbx, slow);
7349
7350 switch (op_) {
7351 case Token::ADD: {
7352 __ SmiAdd(rax, rax, rbx, slow);
7353 break;
7354 }
7355
7356 case Token::SUB: {
7357 __ SmiSub(rax, rax, rbx, slow);
7358 break;
7359 }
7360
7361 case Token::MUL:
7362 __ SmiMul(rax, rax, rbx, slow);
7363 break;
7364
7365 case Token::DIV:
7366 __ SmiDiv(rax, rax, rbx, slow);
7367 break;
7368
7369 case Token::MOD:
7370 __ SmiMod(rax, rax, rbx, slow);
7371 break;
7372
7373 case Token::BIT_OR:
7374 __ SmiOr(rax, rax, rbx);
7375 break;
7376
7377 case Token::BIT_AND:
7378 __ SmiAnd(rax, rax, rbx);
7379 break;
7380
7381 case Token::BIT_XOR:
7382 __ SmiXor(rax, rax, rbx);
7383 break;
7384
7385 case Token::SHL:
7386 case Token::SHR:
7387 case Token::SAR:
7388 // Move the second operand into register ecx.
7389 __ movq(rcx, rbx);
7390 // Perform the operation.
7391 switch (op_) {
7392 case Token::SAR:
7393 __ SmiShiftArithmeticRight(rax, rax, rcx);
7394 break;
7395 case Token::SHR:
7396 __ SmiShiftLogicalRight(rax, rax, rcx, slow);
7397 break;
7398 case Token::SHL:
7399 __ SmiShiftLeft(rax, rax, rcx, slow);
7400 break;
7401 default:
7402 UNREACHABLE();
7403 }
7404 break;
7405
7406 default:
7407 UNREACHABLE();
7408 break;
7409 }
7410}
7411
7412
7413void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
7414 Label call_runtime;
7415 if (flags_ == SMI_CODE_IN_STUB) {
7416 // The fast case smi code wasn't inlined in the stub caller
7417 // code. Generate it here to speed up common operations.
7418 Label slow;
7419 __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
7420 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
7421 GenerateSmiCode(masm, &slow);
7422 __ ret(2 * kPointerSize); // remove both operands
7423
7424 // Too bad. The fast case smi code didn't succeed.
7425 __ bind(&slow);
7426 }
7427
7428 // Setup registers.
7429 __ movq(rax, Operand(rsp, 1 * kPointerSize)); // get y
7430 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // get x
7431
7432 // Floating point case.
7433 switch (op_) {
7434 case Token::ADD:
7435 case Token::SUB:
7436 case Token::MUL:
7437 case Token::DIV: {
7438 // rax: y
7439 // rdx: x
7440 FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
7441 // Fast-case: Both operands are numbers.
7442 // Allocate a heap number, if needed.
7443 Label skip_allocation;
7444 switch (mode_) {
7445 case OVERWRITE_LEFT:
7446 __ movq(rax, rdx);
7447 // Fall through!
7448 case OVERWRITE_RIGHT:
7449 // If the argument in rax is already an object, we skip the
7450 // allocation of a heap number.
7451 __ JumpIfNotSmi(rax, &skip_allocation);
7452 // Fall through!
7453 case NO_OVERWRITE:
7454 __ AllocateHeapNumber(rax, rcx, &call_runtime);
7455 __ bind(&skip_allocation);
7456 break;
7457 default: UNREACHABLE();
7458 }
7459 // xmm4 and xmm5 are volatile XMM registers.
7460 FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
7461
7462 switch (op_) {
7463 case Token::ADD: __ addsd(xmm4, xmm5); break;
7464 case Token::SUB: __ subsd(xmm4, xmm5); break;
7465 case Token::MUL: __ mulsd(xmm4, xmm5); break;
7466 case Token::DIV: __ divsd(xmm4, xmm5); break;
7467 default: UNREACHABLE();
7468 }
7469 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
7470 __ ret(2 * kPointerSize);
7471 }
7472 case Token::MOD: {
7473 // For MOD we go directly to runtime in the non-smi case.
7474 break;
7475 }
7476 case Token::BIT_OR:
7477 case Token::BIT_AND:
7478 case Token::BIT_XOR:
7479 case Token::SAR:
7480 case Token::SHL:
7481 case Token::SHR: {
7482 FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
7483 // TODO(X64): Don't convert a Smi to float and then back to int32
7484 // afterwards.
7485 FloatingPointHelper::LoadFloatOperands(masm);
7486
7487 Label skip_allocation, non_smi_result, operand_conversion_failure;
7488
7489 // Reserve space for converted numbers.
7490 __ subq(rsp, Immediate(2 * kPointerSize));
7491
7492 if (use_sse3_) {
7493 // Truncate the operands to 32-bit integers and check for
7494 // exceptions in doing so.
7495 CpuFeatures::Scope scope(CpuFeatures::SSE3);
7496 __ fisttp_s(Operand(rsp, 0 * kPointerSize));
7497 __ fisttp_s(Operand(rsp, 1 * kPointerSize));
7498 __ fnstsw_ax();
7499 __ testl(rax, Immediate(1));
7500 __ j(not_zero, &operand_conversion_failure);
7501 } else {
7502 // Check if right operand is int32.
7503 __ fist_s(Operand(rsp, 0 * kPointerSize));
7504 __ fild_s(Operand(rsp, 0 * kPointerSize));
7505 __ FCmp();
7506 __ j(not_zero, &operand_conversion_failure);
7507 __ j(parity_even, &operand_conversion_failure);
7508
7509 // Check if left operand is int32.
7510 __ fist_s(Operand(rsp, 1 * kPointerSize));
7511 __ fild_s(Operand(rsp, 1 * kPointerSize));
7512 __ FCmp();
7513 __ j(not_zero, &operand_conversion_failure);
7514 __ j(parity_even, &operand_conversion_failure);
7515 }
7516
7517 // Get int32 operands and perform bitop.
7518 __ pop(rcx);
7519 __ pop(rax);
7520 switch (op_) {
7521 case Token::BIT_OR: __ orl(rax, rcx); break;
7522 case Token::BIT_AND: __ andl(rax, rcx); break;
7523 case Token::BIT_XOR: __ xorl(rax, rcx); break;
7524 case Token::SAR: __ sarl(rax); break;
7525 case Token::SHL: __ shll(rax); break;
7526 case Token::SHR: __ shrl(rax); break;
7527 default: UNREACHABLE();
7528 }
7529 if (op_ == Token::SHR) {
7530 // Check if result is non-negative. This can only happen for a shift
7531 // by zero, which also doesn't update the sign flag.
7532 __ testl(rax, rax);
7533 __ j(negative, &non_smi_result);
7534 }
7535 __ JumpIfNotValidSmiValue(rax, &non_smi_result);
7536 // Tag smi result, if possible, and return.
7537 __ Integer32ToSmi(rax, rax);
7538 __ ret(2 * kPointerSize);
7539
7540 // All ops except SHR return a signed int32 that we load in a HeapNumber.
7541 if (op_ != Token::SHR && non_smi_result.is_linked()) {
7542 __ bind(&non_smi_result);
7543 // Allocate a heap number if needed.
7544 __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
7545 switch (mode_) {
7546 case OVERWRITE_LEFT:
7547 case OVERWRITE_RIGHT:
7548 // If the operand was an object, we skip the
7549 // allocation of a heap number.
7550 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
7551 1 * kPointerSize : 2 * kPointerSize));
7552 __ JumpIfNotSmi(rax, &skip_allocation);
7553 // Fall through!
7554 case NO_OVERWRITE:
7555 __ AllocateHeapNumber(rax, rcx, &call_runtime);
7556 __ bind(&skip_allocation);
7557 break;
7558 default: UNREACHABLE();
7559 }
7560 // Store the result in the HeapNumber and return.
7561 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
7562 __ fild_s(Operand(rsp, 1 * kPointerSize));
7563 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
7564 __ ret(2 * kPointerSize);
7565 }
7566
7567 // Clear the FPU exception flag and reset the stack before calling
7568 // the runtime system.
7569 __ bind(&operand_conversion_failure);
7570 __ addq(rsp, Immediate(2 * kPointerSize));
7571 if (use_sse3_) {
7572 // If we've used the SSE3 instructions for truncating the
7573 // floating point values to integers and it failed, we have a
7574 // pending #IA exception. Clear it.
7575 __ fnclex();
7576 } else {
7577 // The non-SSE3 variant does early bailout if the right
7578 // operand isn't a 32-bit integer, so we may have a single
7579 // value on the FPU stack we need to get rid of.
7580 __ ffree(0);
7581 }
7582
7583 // SHR should return uint32 - go to runtime for non-smi/negative result.
7584 if (op_ == Token::SHR) {
7585 __ bind(&non_smi_result);
7586 }
7587 __ movq(rax, Operand(rsp, 1 * kPointerSize));
7588 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
7589 break;
7590 }
7591 default: UNREACHABLE(); break;
7592 }
7593
7594 // If all else fails, use the runtime system to get the correct
7595 // result.
7596 __ bind(&call_runtime);
7597 switch (op_) {
7598 case Token::ADD:
7599 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
7600 break;
7601 case Token::SUB:
7602 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
7603 break;
7604 case Token::MUL:
7605 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
7606 break;
7607 case Token::DIV:
7608 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
7609 break;
7610 case Token::MOD:
7611 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
7612 break;
7613 case Token::BIT_OR:
7614 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
7615 break;
7616 case Token::BIT_AND:
7617 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
7618 break;
7619 case Token::BIT_XOR:
7620 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
7621 break;
7622 case Token::SAR:
7623 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
7624 break;
7625 case Token::SHL:
7626 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
7627 break;
7628 case Token::SHR:
7629 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
7630 break;
7631 default:
7632 UNREACHABLE();
7633 }
7634}
7635
7636
7637int CompareStub::MinorKey() {
7638 // Encode the two parameters in a unique 16 bit value.
7639 ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
7640 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
7641}
7642
7643#undef __
7644
7645#define __ masm.
7646
7647#ifdef _WIN64
7648typedef double (*ModuloFunction)(double, double);
7649// Define custom fmod implementation.
7650ModuloFunction CreateModuloFunction() {
7651 size_t actual_size;
7652 byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
7653 &actual_size,
7654 true));
7655 CHECK(buffer);
7656 Assembler masm(buffer, actual_size);
7657 // Generated code is put into a fixed, unmovable, buffer, and not into
7658 // the V8 heap. We can't, and don't, refer to any relocatable addresses
7659 // (e.g. the JavaScript nan-object).
7660
7661 // Windows 64 ABI passes double arguments in xmm0, xmm1 and
7662 // returns result in xmm0.
7663 // Argument backing space is allocated on the stack above
7664 // the return address.
7665
7666 // Compute x mod y.
7667 // Load y and x (use argument backing store as temporary storage).
7668 __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
7669 __ movsd(Operand(rsp, kPointerSize), xmm0);
7670 __ fld_d(Operand(rsp, kPointerSize * 2));
7671 __ fld_d(Operand(rsp, kPointerSize));
7672
7673 // Clear exception flags before operation.
7674 {
7675 Label no_exceptions;
7676 __ fwait();
7677 __ fnstsw_ax();
7678 // Clear if Illegal Operand or Zero Division exceptions are set.
7679 __ testb(rax, Immediate(5));
7680 __ j(zero, &no_exceptions);
7681 __ fnclex();
7682 __ bind(&no_exceptions);
7683 }
7684
7685 // Compute st(0) % st(1)
7686 {
7687 Label partial_remainder_loop;
7688 __ bind(&partial_remainder_loop);
7689 __ fprem();
7690 __ fwait();
7691 __ fnstsw_ax();
7692 __ testl(rax, Immediate(0x400 /* C2 */));
7693 // If C2 is set, computation only has partial result. Loop to
7694 // continue computation.
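  // fprem only performs a partial reduction per iteration; C2 (bit 10 of the
  // x87 status word, hence the 0x400 mask) stays set until the remainder is
  // final.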
7695 __ j(not_zero, &partial_remainder_loop);
7696 }
7697
7698 Label valid_result;
7699 Label return_result;
7700 // If Invalid Operand or Zero Division exceptions are set,
7701 // return NaN.
7702 __ testb(rax, Immediate(5));
7703 __ j(zero, &valid_result);
7704 __ fstp(0); // Drop result in st(0).
7705 int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
7706 __ movq(rcx, kNaNValue, RelocInfo::NONE);
7707 __ movq(Operand(rsp, kPointerSize), rcx);
7708 __ movsd(xmm0, Operand(rsp, kPointerSize));
7709 __ jmp(&return_result);
7710
7711 // If result is valid, return that.
7712 __ bind(&valid_result);
7713 __ fstp_d(Operand(rsp, kPointerSize));
7714 __ movsd(xmm0, Operand(rsp, kPointerSize));
7715
7716 // Clean up FPU stack and exceptions and return xmm0
7717 __ bind(&return_result);
7718 __ fstp(0); // Unload y.
7719
7720 Label clear_exceptions;
7721 __ testb(rax, Immediate(0x3f /* Any Exception*/));
7722 __ j(not_zero, &clear_exceptions);
7723 __ ret(0);
7724 __ bind(&clear_exceptions);
7725 __ fnclex();
7726 __ ret(0);
7727
7728 CodeDesc desc;
7729 masm.GetCode(&desc);
7730 // Call the function from C++.
7731 return FUNCTION_CAST<ModuloFunction>(buffer);
7732}
7733
7734#endif
7735
7736#undef __
7737
7738} } // namespace v8::internal