// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "scopes.h"

namespace v8 {
namespace internal {

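// The "__" shorthand below routes assembler mnemonics through this code
// generator's MacroAssembler (masm_), so "__ movq(...)" emits into the
// current code buffer.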
#define __ ACCESS_MASM(masm_)

// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

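// An entry in registers_ is kIgnore, kPush, or an rbp-relative byte offset
// for the register's frame slot; the kSyncedFlag bit marks slots whose
// memory copy is already up to date, so the store below can be skipped.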
void DeferredCode::SaveRegisters() {
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order due to the stack.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      destination_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           ControlDestination* destination)
    : owner_(owner),
      destination_(destination),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// Deferred code objects
//
// These subclasses of DeferredCode add pieces of code to the end of generated
// code. They are branched to from the generated code, and
// keep some slower code out of the main body of the generated code.
// Many of them call a code stub or a runtime function.

class DeferredInlineSmiAdd: public DeferredCode {
 public:
  DeferredInlineSmiAdd(Register dst,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAdd");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


// The result of value + src is in dst. It either overflowed or was not
// smi tagged. Undo the speculative addition and call the appropriate
// specialized stub for add. The result is left in dst.
class DeferredInlineSmiAddReversed: public DeferredCode {
 public:
  DeferredInlineSmiAddReversed(Register dst,
                               Smi* value,
                               OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAddReversed");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


class DeferredInlineSmiSub: public DeferredCode {
 public:
  DeferredInlineSmiSub(Register dst,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiSub");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


// Call the appropriate binary operation stub to compute src op value
// and leave the result in dst.
class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             Register dst,
                             Register src,
                             Smi* value,
                             OverwriteMode overwrite_mode)
      : op_(op),
        dst_(dst),
        src_(src),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  Register dst_;
  Register src_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


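// Static helpers for getting smi or heap-number operands into the FPU or
// XMM registers expected by the generated stub code.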
class FloatingPointHelper : public AllStatic {
 public:
  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand on TOS+1. Returns operand as floating point number on FPU
  // stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register scratch);

  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in src register. Returns operand as floating point number
  // in XMM register
  static void LoadFloatOperand(MacroAssembler* masm,
                               Register src,
                               XMMRegister dst);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
  // floating point numbers in XMM registers.
  static void LoadFloatOperands(MacroAssembler* masm,
                                XMMRegister dst1,
                                XMMRegister dst2);

  // Code pattern for loading floating point values onto the fp stack.
  // Input values must be either smi or heap number objects (fp values).
  // Requirements:
  // Register version: operands in registers lhs and rhs.
  // Stack version: operands on TOS+1 and TOS+2.
  // Returns operands as floating point numbers on fp stack.
  static void LoadFloatOperands(MacroAssembler* masm);
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register lhs,
                                Register rhs);

  // Code pattern for loading a floating point value and converting it
  // to a 32 bit integer. Input value must be either a smi or a heap number
  // object.
  // Returns the operand as a 32-bit sign extended integer in a general
  // purpose register.
  static void LoadInt32Operand(MacroAssembler* masm,
                               const Operand& src,
                               Register dst);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in rax, operand_2 in rdx; falls through on float or smi
  // operands, jumps to the non_float label otherwise.
  static void CheckNumberOperands(MacroAssembler* masm,
                                  Label* non_float);
};


// -----------------------------------------------------------------------------
// CodeGenerator implementation.

CodeGenerator::CodeGenerator(int buffer_size,
                             Handle<Script> script,
                             bool is_eval)
    : is_eval_(is_eval),
      script_(script),
      deferred_(8),
      masm_(new MacroAssembler(NULL, buffer_size)),
      scope_(NULL),
      frame_(NULL),
      allocator_(NULL),
      state_(NULL),
      loop_nesting_(0),
      function_return_is_shadowed_(false),
      in_spilled_code_(false) {
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  // Call the runtime to declare the globals. The inevitable call
  // will sync frame elements to memory anyway, so we do it eagerly to
  // allow us to push the arguments directly into place.
  frame_->SyncRange(0, frame_->element_count() - 1);

  __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
  frame_->EmitPush(rsi);  // The context is the first argument.
  frame_->EmitPush(kScratchRegister);
  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // Return value is ignored.
}


void CodeGenerator::GenCode(FunctionLiteral* function) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(function);
  ZoneList<Statement*>* body = function->body();

  // Initialize state.
  ASSERT(scope_ == NULL);
  scope_ = function->scope();
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  set_in_spilled_code(false);

  // Adjust for function-level loop nesting.
  loop_nesting_ += function->loop_nesting();

  JumpTarget::set_compiling_deferred_code(false);

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    frame_->SpillAll();
    __ int3();
  }
#endif

  // New scope to get automatic timing calculation.
  { // NOLINT
    HistogramTimerScope codegen_timer(&Counters::code_generation);
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments, return address.
    // rbp: caller's frame pointer
    // rsp: stack pointer
    // rdi: called JS function
    // rsi: callee's context
    allocator_->Initialize();
    frame_->Enter();

    // Allocate space for locals and initialize them.
    frame_->AllocateStackSlots();
    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    // Allocate the local context if needed.
    if (scope_->num_heap_slots() > 0) {
      Comment cmnt(masm_, "[ allocate local context");
      // Allocate local context.
      // Get outer context and create a new context based on it.
      frame_->PushFunction();
      Result context = frame_->CallRuntime(Runtime::kNewContext, 1);

      // Update context local.
      frame_->SaveContextRegister();

      // Verify that the runtime call result and rsi agree.
      if (FLAG_debug_code) {
        __ cmpq(context.reg(), rsi);
        __ Assert(equal, "Runtime::NewContext should end up in rsi");
      }
    }

    // TODO(1241774): Improve this code:
    // 1) only needed if we have a context
    // 2) no need to recompute context ptr every single time
    // 3) don't copy parameter operand code from SlotOperand!
    {
      Comment cmnt2(masm_, "[ copy context parameters into .context");

      // Note that iteration order is relevant here! If we have the same
      // parameter twice (e.g., function (x, y, x)), and that parameter
      // needs to be copied into the context, it must be the last argument
      // passed to the parameter that needs to be copied. This is a rare
      // case so we don't check for it, instead we rely on the copying
      // order: such a parameter is copied repeatedly into the same
      // context location and thus the last value is what is seen inside
      // the function.
      for (int i = 0; i < scope_->num_parameters(); i++) {
        Variable* par = scope_->parameter(i);
        Slot* slot = par->slot();
        if (slot != NULL && slot->type() == Slot::CONTEXT) {
          // The use of SlotOperand below is safe in unspilled code
          // because the slot is guaranteed to be a context slot.
          //
          // There are no parameters in the global scope.
          ASSERT(!scope_->is_global_scope());
          frame_->PushParameterAt(i);
          Result value = frame_->Pop();
          value.ToRegister();

          // SlotOperand loads context.reg() with the context object
          // stored to, used below in RecordWrite.
          Result context = allocator_->Allocate();
          ASSERT(context.is_valid());
          __ movq(SlotOperand(slot, context.reg()), value.reg());
          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
          Result scratch = allocator_->Allocate();
          ASSERT(scratch.is_valid());
          frame_->Spill(context.reg());
          frame_->Spill(value.reg());
          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
        }
      }
    }

    // Store the arguments object. This must happen after context
    // initialization because the arguments object may be stored in
    // the context.
    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
      StoreArgumentsObject(true);
    }

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope_->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope_->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }
    CheckStack();

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(body);

      // Handle the return from the function.
      if (has_valid_frame()) {
        // If there is a valid frame, control flow can fall off the end of
        // the body. In that case there is an implicit return statement.
        ASSERT(!function_return_is_shadowed_);
        CodeForReturnPosition(function);
        frame_->PrepareForReturn();
        Result undefined(Factory::undefined_value());
        if (function_return_.is_bound()) {
          function_return_.Jump(&undefined);
        } else {
          function_return_.Bind(&undefined);
          GenerateReturnSequence(&undefined);
        }
      } else if (function_return_.is_linked()) {
        // If the return target has dangling jumps to it, then we have not
        // yet generated the return sequence. This can happen when (a)
        // control does not flow off the end of the body so we did not
        // compile an artificial return statement just above, and (b) there
        // are return statements in the body but (c) they are all shadowed.
        Result return_value;
        function_return_.Bind(&return_value);
        GenerateReturnSequence(&return_value);
      }
    }
  }

  // Adjust for function-level loop nesting.
  loop_nesting_ -= function->loop_nesting();

  // Code generation state must be reset.
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
    JumpTarget::set_compiling_deferred_code(true);
    ProcessDeferred();
    JumpTarget::set_compiling_deferred_code(false);
  }

  // There is no need to delete the register allocator, it is a
  // stack-allocated local.
  allocator_ = NULL;
  scope_ = NULL;
}

void CodeGenerator::GenerateReturnSequence(Result* return_value) {
  // The return value is a live (but not currently reference counted)
  // reference to rax. This is safe because the current frame does not
  // contain a reference to rax (it is prepared for the return by spilling
  // all registers).
  if (FLAG_trace) {
    frame_->Push(return_value);
    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
  }
  return_value->ToRegister(rax);

  // Add a label for checking the size of the code used for returning.
#ifdef DEBUG
  Label check_exit_codesize;
  masm_->bind(&check_exit_codesize);
#endif

  // Leave the frame and return popping the arguments and the
  // receiver.
  frame_->Exit();
  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Add padding that will be overwritten by a debugger breakpoint.
  // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
  // with length 7 (3 + 1 + 3).
  const int kPadding = Assembler::kJSReturnSequenceLength - 7;
  for (int i = 0; i < kPadding; ++i) {
    masm_->int3();
  }
  // Check that the size of the code used for returning matches what is
  // expected by the debugger.
  ASSERT_EQ(Assembler::kJSReturnSequenceLength,
            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
  DeleteFrame();
}


#ifdef DEBUG
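// Debug-only sanity check: at the points where this is asserted, the only
// live reference to each allocatable register should be the virtual frame's
// own use of it (a count of 1 if the frame uses the register, 0 otherwise).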
bool CodeGenerator::HasValidEntryRegisters() {
  return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
      && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
      && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
      && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
      && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
      && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
      && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
      && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
#endif


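// Deferred code for the inlined keyed load: the inline map check jumps here
// on a miss; this code calls the KeyedLoadIC stub and leaves the result in
// dst_.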
class DeferredReferenceGetKeyedValue: public DeferredCode {
 public:
  explicit DeferredReferenceGetKeyedValue(Register dst,
                                          Register receiver,
                                          Register key,
                                          bool is_global)
      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
    set_comment("[ DeferredReferenceGetKeyedValue");
  }

  virtual void Generate();

  Label* patch_site() { return &patch_site_; }

 private:
  Label patch_site_;
  Register dst_;
  Register receiver_;
  Register key_;
  bool is_global_;
};


void DeferredReferenceGetKeyedValue::Generate() {
  __ push(receiver_);  // First IC argument.
  __ push(key_);  // Second IC argument.

  // Calculate the delta from the IC call instruction to the map check
  // movq instruction in the inlined version. This delta is stored in
  // a test(rax, delta) instruction after the call so that we can find
  // it in the IC initialization code and patch the movq instruction.
  // This means that we cannot allow test instructions after calls to
  // KeyedLoadIC stubs in other places.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  RelocInfo::Mode mode = is_global_
      ? RelocInfo::CODE_TARGET_CONTEXT
      : RelocInfo::CODE_TARGET;
  __ Call(ic, mode);
  // The delta from the start of the map-compare instruction to the
  // test instruction. We use masm_-> directly here instead of the __
  // macro because the macro sometimes uses macro expansion to turn
  // into something that can't return a value. This is encountered
  // when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  // TODO(X64): Consider whether it's worth switching the test to a
  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
  // be generated normally.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);

  if (!dst_.is(rax)) __ movq(dst_, rax);
  __ pop(key_);
  __ pop(receiver_);
}


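// Deferred code for the inlined keyed store: reached when the inline fast
// case fails; it calls the KeyedStoreIC stub and then restores the value,
// key, and receiver registers.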
class DeferredReferenceSetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceSetKeyedValue(Register value,
                                 Register key,
                                 Register receiver)
      : value_(value), key_(key), receiver_(receiver) {
    set_comment("[ DeferredReferenceSetKeyedValue");
  }

  virtual void Generate();

  Label* patch_site() { return &patch_site_; }

 private:
  Register value_;
  Register key_;
  Register receiver_;
  Label patch_site_;
};


void DeferredReferenceSetKeyedValue::Generate() {
  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
  // Push receiver and key arguments on the stack.
  __ push(receiver_);
  __ push(key_);
  // Move value argument to rax as expected by the IC stub.
  if (!value_.is(rax)) __ movq(rax, value_);
  // Call the IC stub.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
  __ Call(ic, RelocInfo::CODE_TARGET);
  // The delta from the start of the map-compare instructions (initial movq)
  // to the test instruction. We use masm_-> directly here instead of the
  // __ macro because the macro sometimes uses macro expansion to turn
  // into something that can't return a value. This is encountered
  // when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  // Restore value (returned from store IC), key and receiver
  // registers.
  if (!value_.is(rax)) __ movq(value_, rax);
  __ pop(key_);
  __ pop(receiver_);
}


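// Called for calls of the form f.apply(receiver, arguments): try to invoke
// the function without allocating the arguments object. If the lazily
// allocated arguments object is still the hole, the actual arguments are
// copied directly from the (possibly adapted) caller frame; otherwise the
// code falls back to the generic Function.prototype.apply path.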
void CodeGenerator::CallApplyLazy(Property* apply,
                                  Expression* receiver,
                                  VariableProxy* arguments,
                                  int position) {
  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
  ASSERT(arguments->IsArguments());

  JumpTarget slow, done;

  // Load the apply function onto the stack. This will usually
  // give us a megamorphic load site. Not super, but it works.
  Reference ref(this, apply);
  ref.GetValue();
  ASSERT(ref.type() == Reference::NAMED);

  // Load the receiver and the existing arguments object onto the
  // expression stack. Avoid allocating the arguments object here.
  Load(receiver);
  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);

  // Emit the source position information after having loaded the
  // receiver and the arguments.
  CodeForSourcePosition(position);

  // Check if the arguments object has been lazily allocated
  // already. If so, just use that instead of copying the arguments
  // from the stack. This also deals with cases where a local variable
  // named 'arguments' has been introduced.
  frame_->Dup();
  Result probe = frame_->Pop();
  bool try_lazy = true;
  if (probe.is_constant()) {
    try_lazy = probe.handle()->IsTheHole();
  } else {
    __ Cmp(probe.reg(), Factory::the_hole_value());
    probe.Unuse();
    slow.Branch(not_equal);
  }

  if (try_lazy) {
    JumpTarget build_args;

    // Get rid of the arguments object probe.
    frame_->Drop();

    // Before messing with the execution stack, we sync all
    // elements. This is bound to happen anyway because we're
    // about to call a function.
    frame_->SyncRange(0, frame_->element_count() - 1);

    // Check that the receiver really is a JavaScript object.
    {
      frame_->PushElementAt(0);
      Result receiver = frame_->Pop();
      receiver.ToRegister();
      Condition is_smi = masm_->CheckSmi(receiver.reg());
      build_args.Branch(is_smi);
      // We allow all JSObjects including JSFunctions. As long as
      // JS_FUNCTION_TYPE is the last instance type and it is right
      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
      // bound.
      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
      ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
      __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
      build_args.Branch(below);
    }

    // Verify that we're invoking Function.prototype.apply.
    {
      frame_->PushElementAt(1);
      Result apply = frame_->Pop();
      apply.ToRegister();
      Condition is_smi = masm_->CheckSmi(apply.reg());
      build_args.Branch(is_smi);
      Result tmp = allocator_->Allocate();
      __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
      build_args.Branch(not_equal);
      __ movq(tmp.reg(),
              FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
      Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
      __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
             apply_code);
      build_args.Branch(not_equal);
    }

    // Get the function receiver from the stack. Check that it
    // really is a function.
    __ movq(rdi, Operand(rsp, 2 * kPointerSize));
    Condition is_smi = masm_->CheckSmi(rdi);
    build_args.Branch(is_smi);
    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
    build_args.Branch(not_equal);

    // Copy the arguments to this function possibly from the
    // adaptor frame below it.
    Label invoke, adapted;
    __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
    __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
                  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ j(equal, &adapted);

    // No arguments adaptor frame. Copy fixed number of arguments.
    __ movq(rax, Immediate(scope_->num_parameters()));
    for (int i = 0; i < scope_->num_parameters(); i++) {
      __ push(frame_->ParameterAt(i));
    }
    __ jmp(&invoke);

    // Arguments adaptor frame present. Copy arguments from there, but
    // avoid copying too many arguments to avoid stack overflows.
    __ bind(&adapted);
    static const uint32_t kArgumentsLimit = 1 * KB;
    __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ SmiToInteger32(rax, rax);
    __ movq(rcx, rax);
    __ cmpq(rax, Immediate(kArgumentsLimit));
    build_args.Branch(above);

    // Loop through the arguments pushing them onto the execution
    // stack. We don't inform the virtual frame of the push, so we don't
    // have to worry about getting rid of the elements from the virtual
    // frame.
    Label loop;
    __ testl(rcx, rcx);
    __ j(zero, &invoke);
    __ bind(&loop);
    __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
    __ decl(rcx);
    __ j(not_zero, &loop);

    // Invoke the function. The virtual frame knows about the receiver
    // so make sure to forget that explicitly.
    __ bind(&invoke);
    ParameterCount actual(rax);
    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
    frame_->Forget(1);
    Result result = allocator()->Allocate(rax);
    frame_->SetElementAt(0, &result);
    done.Jump();

    // Slow-case: Allocate the arguments object since we know it isn't
    // there, and fall-through to the slow-case where we call
    // Function.prototype.apply.
    build_args.Bind();
    Result arguments_object = StoreArgumentsObject(false);
    frame_->Push(&arguments_object);
    slow.Bind();
  }

  // Flip the apply function and the function to call on the stack, so
  // the function looks like the receiver of the apply call. This way,
  // the generic Function.prototype.apply implementation can deal with
  // the call like it usually does.
  Result a2 = frame_->Pop();
  Result a1 = frame_->Pop();
  Result ap = frame_->Pop();
  Result fn = frame_->Pop();
  frame_->Push(&ap);
  frame_->Push(&fn);
  frame_->Push(&a1);
  frame_->Push(&a2);
  CallFunctionStub call_function(2, NOT_IN_LOOP);
  Result res = frame_->CallStub(&call_function, 3);
  frame_->Push(&res);

  // All done. Restore context register after call.
  if (try_lazy) done.Bind();
  frame_->RestoreContextRegister();
}


class DeferredStackCheck: public DeferredCode {
 public:
  DeferredStackCheck() {
    set_comment("[ DeferredStackCheck");
  }

  virtual void Generate();
};


void DeferredStackCheck::Generate() {
  StackCheckStub stub;
  __ CallStub(&stub);
}


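// Emit a stack guard check: if rsp is below the current stack limit, jump
// to deferred code that calls the StackCheckStub, then fall through.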
void CodeGenerator::CheckStack() {
  DeferredStackCheck* deferred = new DeferredStackCheck;
  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
  deferred->Branch(below);
  deferred->BindExit();
}


void CodeGenerator::VisitAndSpill(Statement* statement) {
  // TODO(X64): No architecture specific code. Move to shared location.
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  Visit(statement);
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}


void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  VisitStatements(statements);
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
  ASSERT(!in_spilled_code());
  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
    Visit(statements->at(i));
  }
}


void CodeGenerator::VisitBlock(Block* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  VisitStatements(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call. Sync the virtual frame eagerly
    // so we can simply push the arguments into place.
    frame_->SyncRange(0, frame_->element_count() - 1);
    frame_->EmitPush(rsi);
    __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
    frame_->EmitPush(kScratchRegister);
    // Declaration nodes are always introduced in one of two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    frame_->EmitPush(Smi::FromInt(attr));
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
    }
    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    {
      // Set the initial value.
      Reference target(this, node->proxy());
      Load(val);
      target.SetValue(NOT_CONST_INIT);
      // The reference is removed from the stack (preserving TOS) when
      // it goes out of scope.
    }
    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  Load(expression);
  // Remove the lingering expression result from the top of stack.
  frame_->Drop();
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);
  JumpTarget exit;
  if (has_then_stm && has_else_stm) {
    JumpTarget then;
    JumpTarget else_;
    ControlDestination dest(&then, &else_, true);
    LoadCondition(node->condition(), &dest, true);

    if (dest.false_was_fall_through()) {
      // The else target was bound, so we compile the else part first.
      Visit(node->else_statement());

      // We may have dangling jumps to the then part.
      if (then.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then target was bound, so we compile the then part first.
      Visit(node->then_statement());

      if (else_.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    }

  } else if (has_then_stm) {
    ASSERT(!has_else_stm);
    JumpTarget then;
    ControlDestination dest(&then, &exit, true);
    LoadCondition(node->condition(), &dest, true);

    if (dest.false_was_fall_through()) {
      // The exit label was bound. We may have dangling jumps to the
      // then part.
      if (then.is_linked()) {
        exit.Unuse();
        exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then label was bound.
      Visit(node->then_statement());
    }

  } else if (has_else_stm) {
    ASSERT(!has_then_stm);
    JumpTarget else_;
    ControlDestination dest(&exit, &else_, false);
    LoadCondition(node->condition(), &dest, true);

    if (dest.true_was_fall_through()) {
      // The exit label was bound. We may have dangling jumps to the
      // else part.
      if (else_.is_linked()) {
        exit.Unuse();
        exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    } else {
      // The else label was bound.
      Visit(node->else_statement());
    }

  } else {
    ASSERT(!has_then_stm && !has_else_stm);
    // We only care about the condition's side effects (not its value
    // or control flow effect). LoadCondition is called without
    // forcing control flow.
    ControlDestination dest(&exit, &exit, true);
    LoadCondition(node->condition(), &dest, false);
    if (!dest.is_used()) {
      // We got a value on the frame rather than (or in addition to)
      // control flow.
      frame_->Drop();
    }
  }

  if (exit.is_linked()) {
    exit.Bind();
  }
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  Load(node->expression());
  Result return_value = frame_->Pop();
  if (function_return_is_shadowed_) {
    function_return_.Jump(&return_value);
  } else {
    frame_->PrepareForReturn();
    if (function_return_.is_bound()) {
      // If the function return label is already bound we reuse the
      // code by jumping to the return site.
      function_return_.Jump(&return_value);
    } else {
      function_return_.Bind(&return_value);
      GenerateReturnSequence(&return_value);
    }
  }
}


void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  Load(node->expression());
  Result context;
  if (node->is_catch_block()) {
    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    context = frame_->CallRuntime(Runtime::kPushContext, 1);
  }

  // Update context local.
  frame_->SaveContextRegister();

  // Verify that the runtime call result and rsi agree.
  if (FLAG_debug_code) {
    __ cmpq(context.reg(), rsi);
    __ Assert(equal, "Runtime::NewContext should end up in rsi");
  }
}


void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
  // Update context local.
  frame_->SaveContextRegister();
}


void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
  // TODO(X64): This code is completely generic and should be moved somewhere
  // where it can be shared between architectures.
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);

  // Compile the switch value.
  Load(node->tag());

  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;

  JumpTarget next_test;
  // Compile the case label expressions and comparisons. Exit early
  // if a comparison is unconditionally true. The target next_test is
  // bound before the loop in order to indicate control flow to the
  // first comparison.
  next_test.Bind();
  for (int i = 0; i < length && !next_test.is_unused(); i++) {
    CaseClause* clause = cases->at(i);
    // The default is not a test, but remember it for later.
    if (clause->is_default()) {
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case comparison");
    // We recycle the same target next_test for each test. Bind it if
    // the previous test has not done so and then unuse it for the
    // loop.
    if (next_test.is_linked()) {
      next_test.Bind();
    }
    next_test.Unuse();

    // Duplicate the switch value.
    frame_->Dup();

    // Compile the label expression.
    Load(clause->label());

    // Compare and branch to the body if true or the next test if
    // false. Prefer the next test as a fall through.
    ControlDestination dest(clause->body_target(), &next_test, false);
    Comparison(equal, true, &dest);

    // If the comparison fell through to the true target, jump to the
    // actual body.
    if (dest.true_was_fall_through()) {
      clause->body_target()->Unuse();
      clause->body_target()->Jump();
    }
  }

  // If there was control flow to a next test from the last one
  // compiled, compile a jump to the default or break target.
  if (!next_test.is_unused()) {
    if (next_test.is_linked()) {
      next_test.Bind();
    }
    // Drop the switch value.
    frame_->Drop();
    if (default_clause != NULL) {
      default_clause->body_target()->Jump();
    } else {
      node->break_target()->Jump();
    }
  }

  // The last instruction emitted was a jump, either to the default
  // clause or the break target, or else to a case body from the loop
  // that compiles the tests.
  ASSERT(!has_valid_frame());
  // Compile case bodies as needed.
  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);

    // There are two ways to reach the body: from the corresponding
    // test or as the fall through of the previous body.
    if (clause->body_target()->is_linked() || has_valid_frame()) {
      if (clause->body_target()->is_linked()) {
        if (has_valid_frame()) {
          // If we have both a jump to the test and a fall through, put
          // a jump on the fall through path to avoid the dropping of
          // the switch value on the test path. The exception is the
          // default which has already had the switch value dropped.
          if (clause->is_default()) {
            clause->body_target()->Bind();
          } else {
            JumpTarget body;
            body.Jump();
            clause->body_target()->Bind();
            frame_->Drop();
            body.Bind();
          }
        } else {
          // No fall through to worry about.
          clause->body_target()->Bind();
          if (!clause->is_default()) {
            frame_->Drop();
          }
        }
      } else {
        // Otherwise, we have only fall through.
        ASSERT(has_valid_frame());
      }

      // We are now prepared to compile the body.
      Comment cmnt(masm_, "[ Case body");
      VisitStatements(clause->statements());
    }
    clause->body_target()->Unuse();
  }

  // We may not have a valid frame here so bind the break target only
  // if needed.
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}


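// Loop compilation (do/while, while, for) first classifies the condition
// with AnalyzeCondition as ALWAYS_TRUE, ALWAYS_FALSE, or DONT_KNOW, so the
// test can be omitted when it is constant and, for while and for loops, the
// whole statement can be skipped when the condition is always false.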
Steve Block3ce2e202009-11-05 08:53:23 +00001263void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001264 ASSERT(!in_spilled_code());
Steve Block3ce2e202009-11-05 08:53:23 +00001265 Comment cmnt(masm_, "[ DoWhileStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001266 CodeForStatementPosition(node);
1267 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
Steve Block3ce2e202009-11-05 08:53:23 +00001268 JumpTarget body(JumpTarget::BIDIRECTIONAL);
1269 IncrementLoopNesting();
Steve Blocka7e24c12009-10-30 11:49:00 +00001270
Steve Block3ce2e202009-11-05 08:53:23 +00001271 ConditionAnalysis info = AnalyzeCondition(node->cond());
1272 // Label the top of the loop for the backward jump if necessary.
1273 switch (info) {
1274 case ALWAYS_TRUE:
1275 // Use the continue target.
1276 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1277 node->continue_target()->Bind();
1278 break;
1279 case ALWAYS_FALSE:
1280 // No need to label it.
1281 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1282 break;
1283 case DONT_KNOW:
1284 // Continue is the test, so use the backward body target.
1285 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1286 body.Bind();
1287 break;
Steve Blocka7e24c12009-10-30 11:49:00 +00001288 }
1289
Steve Block3ce2e202009-11-05 08:53:23 +00001290 CheckStack(); // TODO(1222600): ignore if body contains calls.
1291 Visit(node->body());
Steve Blocka7e24c12009-10-30 11:49:00 +00001292
Steve Block3ce2e202009-11-05 08:53:23 +00001293 // Compile the test.
1294 switch (info) {
1295 case ALWAYS_TRUE:
1296 // If control flow can fall off the end of the body, jump back
1297 // to the top and bind the break target at the exit.
1298 if (has_valid_frame()) {
1299 node->continue_target()->Jump();
Steve Blocka7e24c12009-10-30 11:49:00 +00001300 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001301 if (node->break_target()->is_linked()) {
1302 node->break_target()->Bind();
1303 }
1304 break;
Steve Block3ce2e202009-11-05 08:53:23 +00001305 case ALWAYS_FALSE:
1306 // We may have had continues or breaks in the body.
1307 if (node->continue_target()->is_linked()) {
1308 node->continue_target()->Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001309 }
Steve Block3ce2e202009-11-05 08:53:23 +00001310 if (node->break_target()->is_linked()) {
1311 node->break_target()->Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001312 }
Steve Block3ce2e202009-11-05 08:53:23 +00001313 break;
1314 case DONT_KNOW:
1315 // We have to compile the test expression if it can be reached by
1316 // control flow falling out of the body or via continue.
1317 if (node->continue_target()->is_linked()) {
1318 node->continue_target()->Bind();
1319 }
1320 if (has_valid_frame()) {
Steve Blockd0582a62009-12-15 09:54:21 +00001321 Comment cmnt(masm_, "[ DoWhileCondition");
1322 CodeForDoWhileConditionPosition(node);
Steve Block3ce2e202009-11-05 08:53:23 +00001323 ControlDestination dest(&body, node->break_target(), false);
Steve Blockd0582a62009-12-15 09:54:21 +00001324 LoadCondition(node->cond(), &dest, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001325 }
Steve Block3ce2e202009-11-05 08:53:23 +00001326 if (node->break_target()->is_linked()) {
1327 node->break_target()->Bind();
1328 }
1329 break;
1330 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001331
Steve Block3ce2e202009-11-05 08:53:23 +00001332 DecrementLoopNesting();
1333 node->continue_target()->Unuse();
1334 node->break_target()->Unuse();
1335}
Steve Blocka7e24c12009-10-30 11:49:00 +00001336
Steve Block3ce2e202009-11-05 08:53:23 +00001337
1338void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1339 ASSERT(!in_spilled_code());
1340 Comment cmnt(masm_, "[ WhileStatement");
1341 CodeForStatementPosition(node);
1342
1343 // If the condition is always false and has no side effects, we do not
1344 // need to compile anything.
1345 ConditionAnalysis info = AnalyzeCondition(node->cond());
1346 if (info == ALWAYS_FALSE) return;
1347
1348 // Do not duplicate conditions that may have function literal
1349 // subexpressions. This can cause us to compile the function literal
1350 // twice.
1351 bool test_at_bottom = !node->may_have_function_literal();
1352 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1353 IncrementLoopNesting();
1354 JumpTarget body;
1355 if (test_at_bottom) {
1356 body.set_direction(JumpTarget::BIDIRECTIONAL);
1357 }
1358
1359 // Based on the condition analysis, compile the test as necessary.
1360 switch (info) {
1361 case ALWAYS_TRUE:
1362 // We will not compile the test expression. Label the top of the
1363 // loop with the continue target.
1364 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1365 node->continue_target()->Bind();
1366 break;
1367 case DONT_KNOW: {
1368 if (test_at_bottom) {
1369 // Continue is the test at the bottom, no need to label the test
1370 // at the top. The body is a backward target.
1371 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1372 } else {
1373 // Label the test at the top as the continue target. The body
1374 // is a forward-only target.
1375 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1376 node->continue_target()->Bind();
1377 }
1378 // Compile the test with the body as the true target and preferred
1379 // fall-through and with the break target as the false target.
1380 ControlDestination dest(&body, node->break_target(), true);
Steve Blockd0582a62009-12-15 09:54:21 +00001381 LoadCondition(node->cond(), &dest, true);
Steve Block3ce2e202009-11-05 08:53:23 +00001382
1383 if (dest.false_was_fall_through()) {
1384 // If we got the break target as fall-through, the test may have
1385 // been unconditionally false (if there are no jumps to the
1386 // body).
1387 if (!body.is_linked()) {
1388 DecrementLoopNesting();
1389 return;
1390 }
1391
1392 // Otherwise, jump around the body on the fall through and then
1393 // bind the body target.
1394 node->break_target()->Unuse();
1395 node->break_target()->Jump();
1396 body.Bind();
1397 }
1398 break;
1399 }
1400 case ALWAYS_FALSE:
1401 UNREACHABLE();
1402 break;
1403 }
1404
1405 CheckStack(); // TODO(1222600): ignore if body contains calls.
1406 Visit(node->body());
1407
1408 // Based on the condition analysis, compile the backward jump as
1409 // necessary.
1410 switch (info) {
1411 case ALWAYS_TRUE:
1412 // The loop body has been labeled with the continue target.
1413 if (has_valid_frame()) {
1414 node->continue_target()->Jump();
1415 }
1416 break;
1417 case DONT_KNOW:
1418 if (test_at_bottom) {
1419 // If we have chosen to recompile the test at the bottom,
1420 // then it is the continue target.
Steve Blocka7e24c12009-10-30 11:49:00 +00001421 if (node->continue_target()->is_linked()) {
1422 node->continue_target()->Bind();
1423 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001424 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001425 // The break target is the fall-through (body is a backward
1426 // jump from here and thus an invalid fall-through).
1427 ControlDestination dest(&body, node->break_target(), false);
Steve Blockd0582a62009-12-15 09:54:21 +00001428 LoadCondition(node->cond(), &dest, true);
Steve Block3ce2e202009-11-05 08:53:23 +00001429 }
1430 } else {
1431 // If we have chosen not to recompile the test at the
1432 // bottom, jump back to the one at the top.
1433 if (has_valid_frame()) {
1434 node->continue_target()->Jump();
Steve Blocka7e24c12009-10-30 11:49:00 +00001435 }
1436 }
Steve Block3ce2e202009-11-05 08:53:23 +00001437 break;
1438 case ALWAYS_FALSE:
1439 UNREACHABLE();
1440 break;
1441 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001442
Steve Block3ce2e202009-11-05 08:53:23 +00001443 // The break target may be already bound (by the condition), or there
1444 // may not be a valid frame. Bind it only if needed.
1445 if (node->break_target()->is_linked()) {
1446 node->break_target()->Bind();
1447 }
1448 DecrementLoopNesting();
1449}
1450
1451
1452void CodeGenerator::VisitForStatement(ForStatement* node) {
1453 ASSERT(!in_spilled_code());
1454 Comment cmnt(masm_, "[ ForStatement");
1455 CodeForStatementPosition(node);
1456
1457 // Compile the init expression if present.
1458 if (node->init() != NULL) {
1459 Visit(node->init());
1460 }
1461
1462 // If the condition is always false and has no side effects, we do not
1463 // need to compile anything else.
1464 ConditionAnalysis info = AnalyzeCondition(node->cond());
1465 if (info == ALWAYS_FALSE) return;
1466
1467 // Do not duplicate conditions that may have function literal
1468 // subexpressions. This can cause us to compile the function literal
1469 // twice.
1470 bool test_at_bottom = !node->may_have_function_literal();
1471 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1472 IncrementLoopNesting();
1473
1474 // Target for backward edge if no test at the bottom, otherwise
1475 // unused.
1476 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1477
1478 // Target for backward edge if there is a test at the bottom,
1479 // otherwise used as target for test at the top.
1480 JumpTarget body;
1481 if (test_at_bottom) {
1482 body.set_direction(JumpTarget::BIDIRECTIONAL);
1483 }
1484
1485 // Based on the condition analysis, compile the test as necessary.
1486 switch (info) {
1487 case ALWAYS_TRUE:
1488 // We will not compile the test expression. Label the top of the
1489 // loop.
1490 if (node->next() == NULL) {
1491 // Use the continue target if there is no update expression.
1492 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1493 node->continue_target()->Bind();
1494 } else {
1495 // Otherwise use the backward loop target.
1496 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1497 loop.Bind();
1498 }
1499 break;
1500 case DONT_KNOW: {
1501 if (test_at_bottom) {
1502 // Continue is either the update expression or the test at the
1503 // bottom, no need to label the test at the top.
1504 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1505 } else if (node->next() == NULL) {
1506 // We are not recompiling the test at the bottom and there is no
1507 // update expression.
1508 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1509 node->continue_target()->Bind();
1510 } else {
1511 // We are not recompiling the test at the bottom and there is an
1512 // update expression.
1513 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1514 loop.Bind();
1515 }
1516
1517 // Compile the test with the body as the true target and preferred
1518 // fall-through and with the break target as the false target.
1519 ControlDestination dest(&body, node->break_target(), true);
Steve Blockd0582a62009-12-15 09:54:21 +00001520 LoadCondition(node->cond(), &dest, true);
Steve Block3ce2e202009-11-05 08:53:23 +00001521
1522 if (dest.false_was_fall_through()) {
1523 // If we got the break target as fall-through, the test may have
1524 // been unconditionally false (if there are no jumps to the
1525 // body).
1526 if (!body.is_linked()) {
1527 DecrementLoopNesting();
1528 return;
1529 }
1530
1531 // Otherwise, jump around the body on the fall through and then
1532 // bind the body target.
1533 node->break_target()->Unuse();
1534 node->break_target()->Jump();
1535 body.Bind();
1536 }
1537 break;
1538 }
1539 case ALWAYS_FALSE:
1540 UNREACHABLE();
1541 break;
1542 }
1543
1544 CheckStack(); // TODO(1222600): ignore if body contains calls.
1545 Visit(node->body());
1546
1547 // If there is an update expression, compile it if necessary.
1548 if (node->next() != NULL) {
1549 if (node->continue_target()->is_linked()) {
1550 node->continue_target()->Bind();
1551 }
1552
1553 // Control can reach the update by falling out of the body or by a
1554 // continue.
1555 if (has_valid_frame()) {
1556 // Record the source position of the statement, since this code,
1557 // which comes after the code for the body, actually belongs to the
1558 // loop statement and not to the body.
1559 CodeForStatementPosition(node);
1560 Visit(node->next());
1561 }
1562 }
1563
1564 // Based on the condition analysis, compile the backward jump as
1565 // necessary.
1566 switch (info) {
1567 case ALWAYS_TRUE:
1568 if (has_valid_frame()) {
1569 if (node->next() == NULL) {
1570 node->continue_target()->Jump();
1571 } else {
1572 loop.Jump();
1573 }
1574 }
1575 break;
1576 case DONT_KNOW:
1577 if (test_at_bottom) {
1578 if (node->continue_target()->is_linked()) {
1579 // We can have dangling jumps to the continue target if there
1580 // was no update expression.
1581 node->continue_target()->Bind();
1582 }
1583 // Control can reach the test at the bottom by falling out of
1584 // the body, by a continue in the body, or from the update
1585 // expression.
1586 if (has_valid_frame()) {
1587 // The break target is the fall-through (body is a backward
1588 // jump from here).
1589 ControlDestination dest(&body, node->break_target(), false);
Steve Blockd0582a62009-12-15 09:54:21 +00001590 LoadCondition(node->cond(), &dest, true);
Steve Block3ce2e202009-11-05 08:53:23 +00001591 }
1592 } else {
1593 // Otherwise, jump back to the test at the top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001594 if (has_valid_frame()) {
1595 if (node->next() == NULL) {
1596 node->continue_target()->Jump();
1597 } else {
1598 loop.Jump();
1599 }
1600 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001601 }
1602 break;
Steve Block3ce2e202009-11-05 08:53:23 +00001603 case ALWAYS_FALSE:
1604 UNREACHABLE();
1605 break;
Steve Blocka7e24c12009-10-30 11:49:00 +00001606 }
1607
Steve Block3ce2e202009-11-05 08:53:23 +00001608 // The break target may be already bound (by the condition), or there
1609 // may not be a valid frame. Bind it only if needed.
1610 if (node->break_target()->is_linked()) {
1611 node->break_target()->Bind();
1612 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001613 DecrementLoopNesting();
Steve Blocka7e24c12009-10-30 11:49:00 +00001614}
1615
1616
1617void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1618 ASSERT(!in_spilled_code());
1619 VirtualFrame::SpilledScope spilled_scope;
1620 Comment cmnt(masm_, "[ ForInStatement");
1621 CodeForStatementPosition(node);
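// JavaScript example: 'for (var key in obj) { ... }'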
1622
1623 JumpTarget primitive;
1624 JumpTarget jsobject;
1625 JumpTarget fixed_array;
1626 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1627 JumpTarget end_del_check;
1628 JumpTarget exit;
1629
1630 // Get the object to enumerate over (converted to JSObject).
1631 LoadAndSpill(node->enumerable());
1632
1633 // Both SpiderMonkey and KJS ignore null and undefined, in contrast
1634 // to the specification; section 12.6.4 mandates a call to ToObject.
1635 frame_->EmitPop(rax);
1636
1637 // rax: value to be iterated over
1638 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1639 exit.Branch(equal);
1640 __ CompareRoot(rax, Heap::kNullValueRootIndex);
1641 exit.Branch(equal);
1642
1643 // Stack layout in body:
1644 // [iteration counter (smi)] <- slot 0
1645 // [length of array] <- slot 1
1646 // [FixedArray] <- slot 2
1647 // [Map or 0] <- slot 3
1648 // [Object] <- slot 4
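// These slots are filled below: the object once it is known to be a
// JSObject, and slots 3..0 in the use_cache and fixed_array blocks.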
1649
1650 // Check if the enumerable is already a JSObject.
1651 // rax: value to be iterated over
1652 Condition is_smi = masm_->CheckSmi(rax);
1653 primitive.Branch(is_smi);
1654 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
1655 jsobject.Branch(above_equal);
1656
1657 primitive.Bind();
1658 frame_->EmitPush(rax);
1659 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
1660 // The builtin call returns the value in rax, which is where we want it below.
1661
1662 jsobject.Bind();
1663 // Get the set of properties (as a FixedArray or Map).
1664 // rax: value to be iterated over
Steve Blockd0582a62009-12-15 09:54:21 +00001665 frame_->EmitPush(rax); // Push the object being iterated over.
Steve Blocka7e24c12009-10-30 11:49:00 +00001666
Steve Blockd0582a62009-12-15 09:54:21 +00001667
1668 // Check cache validity in generated code. This is a fast case for
1669 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
1670 // guarantee cache validity, call the runtime system to check cache
1671 // validity or get the property names in a fixed array.
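// Roughly, the generated check walks the prototype chain starting at
// the receiver and requires, for each object: no elements, non-empty
// instance descriptors with an enum cache, and (for every object but
// the receiver) an empty enum cache; it stops when the prototype is null.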
1672 JumpTarget call_runtime;
1673 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1674 JumpTarget check_prototype;
1675 JumpTarget use_cache;
1676 __ movq(rcx, rax);
1677 loop.Bind();
1678 // Check that there are no elements.
1679 __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
1680 __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
1681 call_runtime.Branch(not_equal);
1682 // Check that instance descriptors are not empty so that we can
1683 // check for an enum cache. Leave the map in rbx for the subsequent
1684 // prototype load.
1685 __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
1686 __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
1687 __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
1688 call_runtime.Branch(equal);
1689 // Check that there is an enum cache in the non-empty instance
1690 // descriptors. This is the case if the next enumeration index
1691 // field does not contain a smi.
1692 __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
1693 is_smi = masm_->CheckSmi(rdx);
1694 call_runtime.Branch(is_smi);
1695 // For all objects but the receiver, check that the cache is empty.
1696 __ cmpq(rcx, rax);
1697 check_prototype.Branch(equal);
1698 __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
1699 __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
1700 call_runtime.Branch(not_equal);
1701 check_prototype.Bind();
1702 // Load the prototype from the map and loop if non-null.
1703 __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
1704 __ CompareRoot(rcx, Heap::kNullValueRootIndex);
1705 loop.Branch(not_equal);
1706 // The enum cache is valid. Load the map of the object being
1707 // iterated over and use the cache for the iteration.
1708 __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
1709 use_cache.Jump();
1710
1711 call_runtime.Bind();
1712 // Call the runtime to get the property names for the object.
Steve Blocka7e24c12009-10-30 11:49:00 +00001713 frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
1714 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1715
1716 // If we got a Map, we can do a fast modification check.
1717 // Otherwise, we got a FixedArray, and we have to do a slow check.
1718 // rax: map or fixed array (result from call to
1719 // Runtime::kGetPropertyNamesFast)
1720 __ movq(rdx, rax);
1721 __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
1722 __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
1723 fixed_array.Branch(not_equal);
1724
Steve Blockd0582a62009-12-15 09:54:21 +00001725 use_cache.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001726 // Get enum cache
Steve Blockd0582a62009-12-15 09:54:21 +00001727 // rax: map (either the result from a call to
1728 // Runtime::kGetPropertyNamesFast or has been fetched directly from
1729 // the object)
Steve Blocka7e24c12009-10-30 11:49:00 +00001730 __ movq(rcx, rax);
1731 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
1732 // Get the bridge array held in the enumeration index field.
1733 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
1734 // Get the cache from the bridge array.
1735 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
1736
1737 frame_->EmitPush(rax); // <- slot 3
1738 frame_->EmitPush(rdx); // <- slot 2
1739 __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
1740 __ Integer32ToSmi(rax, rax);
1741 frame_->EmitPush(rax); // <- slot 1
Steve Block3ce2e202009-11-05 08:53:23 +00001742 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
Steve Blocka7e24c12009-10-30 11:49:00 +00001743 entry.Jump();
1744
1745 fixed_array.Bind();
1746 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
Steve Block3ce2e202009-11-05 08:53:23 +00001747 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
Steve Blocka7e24c12009-10-30 11:49:00 +00001748 frame_->EmitPush(rax); // <- slot 2
1749
1750 // Push the length of the array and the initial index onto the stack.
1751 __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
1752 __ Integer32ToSmi(rax, rax);
1753 frame_->EmitPush(rax); // <- slot 1
Steve Block3ce2e202009-11-05 08:53:23 +00001754 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
Steve Blocka7e24c12009-10-30 11:49:00 +00001755
1756 // Condition.
1757 entry.Bind();
1758 // Grab the current frame's height for the break and continue
1759 // targets only after all the state is pushed on the frame.
1760 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1761 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1762
1763 __ movq(rax, frame_->ElementAt(0)); // load the current count
Steve Block3ce2e202009-11-05 08:53:23 +00001764 __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
1765 node->break_target()->Branch(below_equal);
Steve Blocka7e24c12009-10-30 11:49:00 +00001766
1767 // Get the i'th entry of the array.
1768 __ movq(rdx, frame_->ElementAt(2));
1769 SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
1770 __ movq(rbx,
1771 FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
1772
1773 // Get the expected map from the stack or a zero map in the
1774 // permanent slow case. rax: current iteration count,
1775 // rbx: i'th entry of the enum cache.
1776 __ movq(rdx, frame_->ElementAt(3));
1777 // Check if the expected map still matches that of the enumerable.
1778 // If not, we have to filter the key.
1779 // rax: current iteration count
1780 // rbx: i'th entry of the enum cache
1781 // rdx: expected map value
1782 __ movq(rcx, frame_->ElementAt(4));
1783 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
1784 __ cmpq(rcx, rdx);
1785 end_del_check.Branch(equal);
1786
1787 // Convert the entry to a string (or null if it isn't a property anymore).
1788 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
1789 frame_->EmitPush(rbx); // push entry
1790 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
1791 __ movq(rbx, rax);
1792
1793 // If the property has been removed while iterating, we just skip it.
1794 __ CompareRoot(rbx, Heap::kNullValueRootIndex);
1795 node->continue_target()->Branch(equal);
1796
1797 end_del_check.Bind();
1798 // Store the entry in the 'each' expression and take another spin in the
1799 // loop. rbx: i'th entry of the enum cache (or string thereof).
1800 frame_->EmitPush(rbx);
1801 { Reference each(this, node->each());
1802 // Loading a reference may leave the frame in an unspilled state.
1803 frame_->SpillAll();
1804 if (!each.is_illegal()) {
1805 if (each.size() > 0) {
1806 frame_->EmitPush(frame_->ElementAt(each.size()));
1807 }
1808 // If the reference was to a slot we rely on the convenient property
1809 // that it doesn't matter whether a value (e.g., rbx pushed above) is
1810 // right on top of or right underneath a zero-sized reference.
1811 each.SetValue(NOT_CONST_INIT);
1812 if (each.size() > 0) {
1813 // It's safe to pop the value lying on top of the reference before
1814 // unloading the reference itself (which preserves the top of stack,
1815 // i.e., now the topmost value of the non-zero-sized reference), since
1816 // we will discard the top of stack after unloading the reference
1817 // anyway.
1818 frame_->Drop();
1819 }
1820 }
1821 }
1822 // Unloading a reference may leave the frame in an unspilled state.
1823 frame_->SpillAll();
1824
1825 // Discard the i'th entry pushed above or else the remainder of the
1826 // reference, whichever is currently on top of the stack.
1827 frame_->Drop();
1828
1829 // Body.
1830 CheckStack(); // TODO(1222600): ignore if body contains calls.
1831 VisitAndSpill(node->body());
1832
1833 // Next. Reestablish a spilled frame in case we are coming here via
1834 // a continue in the body.
1835 node->continue_target()->Bind();
1836 frame_->SpillAll();
1837 frame_->EmitPop(rax);
Steve Block3ce2e202009-11-05 08:53:23 +00001838 __ SmiAddConstant(rax, rax, Smi::FromInt(1));
Steve Blocka7e24c12009-10-30 11:49:00 +00001839 frame_->EmitPush(rax);
1840 entry.Jump();
1841
1842 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1843 // any frame.
1844 node->break_target()->Bind();
1845 frame_->Drop(5);
1846
1847 // Exit.
1848 exit.Bind();
1849
1850 node->continue_target()->Unuse();
1851 node->break_target()->Unuse();
1852}
1853
Steve Block3ce2e202009-11-05 08:53:23 +00001854void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001855 ASSERT(!in_spilled_code());
1856 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001857 Comment cmnt(masm_, "[ TryCatchStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001858 CodeForStatementPosition(node);
1859
1860 JumpTarget try_block;
1861 JumpTarget exit;
1862
1863 try_block.Call();
1864 // --- Catch block ---
1865 frame_->EmitPush(rax);
1866
1867 // Store the caught exception in the catch variable.
1868 { Reference ref(this, node->catch_var());
1869 ASSERT(ref.is_slot());
1870 // Load the exception to the top of the stack. Here we make use of the
1871 // convenient property that it doesn't matter whether a value is
1872 // immediately on top of or underneath a zero-sized reference.
1873 ref.SetValue(NOT_CONST_INIT);
1874 }
1875
1876 // Remove the exception from the stack.
1877 frame_->Drop();
1878
1879 VisitStatementsAndSpill(node->catch_block()->statements());
1880 if (has_valid_frame()) {
1881 exit.Jump();
1882 }
1883
1884
1885 // --- Try block ---
1886 try_block.Bind();
1887
1888 frame_->PushTryHandler(TRY_CATCH_HANDLER);
1889 int handler_height = frame_->height();
1890
1891 // Shadow the jump targets for all escapes from the try block, including
1892 // returns. During shadowing, the original target is hidden as the
1893 // ShadowTarget and operations on the original actually affect the
1894 // shadowing target.
1895 //
1896 // We should probably try to unify the escaping targets and the return
1897 // target.
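// For example (hypothetical), in 'try { if (p) return x; } catch (e) {}'
// the return escapes the try block and is routed through a shadow
// target so that the handler can be unlinked before the actual return.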
1898 int nof_escapes = node->escaping_targets()->length();
1899 List<ShadowTarget*> shadows(1 + nof_escapes);
1900
1901 // Add the shadow target for the function return.
1902 static const int kReturnShadowIndex = 0;
1903 shadows.Add(new ShadowTarget(&function_return_));
1904 bool function_return_was_shadowed = function_return_is_shadowed_;
1905 function_return_is_shadowed_ = true;
1906 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
1907
1908 // Add the remaining shadow targets.
1909 for (int i = 0; i < nof_escapes; i++) {
1910 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
1911 }
1912
1913 // Generate code for the statements in the try block.
1914 VisitStatementsAndSpill(node->try_block()->statements());
1915
1916 // Stop the introduced shadowing and count the number of required unlinks.
1917 // After shadowing stops, the original targets are unshadowed and the
1918 // ShadowTargets represent the formerly shadowing targets.
1919 bool has_unlinks = false;
1920 for (int i = 0; i < shadows.length(); i++) {
1921 shadows[i]->StopShadowing();
1922 has_unlinks = has_unlinks || shadows[i]->is_linked();
1923 }
1924 function_return_is_shadowed_ = function_return_was_shadowed;
1925
1926 // Get an external reference to the handler address.
1927 ExternalReference handler_address(Top::k_handler_address);
1928
1929 // Make sure that there's nothing left on the stack above the
1930 // handler structure.
1931 if (FLAG_debug_code) {
1932 __ movq(kScratchRegister, handler_address);
1933 __ cmpq(rsp, Operand(kScratchRegister, 0));
1934 __ Assert(equal, "stack pointer should point to top handler");
1935 }
1936
1937 // If we can fall off the end of the try block, unlink from try chain.
1938 if (has_valid_frame()) {
1939 // The next handler address is on top of the frame. Unlink from
1940 // the handler list and drop the rest of this handler from the
1941 // frame.
1942 ASSERT(StackHandlerConstants::kNextOffset == 0);
1943 __ movq(kScratchRegister, handler_address);
1944 frame_->EmitPop(Operand(kScratchRegister, 0));
1945 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
1946 if (has_unlinks) {
1947 exit.Jump();
1948 }
1949 }
1950
1951 // Generate unlink code for the (formerly) shadowing targets that
1952 // have been jumped to. Deallocate each shadow target.
1953 Result return_value;
1954 for (int i = 0; i < shadows.length(); i++) {
1955 if (shadows[i]->is_linked()) {
1956 // Unlink from try chain; be careful not to destroy the TOS if
1957 // there is one.
1958 if (i == kReturnShadowIndex) {
1959 shadows[i]->Bind(&return_value);
1960 return_value.ToRegister(rax);
1961 } else {
1962 shadows[i]->Bind();
1963 }
1964 // Because we can be jumping here (to spilled code) from
1965 // unspilled code, we need to reestablish a spilled frame at
1966 // this block.
1967 frame_->SpillAll();
1968
1969 // Reload rsp from the top handler, because some statements that we
1970 // break from (e.g., for...in) may have left stuff on the stack.
1971 __ movq(kScratchRegister, handler_address);
1972 __ movq(rsp, Operand(kScratchRegister, 0));
1973 frame_->Forget(frame_->height() - handler_height);
1974
1975 ASSERT(StackHandlerConstants::kNextOffset == 0);
1976 __ movq(kScratchRegister, handler_address);
1977 frame_->EmitPop(Operand(kScratchRegister, 0));
1978 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
1979
1980 if (i == kReturnShadowIndex) {
1981 if (!function_return_is_shadowed_) frame_->PrepareForReturn();
1982 shadows[i]->other_target()->Jump(&return_value);
1983 } else {
1984 shadows[i]->other_target()->Jump();
1985 }
1986 }
1987 }
1988
1989 exit.Bind();
1990}
1991
1992
Steve Block3ce2e202009-11-05 08:53:23 +00001993void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001994 ASSERT(!in_spilled_code());
1995 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001996 Comment cmnt(masm_, "[ TryFinallyStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001997 CodeForStatementPosition(node);
1998
1999 // State: Used to keep track of the reason for entering the finally
2000 // block. Should probably be extended to hold information for
2001 // break/continue from within the try block.
2002 enum { FALLING, THROWING, JUMPING };
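// rcx carries the state into the finally block: THROWING when an
// exception was thrown in the try block, FALLING when control falls
// off the end of the try block, and JUMPING + i when arriving from
// the (formerly) shadowing target with index i.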
2003
2004 JumpTarget try_block;
2005 JumpTarget finally_block;
2006
2007 try_block.Call();
2008
2009 frame_->EmitPush(rax);
2010 // In case of thrown exceptions, this is where we continue.
Steve Block3ce2e202009-11-05 08:53:23 +00002011 __ Move(rcx, Smi::FromInt(THROWING));
Steve Blocka7e24c12009-10-30 11:49:00 +00002012 finally_block.Jump();
2013
2014 // --- Try block ---
2015 try_block.Bind();
2016
2017 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2018 int handler_height = frame_->height();
2019
2020 // Shadow the jump targets for all escapes from the try block, including
2021 // returns. During shadowing, the original target is hidden as the
2022 // ShadowTarget and operations on the original actually affect the
2023 // shadowing target.
2024 //
2025 // We should probably try to unify the escaping targets and the return
2026 // target.
2027 int nof_escapes = node->escaping_targets()->length();
2028 List<ShadowTarget*> shadows(1 + nof_escapes);
2029
2030 // Add the shadow target for the function return.
2031 static const int kReturnShadowIndex = 0;
2032 shadows.Add(new ShadowTarget(&function_return_));
2033 bool function_return_was_shadowed = function_return_is_shadowed_;
2034 function_return_is_shadowed_ = true;
2035 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2036
2037 // Add the remaining shadow targets.
2038 for (int i = 0; i < nof_escapes; i++) {
2039 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2040 }
2041
2042 // Generate code for the statements in the try block.
2043 VisitStatementsAndSpill(node->try_block()->statements());
2044
2045 // Stop the introduced shadowing and count the number of required unlinks.
2046 // After shadowing stops, the original targets are unshadowed and the
2047 // ShadowTargets represent the formerly shadowing targets.
2048 int nof_unlinks = 0;
2049 for (int i = 0; i < shadows.length(); i++) {
2050 shadows[i]->StopShadowing();
2051 if (shadows[i]->is_linked()) nof_unlinks++;
2052 }
2053 function_return_is_shadowed_ = function_return_was_shadowed;
2054
2055 // Get an external reference to the handler address.
2056 ExternalReference handler_address(Top::k_handler_address);
2057
2058 // If we can fall off the end of the try block, unlink from the try
2059 // chain and set the state on the frame to FALLING.
2060 if (has_valid_frame()) {
2061 // The next handler address is on top of the frame.
2062 ASSERT(StackHandlerConstants::kNextOffset == 0);
2063 __ movq(kScratchRegister, handler_address);
2064 frame_->EmitPop(Operand(kScratchRegister, 0));
2065 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2066
2067 // Fake a top of stack value (unneeded when FALLING) and set the
2068 // state in rcx, then jump around the unlink blocks if any.
2069 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
Steve Block3ce2e202009-11-05 08:53:23 +00002070 __ Move(rcx, Smi::FromInt(FALLING));
Steve Blocka7e24c12009-10-30 11:49:00 +00002071 if (nof_unlinks > 0) {
2072 finally_block.Jump();
2073 }
2074 }
2075
2076 // Generate code to unlink and set the state for the (formerly)
2077 // shadowing targets that have been jumped to.
2078 for (int i = 0; i < shadows.length(); i++) {
2079 if (shadows[i]->is_linked()) {
2080 // If we have come from the shadowed return, the return value is
2081 // on the virtual frame. We must preserve it until it is
2082 // pushed.
2083 if (i == kReturnShadowIndex) {
2084 Result return_value;
2085 shadows[i]->Bind(&return_value);
2086 return_value.ToRegister(rax);
2087 } else {
2088 shadows[i]->Bind();
2089 }
2090 // Because we can be jumping here (to spilled code) from
2091 // unspilled code, we need to reestablish a spilled frame at
2092 // this block.
2093 frame_->SpillAll();
2094
2095 // Reload rsp from the top handler, because some statements that
2096 // we break from (e.g., for...in) may have left stuff on the
2097 // stack.
2098 __ movq(kScratchRegister, handler_address);
2099 __ movq(rsp, Operand(kScratchRegister, 0));
2100 frame_->Forget(frame_->height() - handler_height);
2101
2102 // Unlink this handler and drop it from the frame.
2103 ASSERT(StackHandlerConstants::kNextOffset == 0);
2104 __ movq(kScratchRegister, handler_address);
2105 frame_->EmitPop(Operand(kScratchRegister, 0));
2106 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2107
2108 if (i == kReturnShadowIndex) {
2109 // If this target shadowed the function return, materialize
2110 // the return value on the stack.
2111 frame_->EmitPush(rax);
2112 } else {
2113 // Fake TOS for targets that shadowed breaks and continues.
2114 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
2115 }
Steve Block3ce2e202009-11-05 08:53:23 +00002116 __ Move(rcx, Smi::FromInt(JUMPING + i));
Steve Blocka7e24c12009-10-30 11:49:00 +00002117 if (--nof_unlinks > 0) {
2118 // If this is not the last unlink block, jump around the next.
2119 finally_block.Jump();
2120 }
2121 }
2122 }
2123
2124 // --- Finally block ---
2125 finally_block.Bind();
2126
2127 // Push the state on the stack.
2128 frame_->EmitPush(rcx);
2129
2130 // We keep two elements on the stack - the (possibly faked) result
2131 // and the state - while evaluating the finally block.
2132 //
2133 // Generate code for the statements in the finally block.
2134 VisitStatementsAndSpill(node->finally_block()->statements());
2135
2136 if (has_valid_frame()) {
2137 // Restore state and return value or faked TOS.
2138 frame_->EmitPop(rcx);
2139 frame_->EmitPop(rax);
2140 }
2141
2142 // Generate code to jump to the right destination for all used
2143 // formerly shadowing targets. Deallocate each shadow target.
2144 for (int i = 0; i < shadows.length(); i++) {
2145 if (has_valid_frame() && shadows[i]->is_bound()) {
2146 BreakTarget* original = shadows[i]->other_target();
Steve Block3ce2e202009-11-05 08:53:23 +00002147 __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
Steve Blocka7e24c12009-10-30 11:49:00 +00002148 if (i == kReturnShadowIndex) {
2149 // The return value is (already) in rax.
2150 Result return_value = allocator_->Allocate(rax);
2151 ASSERT(return_value.is_valid());
2152 if (function_return_is_shadowed_) {
2153 original->Branch(equal, &return_value);
2154 } else {
2155 // Branch around the preparation for return which may emit
2156 // code.
2157 JumpTarget skip;
2158 skip.Branch(not_equal);
2159 frame_->PrepareForReturn();
2160 original->Jump(&return_value);
2161 skip.Bind();
2162 }
2163 } else {
2164 original->Branch(equal);
2165 }
2166 }
2167 }
2168
2169 if (has_valid_frame()) {
2170 // Check if we need to rethrow the exception.
2171 JumpTarget exit;
Steve Block3ce2e202009-11-05 08:53:23 +00002172 __ SmiCompare(rcx, Smi::FromInt(THROWING));
Steve Blocka7e24c12009-10-30 11:49:00 +00002173 exit.Branch(not_equal);
2174
2175 // Rethrow exception.
2176 frame_->EmitPush(rax); // undo pop from above
2177 frame_->CallRuntime(Runtime::kReThrow, 1);
2178
2179 // Done.
2180 exit.Bind();
2181 }
2182}
2183
2184
2185void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2186 ASSERT(!in_spilled_code());
2187 Comment cmnt(masm_, "[ DebuggerStatement");
2188 CodeForStatementPosition(node);
2189#ifdef ENABLE_DEBUGGER_SUPPORT
2190 // Spill everything, even constants, to the frame.
2191 frame_->SpillAll();
2192 frame_->CallRuntime(Runtime::kDebugBreak, 0);
2193 // Ignore the return value.
2194#endif
2195}
2196
2197
2198void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2199 // Call the runtime to instantiate the function boilerplate object.
2200 // The inevitable call will sync frame elements to memory anyway, so
2201 // we do it eagerly to allow us to push the arguments directly into
2202 // place.
2203 ASSERT(boilerplate->IsBoilerplate());
2204 frame_->SyncRange(0, frame_->element_count() - 1);
2205
Steve Blocka7e24c12009-10-30 11:49:00 +00002206 // Create a new closure.
2207 frame_->EmitPush(rsi);
Steve Block3ce2e202009-11-05 08:53:23 +00002208 __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
2209 frame_->EmitPush(kScratchRegister);
Steve Blocka7e24c12009-10-30 11:49:00 +00002210 Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
2211 frame_->Push(&result);
2212}
2213
2214
2215void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2216 Comment cmnt(masm_, "[ FunctionLiteral");
2217
2218 // Build the function boilerplate and instantiate it.
Steve Blockd0582a62009-12-15 09:54:21 +00002219 Handle<JSFunction> boilerplate =
2220 Compiler::BuildBoilerplate(node, script_, this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002221 // Check for stack-overflow exception.
2222 if (HasStackOverflow()) return;
2223 InstantiateBoilerplate(boilerplate);
2224}
2225
2226
2227void CodeGenerator::VisitFunctionBoilerplateLiteral(
2228 FunctionBoilerplateLiteral* node) {
2229 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2230 InstantiateBoilerplate(node->boilerplate());
2231}
2232
2233
2234void CodeGenerator::VisitConditional(Conditional* node) {
2235 Comment cmnt(masm_, "[ Conditional");
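// JavaScript example: 'p ? then_value : else_value'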
2236 JumpTarget then;
2237 JumpTarget else_;
2238 JumpTarget exit;
2239 ControlDestination dest(&then, &else_, true);
Steve Blockd0582a62009-12-15 09:54:21 +00002240 LoadCondition(node->condition(), &dest, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002241
2242 if (dest.false_was_fall_through()) {
2243 // The else target was bound, so we compile the else part first.
Steve Blockd0582a62009-12-15 09:54:21 +00002244 Load(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002245
2246 if (then.is_linked()) {
2247 exit.Jump();
2248 then.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002249 Load(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002250 }
2251 } else {
2252 // The then target was bound, so we compile the then part first.
Steve Blockd0582a62009-12-15 09:54:21 +00002253 Load(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002254
2255 if (else_.is_linked()) {
2256 exit.Jump();
2257 else_.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002258 Load(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002259 }
2260 }
2261
2262 exit.Bind();
2263}
2264
2265
2266void CodeGenerator::VisitSlot(Slot* node) {
2267 Comment cmnt(masm_, "[ Slot");
Steve Blockd0582a62009-12-15 09:54:21 +00002268 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00002269}
2270
2271
2272void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2273 Comment cmnt(masm_, "[ VariableProxy");
2274 Variable* var = node->var();
2275 Expression* expr = var->rewrite();
2276 if (expr != NULL) {
2277 Visit(expr);
2278 } else {
2279 ASSERT(var->is_global());
2280 Reference ref(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002281 ref.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00002282 }
2283}
2284
2285
2286void CodeGenerator::VisitLiteral(Literal* node) {
2287 Comment cmnt(masm_, "[ Literal");
2288 frame_->Push(node->handle());
2289}
2290
2291
2292// Materialize the regexp literal 'node' in the literals array
2293// 'literals' of the function. Leave the regexp boilerplate in
2294// 'boilerplate'.
2295class DeferredRegExpLiteral: public DeferredCode {
2296 public:
2297 DeferredRegExpLiteral(Register boilerplate,
2298 Register literals,
2299 RegExpLiteral* node)
2300 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2301 set_comment("[ DeferredRegExpLiteral");
2302 }
2303
2304 void Generate();
2305
2306 private:
2307 Register boilerplate_;
2308 Register literals_;
2309 RegExpLiteral* node_;
2310};
2311
2312
2313void DeferredRegExpLiteral::Generate() {
2314 // Since the entry is undefined we call the runtime system to
2315 // compute the literal.
2316 // Literal array (0).
2317 __ push(literals_);
2318 // Literal index (1).
Steve Block3ce2e202009-11-05 08:53:23 +00002319 __ Push(Smi::FromInt(node_->literal_index()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002320 // RegExp pattern (2).
2321 __ Push(node_->pattern());
2322 // RegExp flags (3).
2323 __ Push(node_->flags());
2324 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2325 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2326}
2327
2328
2329void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2330 Comment cmnt(masm_, "[ RegExp Literal");
2331
2332 // Retrieve the literals array and check the allocated entry. Begin
2333 // with a writable copy of the function of this activation in a
2334 // register.
2335 frame_->PushFunction();
2336 Result literals = frame_->Pop();
2337 literals.ToRegister();
2338 frame_->Spill(literals.reg());
2339
2340 // Load the literals array of the function.
2341 __ movq(literals.reg(),
2342 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2343
2344 // Load the literal at the ast saved index.
2345 Result boilerplate = allocator_->Allocate();
2346 ASSERT(boilerplate.is_valid());
2347 int literal_offset =
2348 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2349 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2350
2351 // Check whether we need to materialize the RegExp object. If so,
2352 // jump to the deferred code passing the literals array.
2353 DeferredRegExpLiteral* deferred =
2354 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
2355 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2356 deferred->Branch(equal);
2357 deferred->BindExit();
2358 literals.Unuse();
2359
2360 // Push the boilerplate object.
2361 frame_->Push(&boilerplate);
2362}
2363
2364
2365// Materialize the object literal 'node' in the literals array
2366// 'literals' of the function. Leave the object boilerplate in
2367// 'boilerplate'.
2368class DeferredObjectLiteral: public DeferredCode {
2369 public:
2370 DeferredObjectLiteral(Register boilerplate,
2371 Register literals,
2372 ObjectLiteral* node)
2373 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2374 set_comment("[ DeferredObjectLiteral");
2375 }
2376
2377 void Generate();
2378
2379 private:
2380 Register boilerplate_;
2381 Register literals_;
2382 ObjectLiteral* node_;
2383};
2384
2385
2386void DeferredObjectLiteral::Generate() {
2387 // Since the entry is undefined we call the runtime system to
2388 // compute the literal.
2389 // Literal array (0).
2390 __ push(literals_);
2391 // Literal index (1).
Steve Block3ce2e202009-11-05 08:53:23 +00002392 __ Push(Smi::FromInt(node_->literal_index()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002393 // Constant properties (2).
2394 __ Push(node_->constant_properties());
2395 __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
2396 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2397}
2398
2399
2400void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2401 Comment cmnt(masm_, "[ ObjectLiteral");
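// JavaScript example: '{ a: 1, "b": f(), get c() { return 0; } }'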
2402
2403 // Retrieve the literals array and check the allocated entry. Begin
2404 // with a writable copy of the function of this activation in a
2405 // register.
2406 frame_->PushFunction();
2407 Result literals = frame_->Pop();
2408 literals.ToRegister();
2409 frame_->Spill(literals.reg());
2410
2411 // Load the literals array of the function.
2412 __ movq(literals.reg(),
2413 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2414
2415 // Load the literal at the ast saved index.
2416 Result boilerplate = allocator_->Allocate();
2417 ASSERT(boilerplate.is_valid());
2418 int literal_offset =
2419 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2420 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2421
2422 // Check whether we need to materialize the object literal boilerplate.
2423 // If so, jump to the deferred code passing the literals array.
2424 DeferredObjectLiteral* deferred =
2425 new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
2426 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2427 deferred->Branch(equal);
2428 deferred->BindExit();
2429 literals.Unuse();
2430
2431 // Push the boilerplate object.
2432 frame_->Push(&boilerplate);
2433 // Clone the boilerplate object.
2434 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2435 if (node->depth() == 1) {
2436 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2437 }
2438 Result clone = frame_->CallRuntime(clone_function_id, 1);
2439 // Push the newly cloned literal object as the result.
2440 frame_->Push(&clone);
2441
2442 for (int i = 0; i < node->properties()->length(); i++) {
2443 ObjectLiteral::Property* property = node->properties()->at(i);
2444 switch (property->kind()) {
2445 case ObjectLiteral::Property::CONSTANT:
2446 break;
2447 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2448 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2449 // else fall through.
2450 case ObjectLiteral::Property::COMPUTED: {
2451 Handle<Object> key(property->key()->handle());
2452 if (key->IsSymbol()) {
2453 // Duplicate the object as the IC receiver.
2454 frame_->Dup();
2455 Load(property->value());
2456 frame_->Push(key);
2457 Result ignored = frame_->CallStoreIC();
2458 // Drop the duplicated receiver and ignore the result.
2459 frame_->Drop();
2460 break;
2461 }
2462 // Fall through
2463 }
2464 case ObjectLiteral::Property::PROTOTYPE: {
2465 // Duplicate the object as an argument to the runtime call.
2466 frame_->Dup();
2467 Load(property->key());
2468 Load(property->value());
2469 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
2470 // Ignore the result.
2471 break;
2472 }
2473 case ObjectLiteral::Property::SETTER: {
2474 // Duplicate the object as an argument to the runtime call.
2475 frame_->Dup();
2476 Load(property->key());
2477 frame_->Push(Smi::FromInt(1));
2478 Load(property->value());
2479 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2480 // Ignore the result.
2481 break;
2482 }
2483 case ObjectLiteral::Property::GETTER: {
2484 // Duplicate the object as an argument to the runtime call.
2485 frame_->Dup();
2486 Load(property->key());
2487 frame_->Push(Smi::FromInt(0));
2488 Load(property->value());
2489 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2490 // Ignore the result.
2491 break;
2492 }
2493 default: UNREACHABLE();
2494 }
2495 }
2496}
2497
2498
2499// Materialize the array literal 'node' in the literals array 'literals'
2500// of the function. Leave the array boilerplate in 'boilerplate'.
2501class DeferredArrayLiteral: public DeferredCode {
2502 public:
2503 DeferredArrayLiteral(Register boilerplate,
2504 Register literals,
2505 ArrayLiteral* node)
2506 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2507 set_comment("[ DeferredArrayLiteral");
2508 }
2509
2510 void Generate();
2511
2512 private:
2513 Register boilerplate_;
2514 Register literals_;
2515 ArrayLiteral* node_;
2516};
2517
2518
2519void DeferredArrayLiteral::Generate() {
2520 // Since the entry is undefined we call the runtime system to
2521 // compute the literal.
2522 // Literal array (0).
2523 __ push(literals_);
2524 // Literal index (1).
Steve Block3ce2e202009-11-05 08:53:23 +00002525 __ Push(Smi::FromInt(node_->literal_index()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002526 // Constant properties (2).
2527 __ Push(node_->literals());
2528 __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
2529 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2530}
2531
2532
2533void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2534 Comment cmnt(masm_, "[ ArrayLiteral");
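// JavaScript example: '[0, 1, f(), 3]'; the non-literal element f()
// is stored by the loop at the end of this function.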
2535
2536 // Retrieve the literals array and check the allocated entry. Begin
2537 // with a writable copy of the function of this activation in a
2538 // register.
2539 frame_->PushFunction();
2540 Result literals = frame_->Pop();
2541 literals.ToRegister();
2542 frame_->Spill(literals.reg());
2543
2544 // Load the literals array of the function.
2545 __ movq(literals.reg(),
2546 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2547
2548 // Load the literal at the ast saved index.
2549 Result boilerplate = allocator_->Allocate();
2550 ASSERT(boilerplate.is_valid());
2551 int literal_offset =
2552 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2553 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2554
2555 // Check whether we need to materialize the object literal boilerplate.
2556 // If so, jump to the deferred code passing the literals array.
2557 DeferredArrayLiteral* deferred =
2558 new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
2559 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2560 deferred->Branch(equal);
2561 deferred->BindExit();
2562 literals.Unuse();
2563
2564 // Push the resulting array literal boilerplate on the stack.
2565 frame_->Push(&boilerplate);
2566 // Clone the boilerplate object.
2567 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2568 if (node->depth() == 1) {
2569 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2570 }
2571 Result clone = frame_->CallRuntime(clone_function_id, 1);
2572 // Push the newly cloned literal object as the result.
2573 frame_->Push(&clone);
2574
2575 // Generate code to set the elements in the array that are not
2576 // literals.
2577 for (int i = 0; i < node->values()->length(); i++) {
2578 Expression* value = node->values()->at(i);
2579
2580 // If value is a literal the property value is already set in the
2581 // boilerplate object.
2582 if (value->AsLiteral() != NULL) continue;
2583 // If value is a materialized literal the property value is already set
2584 // in the boilerplate object if it is simple.
2585 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2586
2587 // The property must be set by generated code.
2588 Load(value);
2589
2590 // Get the property value off the stack.
2591 Result prop_value = frame_->Pop();
2592 prop_value.ToRegister();
2593
2594 // Fetch the array literal while leaving a copy on the stack and
2595 // use it to get the elements array.
2596 frame_->Dup();
2597 Result elements = frame_->Pop();
2598 elements.ToRegister();
2599 frame_->Spill(elements.reg());
2600 // Get the elements FixedArray.
2601 __ movq(elements.reg(),
2602 FieldOperand(elements.reg(), JSObject::kElementsOffset));
2603
2604 // Write to the indexed properties array.
2605 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2606 __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
2607
2608 // Update the write barrier for the array address.
2609 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
2610 Result scratch = allocator_->Allocate();
2611 ASSERT(scratch.is_valid());
2612 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
2613 }
2614}
2615
2616
2617void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2618 ASSERT(!in_spilled_code());
2619 // Call runtime routine to allocate the catch extension object and
2620 // assign the exception value to the catch variable.
2621 Comment cmnt(masm_, "[ CatchExtensionObject");
2622 Load(node->key());
2623 Load(node->value());
2624 Result result =
2625 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2626 frame_->Push(&result);
2627}
2628
2629
2630void CodeGenerator::VisitAssignment(Assignment* node) {
2631 Comment cmnt(masm_, "[ Assignment");
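// JavaScript examples: 'x = y', 'x += y' (compound), 'o.f = v' (named
// reference), 'a[i] = v' (keyed reference).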
2632
2633 { Reference target(this, node->target());
2634 if (target.is_illegal()) {
2635 // Fool the virtual frame into thinking that we left the assignment's
2636 // value on the frame.
2637 frame_->Push(Smi::FromInt(0));
2638 return;
2639 }
2640 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2641
2642 if (node->starts_initialization_block()) {
2643 ASSERT(target.type() == Reference::NAMED ||
2644 target.type() == Reference::KEYED);
2645 // Change to slow case in the beginning of an initialization
2646 // block to avoid the quadratic behavior of repeatedly adding
2647 // fast properties.
2648
2649 // The receiver is the argument to the runtime call. It is the
2650 // first value pushed when the reference was loaded to the
2651 // frame.
2652 frame_->PushElementAt(target.size() - 1);
2653 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
2654 }
2655 if (node->op() == Token::ASSIGN ||
2656 node->op() == Token::INIT_VAR ||
2657 node->op() == Token::INIT_CONST) {
2658 Load(node->value());
2659
2660 } else {
2661 Literal* literal = node->value()->AsLiteral();
2662 bool overwrite_value =
2663 (node->value()->AsBinaryOperation() != NULL &&
2664 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2665 Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
2666 // There are two cases where the target is not read in the right hand
2667 // side that are easy to test for: the right hand side is a literal,
2668 // or the right hand side is a different variable. TakeValue invalidates
2669 // the target, with an implicit promise that it will be written to again
2670 // before it is read.
2671 if (literal != NULL || (right_var != NULL && right_var != var)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002672 target.TakeValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00002673 } else {
Steve Blockd0582a62009-12-15 09:54:21 +00002674 target.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00002675 }
2676 Load(node->value());
2677 GenericBinaryOperation(node->binary_op(),
2678 node->type(),
2679 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
2680 }
2681
2682 if (var != NULL &&
2683 var->mode() == Variable::CONST &&
2684 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2685 // Assignment ignored - leave the value on the stack.
2686 } else {
2687 CodeForSourcePosition(node->position());
2688 if (node->op() == Token::INIT_CONST) {
2689 // Dynamic constant initializations must use the function context
2690 // and initialize the actual constant declared. Dynamic variable
2691 // initializations are simply assignments and use SetValue.
2692 target.SetValue(CONST_INIT);
2693 } else {
2694 target.SetValue(NOT_CONST_INIT);
2695 }
2696 if (node->ends_initialization_block()) {
2697 ASSERT(target.type() == Reference::NAMED ||
2698 target.type() == Reference::KEYED);
2699 // End of initialization block. Revert to fast case. The
2700 // argument to the runtime call is the receiver, which is the
2701 // first value pushed as part of the reference, which is below
2702 // the lhs value.
2703 frame_->PushElementAt(target.size());
2704 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
2705 }
2706 }
2707 }
2708}
2709
2710
2711void CodeGenerator::VisitThrow(Throw* node) {
2712 Comment cmnt(masm_, "[ Throw");
2713 Load(node->exception());
2714 Result result = frame_->CallRuntime(Runtime::kThrow, 1);
2715 frame_->Push(&result);
2716}
2717
2718
2719void CodeGenerator::VisitProperty(Property* node) {
2720 Comment cmnt(masm_, "[ Property");
2721 Reference property(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002722 property.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00002723}
2724
2725
2726void CodeGenerator::VisitCall(Call* node) {
2727 Comment cmnt(masm_, "[ Call");
2728
2729 ZoneList<Expression*>* args = node->arguments();
2730
2731 // Check if the function is a variable or a property.
2732 Expression* function = node->expression();
2733 Variable* var = function->AsVariableProxy()->AsVariable();
2734 Property* property = function->AsProperty();
2735
2736 // ------------------------------------------------------------------------
2737 // Fast-case: Use inline caching.
2738 // ---
2739 // According to ECMA-262, section 11.2.3, page 44, the function to call
2740 // must be resolved after the arguments have been evaluated. The IC code
2741 // automatically handles this by loading the arguments before the function
2742 // is resolved in cache misses (this also holds for megamorphic calls).
2743 // ------------------------------------------------------------------------
2744
2745 if (var != NULL && var->is_possibly_eval()) {
2746 // ----------------------------------
2747 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2748 // ----------------------------------
2749
2750 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2751 // resolve the function we need to call and the receiver of the
2752 // call. Then we call the resolved function using the given
2753 // arguments.
2754
2755 // Prepare the stack for the call to the resolved function.
2756 Load(function);
2757
2758 // Allocate a frame slot for the receiver.
2759 frame_->Push(Factory::undefined_value());
2760 int arg_count = args->length();
2761 for (int i = 0; i < arg_count; i++) {
2762 Load(args->at(i));
2763 }
2764
2765 // Prepare the stack for the call to ResolvePossiblyDirectEval.
2766 frame_->PushElementAt(arg_count + 1);
2767 if (arg_count > 0) {
2768 frame_->PushElementAt(arg_count);
2769 } else {
2770 frame_->Push(Factory::undefined_value());
2771 }
2772
2773 // Resolve the call.
2774 Result result =
2775 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
2776
2777 // Touch up the stack with the right values for the function and the
2778 // receiver. Use a scratch register to avoid destroying the result.
2779 Result scratch = allocator_->Allocate();
2780 ASSERT(scratch.is_valid());
2781 __ movq(scratch.reg(),
2782 FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
2783 frame_->SetElementAt(arg_count + 1, &scratch);
2784
2785 // We can reuse the result register now.
2786 frame_->Spill(result.reg());
2787 __ movq(result.reg(),
2788 FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
2789 frame_->SetElementAt(arg_count, &result);
2790
2791 // Call the function.
2792 CodeForSourcePosition(node->position());
2793 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
2794 CallFunctionStub call_function(arg_count, in_loop);
2795 result = frame_->CallStub(&call_function, arg_count + 1);
2796
2797 // Restore the context and overwrite the function on the stack with
2798 // the result.
2799 frame_->RestoreContextRegister();
2800 frame_->SetElementAt(0, &result);
2801
2802 } else if (var != NULL && !var->is_this() && var->is_global()) {
2803 // ----------------------------------
2804 // JavaScript example: 'foo(1, 2, 3)' // foo is global
2805 // ----------------------------------
2806
2807 // Push the name of the function and the receiver onto the stack.
2808 frame_->Push(var->name());
2809
2810 // Pass the global object as the receiver and let the IC stub
2811 // patch the stack to use the global proxy as 'this' in the
2812 // invoked function.
2813 LoadGlobal();
2814
2815 // Load the arguments.
2816 int arg_count = args->length();
2817 for (int i = 0; i < arg_count; i++) {
2818 Load(args->at(i));
2819 }
2820
2821 // Call the IC initialization code.
2822 CodeForSourcePosition(node->position());
2823 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
2824 arg_count,
2825 loop_nesting());
2826 frame_->RestoreContextRegister();
2827 // Replace the function on the stack with the result.
2828 frame_->SetElementAt(0, &result);
2829
2830 } else if (var != NULL && var->slot() != NULL &&
2831 var->slot()->type() == Slot::LOOKUP) {
2832 // ----------------------------------
2833 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
2834 // ----------------------------------
2835
2836 // Load the function from the context. Sync the frame so we can
2837 // push the arguments directly into place.
2838 frame_->SyncRange(0, frame_->element_count() - 1);
2839 frame_->EmitPush(rsi);
2840 frame_->EmitPush(var->name());
2841 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2842 // The runtime call returns a pair of values in rax and rdx. The
2843 // looked-up function is in rax and the receiver is in rdx. These
2844 // register references are not ref counted here. We spill them
2845 // eagerly since they are arguments to an inevitable call (and are
2846 // not sharable by the arguments).
2847 ASSERT(!allocator()->is_used(rax));
2848 frame_->EmitPush(rax);
2849
2850 // Load the receiver.
2851 ASSERT(!allocator()->is_used(rdx));
2852 frame_->EmitPush(rdx);
2853
2854 // Call the function.
2855 CallWithArguments(args, node->position());
2856
2857 } else if (property != NULL) {
2858 // Check if the key is a literal string.
2859 Literal* literal = property->key()->AsLiteral();
2860
2861 if (literal != NULL && literal->handle()->IsSymbol()) {
2862 // ------------------------------------------------------------------
2863 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
2864 // ------------------------------------------------------------------
2865
2866 Handle<String> name = Handle<String>::cast(literal->handle());
2867
2868 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
2869 name->IsEqualTo(CStrVector("apply")) &&
2870 args->length() == 2 &&
2871 args->at(1)->AsVariableProxy() != NULL &&
2872 args->at(1)->AsVariableProxy()->IsArguments()) {
2873 // Use the optimized Function.prototype.apply that avoids
2874 // allocating lazily allocated arguments objects.
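// JavaScript example (hypothetical names): 'f.apply(this, arguments)'
// inside a function whose arguments object has not been materialized.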
2875 CallApplyLazy(property,
2876 args->at(0),
2877 args->at(1)->AsVariableProxy(),
2878 node->position());
2879
2880 } else {
2881 // Push the name of the function and the receiver onto the stack.
2882 frame_->Push(name);
2883 Load(property->obj());
2884
2885 // Load the arguments.
2886 int arg_count = args->length();
2887 for (int i = 0; i < arg_count; i++) {
2888 Load(args->at(i));
2889 }
2890
2891 // Call the IC initialization code.
2892 CodeForSourcePosition(node->position());
2893 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
2894 arg_count,
2895 loop_nesting());
2896 frame_->RestoreContextRegister();
2897 // Replace the function on the stack with the result.
2898 frame_->SetElementAt(0, &result);
2899 }
2900
2901 } else {
2902 // -------------------------------------------
2903 // JavaScript example: 'array[index](1, 2, 3)'
2904 // -------------------------------------------
2905
2906 // Load the function to call from the property through a reference.
2907 Reference ref(this, property);
Steve Blockd0582a62009-12-15 09:54:21 +00002908 ref.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00002909
2910 // Pass receiver to called function.
2911 if (property->is_synthetic()) {
2912 // Use global object as receiver.
2913 LoadGlobalReceiver();
2914 } else {
2915 // The reference's size is non-negative.
2916 frame_->PushElementAt(ref.size());
2917 }
2918
2919 // Call the function.
2920 CallWithArguments(args, node->position());
2921 }
2922
2923 } else {
2924 // ----------------------------------
2925 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
2926 // ----------------------------------
2927
2928 // Load the function.
2929 Load(function);
2930
2931 // Pass the global proxy as the receiver.
2932 LoadGlobalReceiver();
2933
2934 // Call the function.
2935 CallWithArguments(args, node->position());
2936 }
2937}
2938
2939
2940void CodeGenerator::VisitCallNew(CallNew* node) {
2941 Comment cmnt(masm_, "[ CallNew");
2942
2943 // According to ECMA-262, section 11.2.2, page 44, the function
2944 // expression in new calls must be evaluated before the
2945 // arguments. This is different from ordinary calls, where the
2946 // actual function to call is resolved after the arguments have been
2947 // evaluated.
2948
2949 // Compute function to call and use the global object as the
2950 // receiver. There is no need to use the global proxy here because
2951 // it will always be replaced with a newly allocated object.
2952 Load(node->expression());
2953 LoadGlobal();
2954
2955 // Push the arguments ("left-to-right") on the stack.
2956 ZoneList<Expression*>* args = node->arguments();
2957 int arg_count = args->length();
2958 for (int i = 0; i < arg_count; i++) {
2959 Load(args->at(i));
2960 }
2961
2962 // Call the construct call builtin that handles allocation and
2963 // constructor invocation.
2964 CodeForSourcePosition(node->position());
2965 Result result = frame_->CallConstructor(arg_count);
2966 // Replace the function on the stack with the result.
2967 frame_->SetElementAt(0, &result);
2968}
2969
2970
2971void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
2972 if (CheckForInlineRuntimeCall(node)) {
2973 return;
2974 }
2975
2976 ZoneList<Expression*>* args = node->arguments();
2977 Comment cmnt(masm_, "[ CallRuntime");
2978 Runtime::Function* function = node->function();
2979
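// A NULL function means the name does not resolve to a C++ runtime
// function; the call is dispatched to a JS builtin found on the
// builtins object instead (the CallCallIC path below).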
2980 if (function == NULL) {
2981 // Prepare stack for calling JS runtime function.
2982 frame_->Push(node->name());
2983 // Push the builtins object found in the current global object.
2984 Result temp = allocator()->Allocate();
2985 ASSERT(temp.is_valid());
2986 __ movq(temp.reg(), GlobalObject());
2987 __ movq(temp.reg(),
2988 FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
2989 frame_->Push(&temp);
2990 }
2991
2992 // Push the arguments ("left-to-right").
2993 int arg_count = args->length();
2994 for (int i = 0; i < arg_count; i++) {
2995 Load(args->at(i));
2996 }
2997
2998 if (function == NULL) {
2999 // Call the JS runtime function.
3000 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
3001 arg_count,
3002 loop_nesting_);
3003 frame_->RestoreContextRegister();
3004 frame_->SetElementAt(0, &answer);
3005 } else {
3006 // Call the C runtime function.
3007 Result answer = frame_->CallRuntime(function, arg_count);
3008 frame_->Push(&answer);
3009 }
3010}
3011
3012
3013void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003014 Comment cmnt(masm_, "[ UnaryOperation");
3015
3016 Token::Value op = node->op();
3017
3018 if (op == Token::NOT) {
3019 // Swap the true and false targets but keep the same actual label
3020 // as the fall through.
3021 destination()->Invert();
Steve Blockd0582a62009-12-15 09:54:21 +00003022 LoadCondition(node->expression(), destination(), true);
Steve Blocka7e24c12009-10-30 11:49:00 +00003023 // Swap the labels back.
3024 destination()->Invert();
3025
3026 } else if (op == Token::DELETE) {
3027 Property* property = node->expression()->AsProperty();
3028 if (property != NULL) {
3029 Load(property->obj());
3030 Load(property->key());
3031 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
3032 frame_->Push(&answer);
3033 return;
3034 }
3035
3036 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3037 if (variable != NULL) {
3038 Slot* slot = variable->slot();
3039 if (variable->is_global()) {
3040 LoadGlobal();
3041 frame_->Push(variable->name());
3042 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3043 CALL_FUNCTION, 2);
3044 frame_->Push(&answer);
3045 return;
3046
3047 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3048 // Call the runtime to look up the context holding the named
3049 // variable. Sync the virtual frame eagerly so we can push the
3050 // arguments directly into place.
3051 frame_->SyncRange(0, frame_->element_count() - 1);
3052 frame_->EmitPush(rsi);
3053 frame_->EmitPush(variable->name());
3054 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
3055 ASSERT(context.is_register());
3056 frame_->EmitPush(context.reg());
3057 context.Unuse();
3058 frame_->EmitPush(variable->name());
3059 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3060 CALL_FUNCTION, 2);
3061 frame_->Push(&answer);
3062 return;
3063 }
3064
3065 // Default: Result of deleting non-global, not dynamically
3066 // introduced variables is false.
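      // JavaScript example (illustrative): inside a function, 'delete v' for
      // a declared local 'v' evaluates to false, which is pushed directly here.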
3067 frame_->Push(Factory::false_value());
3068
3069 } else {
3070 // Default: Result of deleting expressions is true.
3071 Load(node->expression()); // may have side-effects
3072 frame_->SetElementAt(0, Factory::true_value());
3073 }
3074
3075 } else if (op == Token::TYPEOF) {
3076 // Special case for loading the typeof expression; see comment on
3077 // LoadTypeofExpression().
3078 LoadTypeofExpression(node->expression());
3079 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
3080 frame_->Push(&answer);
3081
3082 } else if (op == Token::VOID) {
3083 Expression* expression = node->expression();
3084 if (expression && expression->AsLiteral() && (
3085 expression->AsLiteral()->IsTrue() ||
3086 expression->AsLiteral()->IsFalse() ||
3087 expression->AsLiteral()->handle()->IsNumber() ||
3088 expression->AsLiteral()->handle()->IsString() ||
3089 expression->AsLiteral()->handle()->IsJSRegExp() ||
3090 expression->AsLiteral()->IsNull())) {
3091 // Omit evaluating the value of the primitive literal.
3092 // It will be discarded anyway, and can have no side effect.
3093 frame_->Push(Factory::undefined_value());
3094 } else {
3095 Load(node->expression());
3096 frame_->SetElementAt(0, Factory::undefined_value());
3097 }
3098
3099 } else {
3100 Load(node->expression());
3101 switch (op) {
3102 case Token::NOT:
3103 case Token::DELETE:
3104 case Token::TYPEOF:
3105 UNREACHABLE(); // handled above
3106 break;
3107
3108 case Token::SUB: {
3109 bool overwrite =
3110 (node->expression()->AsBinaryOperation() != NULL &&
3111 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
3112 UnarySubStub stub(overwrite);
3113 // TODO(1222589): remove dependency of TOS being cached inside stub
3114 Result operand = frame_->Pop();
3115 Result answer = frame_->CallStub(&stub, &operand);
3116 frame_->Push(&answer);
3117 break;
3118 }
3119
3120 case Token::BIT_NOT: {
3121 // Smi check.
3122 JumpTarget smi_label;
3123 JumpTarget continue_label;
3124 Result operand = frame_->Pop();
3125 operand.ToRegister();
3126
3127 Condition is_smi = masm_->CheckSmi(operand.reg());
3128 smi_label.Branch(is_smi, &operand);
3129
3130 frame_->Push(&operand); // undo popping of TOS
3131 Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
3132 CALL_FUNCTION, 1);
3133 continue_label.Jump(&answer);
3134 smi_label.Bind(&answer);
3135 answer.ToRegister();
3136 frame_->Spill(answer.reg());
3137 __ SmiNot(answer.reg(), answer.reg());
3138 continue_label.Bind(&answer);
3139 frame_->Push(&answer);
3140 break;
3141 }
3142
3143 case Token::ADD: {
3144 // Smi check.
3145 JumpTarget continue_label;
3146 Result operand = frame_->Pop();
3147 operand.ToRegister();
3148 Condition is_smi = masm_->CheckSmi(operand.reg());
3149 continue_label.Branch(is_smi, &operand);
3150 frame_->Push(&operand);
3151 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
3152 CALL_FUNCTION, 1);
3153
3154 continue_label.Bind(&answer);
3155 frame_->Push(&answer);
3156 break;
3157 }
3158
3159 default:
3160 UNREACHABLE();
3161 }
3162 }
3163}
3164
3165
3166// The value in dst was optimistically incremented or decremented. The
3167// result overflowed or was not smi tagged. Undo the operation, call
3168// into the runtime to convert the argument to a number, and call the
3169// specialized add or subtract stub. The result is left in dst.
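// For instance (illustrative), '++x' where x holds a heap number, or where
// the optimistic smi increment overflows, ends up in this deferred path.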
3170class DeferredPrefixCountOperation: public DeferredCode {
3171 public:
3172 DeferredPrefixCountOperation(Register dst, bool is_increment)
3173 : dst_(dst), is_increment_(is_increment) {
3174 set_comment("[ DeferredCountOperation");
3175 }
3176
3177 virtual void Generate();
3178
3179 private:
3180 Register dst_;
3181 bool is_increment_;
3182};
3183
3184
3185void DeferredPrefixCountOperation::Generate() {
3186 __ push(dst_);
3187 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3188 __ push(rax);
3189 __ Push(Smi::FromInt(1));
3190 if (is_increment_) {
3191 __ CallRuntime(Runtime::kNumberAdd, 2);
3192 } else {
3193 __ CallRuntime(Runtime::kNumberSub, 2);
3194 }
3195 if (!dst_.is(rax)) __ movq(dst_, rax);
3196}
3197
3198
3199// The value in dst was optimistically incremented or decremented. The
3200// result overflowed or was not smi tagged. Undo the operation and call
3201// into the runtime to convert the argument to a number. Update the
3202// original value in old. Call the specialized add or subtract stub.
3203// The result is left in dst.
3204class DeferredPostfixCountOperation: public DeferredCode {
3205 public:
3206 DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
3207 : dst_(dst), old_(old), is_increment_(is_increment) {
3208 set_comment("[ DeferredCountOperation");
3209 }
3210
3211 virtual void Generate();
3212
3213 private:
3214 Register dst_;
3215 Register old_;
3216 bool is_increment_;
3217};
3218
3219
3220void DeferredPostfixCountOperation::Generate() {
3221 __ push(dst_);
3222 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3223
3224 // Save the result of ToNumber to use as the old value.
3225 __ push(rax);
3226
3227 // Call the runtime for the addition or subtraction.
3228 __ push(rax);
3229 __ Push(Smi::FromInt(1));
3230 if (is_increment_) {
3231 __ CallRuntime(Runtime::kNumberAdd, 2);
3232 } else {
3233 __ CallRuntime(Runtime::kNumberSub, 2);
3234 }
3235 if (!dst_.is(rax)) __ movq(dst_, rax);
3236 __ pop(old_);
3237}
3238
3239
3240void CodeGenerator::VisitCountOperation(CountOperation* node) {
3241 Comment cmnt(masm_, "[ CountOperation");
3242
3243 bool is_postfix = node->is_postfix();
3244 bool is_increment = node->op() == Token::INC;
3245
3246 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3247 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3248
3249 // Postfix operations need a stack slot under the reference to hold
3250 // the old value while the new value is being stored. This is so that
3251 // in the case that storing the new value requires a call, the old
3252 // value will be in the frame to be spilled.
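  // JavaScript example (illustrative): 'o.x++' must yield the old value of
  // 'o.x' even if storing the new value requires a call, so the old value is
  // parked in this extra frame slot.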
3253 if (is_postfix) frame_->Push(Smi::FromInt(0));
3254
3255 { Reference target(this, node->expression());
3256 if (target.is_illegal()) {
3257 // Spoof the virtual frame to have the expected height (one higher
3258 // than on entry).
3259 if (!is_postfix) frame_->Push(Smi::FromInt(0));
3260 return;
3261 }
3262 target.TakeValue();
3263
3264 Result new_value = frame_->Pop();
3265 new_value.ToRegister();
3266
3267 Result old_value; // Only allocated in the postfix case.
3268 if (is_postfix) {
3269 // Allocate a temporary to preserve the old value.
3270 old_value = allocator_->Allocate();
3271 ASSERT(old_value.is_valid());
3272 __ movq(old_value.reg(), new_value.reg());
3273 }
3274 // Ensure the new value is writable.
3275 frame_->Spill(new_value.reg());
3276
3277 DeferredCode* deferred = NULL;
3278 if (is_postfix) {
3279 deferred = new DeferredPostfixCountOperation(new_value.reg(),
3280 old_value.reg(),
3281 is_increment);
3282 } else {
3283 deferred = new DeferredPrefixCountOperation(new_value.reg(),
3284 is_increment);
3285 }
3286
3287 __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
3288 if (is_increment) {
3289 __ SmiAddConstant(kScratchRegister,
3290 new_value.reg(),
3291 Smi::FromInt(1),
3292 deferred->entry_label());
3293 } else {
3294 __ SmiSubConstant(kScratchRegister,
3295 new_value.reg(),
3296 Smi::FromInt(1),
3297 deferred->entry_label());
3298 }
3299 __ movq(new_value.reg(), kScratchRegister);
3300 deferred->BindExit();
3301
3302 // Postfix: store the old value in the allocated slot under the
3303 // reference.
3304 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
3305
3306 frame_->Push(&new_value);
3307 // Non-constant: update the reference.
3308 if (!is_const) target.SetValue(NOT_CONST_INIT);
3309 }
3310
3311 // Postfix: drop the new value and use the old.
3312 if (is_postfix) frame_->Drop();
3313}
3314
3315
3316void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3317 // TODO(X64): This code was copied verbatim from codegen-ia32.
3318 // Either find a reason to change it or move it to a shared location.
3319
3320 Comment cmnt(masm_, "[ BinaryOperation");
3321 Token::Value op = node->op();
3322
3323 // According to ECMA-262 section 11.11, page 58, the binary logical
3324 // operators must yield the result of one of the two expressions
3325 // before any ToBoolean() conversions. This means that the value
3326 // produced by a && or || operator is not necessarily a boolean.
3327
3328 // NOTE: If the left hand side produces a materialized value (not
3329 // control flow), we force the right hand side to do the same. This
3330 // is necessary because we assume that if we get control flow on the
3331 // last path out of an expression we got it on all paths.
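  // JavaScript example (illustrative): '0 || "fallback"' evaluates to the
  // string "fallback" and '"a" && 0' evaluates to 0, so the original operand
  // value must be preserved rather than a boolean.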
3332 if (op == Token::AND) {
3333 JumpTarget is_true;
3334 ControlDestination dest(&is_true, destination()->false_target(), true);
3335 LoadCondition(node->left(), &dest, false);
3336
3337 if (dest.false_was_fall_through()) {
3338 // The current false target was used as the fall-through. If
3339 // there are no dangling jumps to is_true then the left
3340 // subexpression was unconditionally false. Otherwise we have
3341 // paths where we do have to evaluate the right subexpression.
3342 if (is_true.is_linked()) {
3343 // We need to compile the right subexpression. If the jump to
3344 // the current false target was a forward jump then we have a
3345 // valid frame, we have just bound the false target, and we
3346 // have to jump around the code for the right subexpression.
3347 if (has_valid_frame()) {
3348 destination()->false_target()->Unuse();
3349 destination()->false_target()->Jump();
3350 }
3351 is_true.Bind();
3352 // The left subexpression compiled to control flow, so the
3353 // right one is free to do so as well.
3354 LoadCondition(node->right(), destination(), false);
3355 } else {
3356 // We have actually just jumped to or bound the current false
3357 // target but the current control destination is not marked as
3358 // used.
3359 destination()->Use(false);
3360 }
3361
3362 } else if (dest.is_used()) {
3363 // The left subexpression compiled to control flow (and is_true
3364 // was just bound), so the right is free to do so as well.
3365 LoadCondition(node->right(), destination(), false);
3366
3367 } else {
3368 // We have a materialized value on the frame, so we exit with
3369 // one on all paths. There are possibly also jumps to is_true
3370 // from nested subexpressions.
3371 JumpTarget pop_and_continue;
3372 JumpTarget exit;
3373
3374 // Avoid popping the result if it converts to 'false' using the
3375 // standard ToBoolean() conversion as described in ECMA-262,
3376 // section 9.2, page 30.
3377 //
3378 // Duplicate the TOS value. The duplicate will be popped by
3379 // ToBoolean.
3380 frame_->Dup();
3381 ControlDestination dest(&pop_and_continue, &exit, true);
3382 ToBoolean(&dest);
3383
3384 // Pop the result of evaluating the first part.
3385 frame_->Drop();
3386
3387 // Compile right side expression.
3388 is_true.Bind();
3389 Load(node->right());
3390
3391 // Exit (always with a materialized value).
3392 exit.Bind();
3393 }
3394
3395 } else if (op == Token::OR) {
3396 JumpTarget is_false;
3397 ControlDestination dest(destination()->true_target(), &is_false, false);
3398 LoadCondition(node->left(), &dest, false);
3399
3400 if (dest.true_was_fall_through()) {
3401 // The current true target was used as the fall-through. If
3402 // there are no dangling jumps to is_false then the left
3403 // subexpression was unconditionally true. Otherwise we have
3404 // paths where we do have to evaluate the right subexpression.
3405 if (is_false.is_linked()) {
3406 // We need to compile the right subexpression. If the jump to
3407 // the current true target was a forward jump then we have a
3408 // valid frame, we have just bound the true target, and we
3409 // have to jump around the code for the right subexpression.
3410 if (has_valid_frame()) {
3411 destination()->true_target()->Unuse();
3412 destination()->true_target()->Jump();
3413 }
3414 is_false.Bind();
3415 // The left subexpression compiled to control flow, so the
3416 // right one is free to do so as well.
3417 LoadCondition(node->right(), destination(), false);
3418 } else {
3419 // We have just jumped to or bound the current true target but
3420 // the current control destination is not marked as used.
3421 destination()->Use(true);
3422 }
3423
3424 } else if (dest.is_used()) {
3425 // The left subexpression compiled to control flow (and is_false
3426 // was just bound), so the right is free to do so as well.
3427 LoadCondition(node->right(), destination(), false);
3428
3429 } else {
3430 // We have a materialized value on the frame, so we exit with
3431 // one on all paths. There are possibly also jumps to is_false
3432 // from nested subexpressions.
3433 JumpTarget pop_and_continue;
3434 JumpTarget exit;
3435
3436 // Avoid popping the result if it converts to 'true' using the
3437 // standard ToBoolean() conversion as described in ECMA-262,
3438 // section 9.2, page 30.
3439 //
3440 // Duplicate the TOS value. The duplicate will be popped by
3441 // ToBoolean.
3442 frame_->Dup();
3443 ControlDestination dest(&exit, &pop_and_continue, false);
3444 ToBoolean(&dest);
3445
3446 // Pop the result of evaluating the first part.
3447 frame_->Drop();
3448
3449 // Compile right side expression.
3450 is_false.Bind();
3451 Load(node->right());
3452
3453 // Exit (always with a materialized value).
3454 exit.Bind();
3455 }
3456
3457 } else {
3458 // NOTE: The code below assumes that the slow cases (calls to runtime)
3459 // never return a constant/immutable object.
3460 OverwriteMode overwrite_mode = NO_OVERWRITE;
3461 if (node->left()->AsBinaryOperation() != NULL &&
3462 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3463 overwrite_mode = OVERWRITE_LEFT;
3464 } else if (node->right()->AsBinaryOperation() != NULL &&
3465 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3466 overwrite_mode = OVERWRITE_RIGHT;
3467 }
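    // Illustrative note: in 'a + (b * c)' the inner multiplication yields a
    // temporary, so (assuming that is what ResultOverwriteAllowed() reports)
    // its heap-number result may be reused in place for the outer addition.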
3468
3469 Load(node->left());
3470 Load(node->right());
3471 GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
3472 }
3473}
3474
3475
3476
3477void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
3478 Comment cmnt(masm_, "[ CompareOperation");
3479
3480 // Get the expressions from the node.
3481 Expression* left = node->left();
3482 Expression* right = node->right();
3483 Token::Value op = node->op();
3484 // To make typeof testing for natives implemented in JavaScript really
3485 // efficient, we generate special code for expressions of the form:
3486 // 'typeof <expression> == <string>'.
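  // JavaScript example (illustrative): 'typeof x == "number"' compiles to a
  // direct smi / heap-number map check on x below, without materializing the
  // typeof result string.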
3487 UnaryOperation* operation = left->AsUnaryOperation();
3488 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
3489 (operation != NULL && operation->op() == Token::TYPEOF) &&
3490 (right->AsLiteral() != NULL &&
3491 right->AsLiteral()->handle()->IsString())) {
3492 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
3493
3494 // Load the operand and move it to a register.
3495 LoadTypeofExpression(operation->expression());
3496 Result answer = frame_->Pop();
3497 answer.ToRegister();
3498
3499 if (check->Equals(Heap::number_symbol())) {
3500 Condition is_smi = masm_->CheckSmi(answer.reg());
3501 destination()->true_target()->Branch(is_smi);
3502 frame_->Spill(answer.reg());
3503 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
3504 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
3505 answer.Unuse();
3506 destination()->Split(equal);
3507
3508 } else if (check->Equals(Heap::string_symbol())) {
3509 Condition is_smi = masm_->CheckSmi(answer.reg());
3510 destination()->false_target()->Branch(is_smi);
3511
3512 // It can be an undetectable string object.
3513 __ movq(kScratchRegister,
3514 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3515 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3516 Immediate(1 << Map::kIsUndetectable));
3517 destination()->false_target()->Branch(not_zero);
3518 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
3519 answer.Unuse();
3520 destination()->Split(below); // Unsigned byte comparison needed.
3521
3522 } else if (check->Equals(Heap::boolean_symbol())) {
3523 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
3524 destination()->true_target()->Branch(equal);
3525 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
3526 answer.Unuse();
3527 destination()->Split(equal);
3528
3529 } else if (check->Equals(Heap::undefined_symbol())) {
3530 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
3531 destination()->true_target()->Branch(equal);
3532
3533 Condition is_smi = masm_->CheckSmi(answer.reg());
3534 destination()->false_target()->Branch(is_smi);
3535
3536 // It can be an undetectable object.
3537 __ movq(kScratchRegister,
3538 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3539 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3540 Immediate(1 << Map::kIsUndetectable));
3541 answer.Unuse();
3542 destination()->Split(not_zero);
3543
3544 } else if (check->Equals(Heap::function_symbol())) {
3545 Condition is_smi = masm_->CheckSmi(answer.reg());
3546 destination()->false_target()->Branch(is_smi);
3547 frame_->Spill(answer.reg());
3548 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
3549 destination()->true_target()->Branch(equal);
3550 // Regular expressions are callable so typeof == 'function'.
3551 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
3552 answer.Unuse();
3553 destination()->Split(equal);
3554
3555 } else if (check->Equals(Heap::object_symbol())) {
3556 Condition is_smi = masm_->CheckSmi(answer.reg());
3557 destination()->false_target()->Branch(is_smi);
3558 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
3559 destination()->true_target()->Branch(equal);
3560
3561 // Regular expressions are typeof == 'function', not 'object'.
3562 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
3563 destination()->false_target()->Branch(equal);
3564
3565 // It can be an undetectable object.
3566 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3567 Immediate(1 << Map::kIsUndetectable));
3568 destination()->false_target()->Branch(not_zero);
3569 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3570 destination()->false_target()->Branch(below);
3571 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3572 answer.Unuse();
3573 destination()->Split(below_equal);
3574 } else {
3575 // Uncommon case: typeof testing against a string literal that is
3576 // never returned from the typeof operator.
3577 answer.Unuse();
3578 destination()->Goto(false);
3579 }
3580 return;
3581 }
3582
3583 Condition cc = no_condition;
3584 bool strict = false;
3585 switch (op) {
3586 case Token::EQ_STRICT:
3587 strict = true;
3588 // Fall through
3589 case Token::EQ:
3590 cc = equal;
3591 break;
3592 case Token::LT:
3593 cc = less;
3594 break;
3595 case Token::GT:
3596 cc = greater;
3597 break;
3598 case Token::LTE:
3599 cc = less_equal;
3600 break;
3601 case Token::GTE:
3602 cc = greater_equal;
3603 break;
3604 case Token::IN: {
3605 Load(left);
3606 Load(right);
3607 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
3608 frame_->Push(&answer); // push the result
3609 return;
3610 }
3611 case Token::INSTANCEOF: {
3612 Load(left);
3613 Load(right);
3614 InstanceofStub stub;
3615 Result answer = frame_->CallStub(&stub, 2);
3616 answer.ToRegister();
3617 __ testq(answer.reg(), answer.reg());
3618 answer.Unuse();
3619 destination()->Split(zero);
3620 return;
3621 }
3622 default:
3623 UNREACHABLE();
3624 }
3625 Load(left);
3626 Load(right);
3627 Comparison(cc, strict, destination());
3628}
3629
3630
3631void CodeGenerator::VisitThisFunction(ThisFunction* node) {
3632 frame_->PushFunction();
3633}
3634
3635
3636void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3637 ASSERT(args->length() == 1);
3638
3639 // ArgumentsAccessStub expects the key in rdx and the formal
3640 // parameter count in rax.
3641 Load(args->at(0));
3642 Result key = frame_->Pop();
3643 // Explicitly create a constant result.
3644 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
3645 // Call the shared stub to get to arguments[key].
3646 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3647 Result result = frame_->CallStub(&stub, &key, &count);
3648 frame_->Push(&result);
3649}
3650
3651
3652void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3653 ASSERT(args->length() == 1);
3654 Load(args->at(0));
3655 Result value = frame_->Pop();
3656 value.ToRegister();
3657 ASSERT(value.is_valid());
3658 Condition is_smi = masm_->CheckSmi(value.reg());
3659 destination()->false_target()->Branch(is_smi);
3660 // It is a heap object - get map.
3661 // Check if the object is a JS array or not.
3662 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
3663 value.Unuse();
3664 destination()->Split(equal);
3665}
3666
3667
3668void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
3669 // This generates a fast version of:
3670 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
3671 ASSERT(args->length() == 1);
3672 Load(args->at(0));
3673 Result obj = frame_->Pop();
3674 obj.ToRegister();
3675 Condition is_smi = masm_->CheckSmi(obj.reg());
3676 destination()->false_target()->Branch(is_smi);
3677
3678 __ Move(kScratchRegister, Factory::null_value());
3679 __ cmpq(obj.reg(), kScratchRegister);
3680 destination()->true_target()->Branch(equal);
3681
3682 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
3683 // Undetectable objects behave like undefined when tested with typeof.
3684 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3685 Immediate(1 << Map::kIsUndetectable));
3686 destination()->false_target()->Branch(not_zero);
3687 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3688 destination()->false_target()->Branch(less);
3689 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3690 obj.Unuse();
3691 destination()->Split(less_equal);
3692}
3693
3694
3695void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
3696 // This generates a fast version of:
3697 // (%_ClassOf(arg) === 'Function')
3698 ASSERT(args->length() == 1);
3699 Load(args->at(0));
3700 Result obj = frame_->Pop();
3701 obj.ToRegister();
3702 Condition is_smi = masm_->CheckSmi(obj.reg());
3703 destination()->false_target()->Branch(is_smi);
3704 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
3705 obj.Unuse();
3706 destination()->Split(equal);
3707}
3708
3709
3710void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3711 ASSERT(args->length() == 0);
3712
3713 // Get the frame pointer for the calling frame.
3714 Result fp = allocator()->Allocate();
3715 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3716
3717 // Skip the arguments adaptor frame if it exists.
3718 Label check_frame_marker;
3719 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
3720 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3721 __ j(not_equal, &check_frame_marker);
3722 __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
3723
3724 // Check the marker in the calling frame.
3725 __ bind(&check_frame_marker);
3726 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
3727 Smi::FromInt(StackFrame::CONSTRUCT));
3728 fp.Unuse();
3729 destination()->Split(equal);
3730}
3731
3732
3733void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3734 ASSERT(args->length() == 0);
3735 // ArgumentsAccessStub takes the parameter count as an input argument
3736 // in register rax. Create a constant result for it.
3737 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
3738 // Call the shared stub to get to the arguments.length.
3739 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3740 Result result = frame_->CallStub(&stub, &count);
3741 frame_->Push(&result);
3742}
3743
3744
3745void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3746 Comment(masm_, "[ GenerateFastCharCodeAt");
3747 ASSERT(args->length() == 2);
3748
3749 Label slow_case;
3750 Label end;
3751 Label not_a_flat_string;
3752 Label try_again_with_new_string;
3753 Label ascii_string;
3754 Label got_char_code;
3755
3756 Load(args->at(0));
3757 Load(args->at(1));
3758 Result index = frame_->Pop();
3759 Result object = frame_->Pop();
3760
3761 // Get register rcx to use as shift amount later.
3762 Result shift_amount;
3763 if (object.is_register() && object.reg().is(rcx)) {
3764 Result fresh = allocator_->Allocate();
3765 shift_amount = object;
3766 object = fresh;
3767 __ movq(object.reg(), rcx);
3768 }
3769 if (index.is_register() && index.reg().is(rcx)) {
3770 Result fresh = allocator_->Allocate();
3771 shift_amount = index;
3772 index = fresh;
3773 __ movq(index.reg(), rcx);
3774 }
3775 // There could be references to rcx in the frame. Allocating will
3776 // spill them, otherwise spill explicitly.
3777 if (shift_amount.is_valid()) {
3778 frame_->Spill(rcx);
3779 } else {
3780 shift_amount = allocator()->Allocate(rcx);
3781 }
3782 ASSERT(shift_amount.is_register());
3783 ASSERT(shift_amount.reg().is(rcx));
3784 ASSERT(allocator_->count(rcx) == 1);
3785
3786 // We will mutate the index register and possibly the object register.
3787 // The case where they are somehow the same register is handled
3788 // because we only mutate them in the case where the receiver is a
3789 // heap object and the index is not.
3790 object.ToRegister();
3791 index.ToRegister();
3792 frame_->Spill(object.reg());
3793 frame_->Spill(index.reg());
3794
3795 // We need a single extra temporary register.
3796 Result temp = allocator()->Allocate();
3797 ASSERT(temp.is_valid());
3798
3799 // There is no virtual frame effect from here up to the final result
3800 // push.
3801
3802 // If the receiver is a smi trigger the slow case.
3803 __ JumpIfSmi(object.reg(), &slow_case);
3804
3805 // If the index is negative or non-smi trigger the slow case.
3806 __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
3807
3808 // Untag the index.
3809 __ SmiToInteger32(index.reg(), index.reg());
3810
3811 __ bind(&try_again_with_new_string);
3812 // Fetch the instance type of the receiver into rcx.
3813 __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
3814 __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
3815 // If the receiver is not a string trigger the slow case.
3816 __ testb(rcx, Immediate(kIsNotStringMask));
3817 __ j(not_zero, &slow_case);
3818
3819 // Check for index out of range.
3820 __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
3821 __ j(greater_equal, &slow_case);
3822 // Reload the instance type (into the temp register this time).
3823 __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
3824 __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
3825
3826 // We need special handling for non-flat strings.
3827 ASSERT_EQ(0, kSeqStringTag);
3828 __ testb(temp.reg(), Immediate(kStringRepresentationMask));
3829 __ j(not_zero, &not_a_flat_string);
3830 // Check for 1-byte or 2-byte string.
3831 ASSERT_EQ(0, kTwoByteStringTag);
3832 __ testb(temp.reg(), Immediate(kStringEncodingMask));
3833 __ j(not_zero, &ascii_string);
3834
3835 // 2-byte string.
3836 // Load the 2-byte character code into the temp register.
3837 __ movzxwl(temp.reg(), FieldOperand(object.reg(),
3838 index.reg(),
3839 times_2,
3840 SeqTwoByteString::kHeaderSize));
3841 __ jmp(&got_char_code);
3842
3843 // ASCII string.
3844 __ bind(&ascii_string);
3845 // Load the byte into the temp register.
3846 __ movzxbl(temp.reg(), FieldOperand(object.reg(),
3847 index.reg(),
3848 times_1,
3849 SeqAsciiString::kHeaderSize));
3850 __ bind(&got_char_code);
3851 __ Integer32ToSmi(temp.reg(), temp.reg());
3852 __ jmp(&end);
3853
3854 // Handle non-flat strings.
3855 __ bind(&not_a_flat_string);
3856 __ and_(temp.reg(), Immediate(kStringRepresentationMask));
3857 __ cmpb(temp.reg(), Immediate(kConsStringTag));
3858 __ j(not_equal, &slow_case);
3859
3860 // ConsString.
3861 // Check that the right-hand side is the empty string (i.e., whether this is really a
3862 // flat string in a cons string). If that is not the case we would rather go
3863 // to the runtime system now, to flatten the string.
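  // Illustrative note: only a cons string whose second half is the empty
  // string (and which therefore behaves like a flat string) is handled
  // inline; anything else goes to the runtime.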
3864 __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
3865 __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
3866 __ j(not_equal, &slow_case);
3867 // Get the first of the two strings.
3868 __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
3869 __ jmp(&try_again_with_new_string);
3870
3871 __ bind(&slow_case);
3872 // Move the undefined value into the result register, which will
3873 // trigger the slow case.
3874 __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
3875
3876 __ bind(&end);
3877 frame_->Push(&temp);
3878}
3879
3880
3881void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3882 ASSERT(args->length() == 1);
3883 Load(args->at(0));
3884 Result value = frame_->Pop();
3885 value.ToRegister();
3886 ASSERT(value.is_valid());
3887 Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
3888 value.Unuse();
3889 destination()->Split(positive_smi);
3890}
3891
3892
3893void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3894 ASSERT(args->length() == 1);
3895 Load(args->at(0));
3896 Result value = frame_->Pop();
3897 value.ToRegister();
3898 ASSERT(value.is_valid());
3899 Condition is_smi = masm_->CheckSmi(value.reg());
3900 value.Unuse();
3901 destination()->Split(is_smi);
3902}
3903
3904
3905void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3906 // Conditionally generate a log call.
3907 // Args:
3908 // 0 (literal string): The type of logging (corresponds to the flags).
3909 // This is used to determine whether or not to generate the log call.
3910 // 1 (string): Format string. Access the string at argument index 2
3911 // with '%2s' (see Logger::LogRuntime for all the formats).
3912 // 2 (array): Arguments to the format string.
3913 ASSERT_EQ(args->length(), 3);
3914#ifdef ENABLE_LOGGING_AND_PROFILING
3915 if (ShouldGenerateLog(args->at(0))) {
3916 Load(args->at(1));
3917 Load(args->at(2));
3918 frame_->CallRuntime(Runtime::kLog, 2);
3919 }
3920#endif
3921 // Finally, we're expected to leave a value on the top of the stack.
3922 frame_->Push(Factory::undefined_value());
3923}
3924
3925
3926void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3927 ASSERT(args->length() == 2);
3928
3929 // Load the two objects into registers and perform the comparison.
3930 Load(args->at(0));
3931 Load(args->at(1));
3932 Result right = frame_->Pop();
3933 Result left = frame_->Pop();
3934 right.ToRegister();
3935 left.ToRegister();
3936 __ cmpq(right.reg(), left.reg());
3937 right.Unuse();
3938 left.Unuse();
3939 destination()->Split(equal);
3940}
3941
3942
3943void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
3944 ASSERT(args->length() == 0);
3945 // The rbp value is aligned, so it carries a smi tag (without necessarily
3946 // being padded as a smi), so it should not be treated as a smi.
3947 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3948 Result rbp_as_smi = allocator_->Allocate();
3949 ASSERT(rbp_as_smi.is_valid());
3950 __ movq(rbp_as_smi.reg(), rbp);
3951 frame_->Push(&rbp_as_smi);
3952}
3953
3954
3955void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3956 ASSERT(args->length() == 0);
3957 frame_->SpillAll();
3958 __ push(rsi);
3959
3960 // Make sure the frame is aligned like the OS expects.
3961 static const int kFrameAlignment = OS::ActivationFrameAlignment();
3962 if (kFrameAlignment > 0) {
3963 ASSERT(IsPowerOf2(kFrameAlignment));
3964 __ movq(rbx, rsp); // Save in AMD-64 abi callee-saved register.
3965 __ and_(rsp, Immediate(-kFrameAlignment));
3966 }
3967
3968 // Call V8::RandomPositiveSmi().
3969 __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
3970
3971 // Restore stack pointer from callee-saved register.
3972 if (kFrameAlignment > 0) {
3973 __ movq(rsp, rbx);
3974 }
3975
3976 __ pop(rsi);
3977 Result result = allocator_->Allocate(rax);
3978 frame_->Push(&result);
3979}
3980
3981
3982void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
3983 JumpTarget done;
3984 JumpTarget call_runtime;
3985 ASSERT(args->length() == 1);
3986
3987 // Load number and duplicate it.
3988 Load(args->at(0));
3989 frame_->Dup();
3990
3991 // Get the number into an unaliased register and load it onto the
3992 // floating point stack still leaving one copy on the frame.
3993 Result number = frame_->Pop();
3994 number.ToRegister();
3995 frame_->Spill(number.reg());
3996 FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
3997 number.Unuse();
3998
3999 // Perform the operation on the number.
4000 switch (op) {
4001 case SIN:
4002 __ fsin();
4003 break;
4004 case COS:
4005 __ fcos();
4006 break;
4007 }
4008
4009 // Go slow case if argument to operation is out of range.
4010 Result eax_reg = allocator()->Allocate(rax);
4011 ASSERT(eax_reg.is_valid());
4012 __ fnstsw_ax();
4013 __ testl(rax, Immediate(0x0400)); // Bit 10 is condition flag C2.
4014 eax_reg.Unuse();
4015 call_runtime.Branch(not_zero);
4016
4017 // Allocate heap number for result if possible.
4018 Result scratch = allocator()->Allocate();
4019 Result heap_number = allocator()->Allocate();
4020 __ AllocateHeapNumber(heap_number.reg(),
4021 scratch.reg(),
4022 call_runtime.entry_label());
4023 scratch.Unuse();
4024
4025 // Store the result in the allocated heap number.
4026 __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
4027 // Replace the extra copy of the argument with the result.
4028 frame_->SetElementAt(0, &heap_number);
4029 done.Jump();
4030
4031 call_runtime.Bind();
4032 // Free ST(0) which was not popped before calling into the runtime.
4033 __ ffree(0);
4034 Result answer;
4035 switch (op) {
4036 case SIN:
4037 answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
4038 break;
4039 case COS:
4040 answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
4041 break;
4042 }
4043 frame_->Push(&answer);
4044 done.Bind();
4045}
4046
4047
4048void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
4049 ASSERT_EQ(2, args->length());
4050
4051 Load(args->at(0));
4052 Load(args->at(1));
4053
4054 Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
4055 frame_->Push(&answer);
4056}
4057
4058
4059void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4060 ASSERT(args->length() == 1);
4061 JumpTarget leave, null, function, non_function_constructor;
4062 Load(args->at(0)); // Load the object.
4063 Result obj = frame_->Pop();
4064 obj.ToRegister();
4065 frame_->Spill(obj.reg());
4066
4067 // If the object is a smi, we return null.
4068 Condition is_smi = masm_->CheckSmi(obj.reg());
4069 null.Branch(is_smi);
4070
4071 // Check that the object is a JS object but take special care of JS
4072 // functions to make sure they have 'Function' as their class.
4073
4074 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
4075 null.Branch(below);
4076
4077 // As long as JS_FUNCTION_TYPE is the last instance type and it is
4078 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4079 // LAST_JS_OBJECT_TYPE.
4080 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4081 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4082 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
4083 function.Branch(equal);
4084
4085 // Check if the constructor in the map is a function.
4086 __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
4087 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
4088 non_function_constructor.Branch(not_equal);
4089
4090 // The obj register now contains the constructor function. Grab the
4091 // instance class name from there.
4092 __ movq(obj.reg(),
4093 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
4094 __ movq(obj.reg(),
4095 FieldOperand(obj.reg(),
4096 SharedFunctionInfo::kInstanceClassNameOffset));
4097 frame_->Push(&obj);
4098 leave.Jump();
4099
4100 // Functions have class 'Function'.
4101 function.Bind();
4102 frame_->Push(Factory::function_class_symbol());
4103 leave.Jump();
4104
4105 // Objects with a non-function constructor have class 'Object'.
4106 non_function_constructor.Bind();
4107 frame_->Push(Factory::Object_symbol());
4108 leave.Jump();
4109
4110 // Non-JS objects have class null.
4111 null.Bind();
4112 frame_->Push(Factory::null_value());
4113
4114 // All done.
4115 leave.Bind();
4116}
4117
4118
4119void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4120 ASSERT(args->length() == 2);
4121 JumpTarget leave;
4122 Load(args->at(0)); // Load the object.
4123 Load(args->at(1)); // Load the value.
4124 Result value = frame_->Pop();
4125 Result object = frame_->Pop();
4126 value.ToRegister();
4127 object.ToRegister();
4128
4129 // if (object->IsSmi()) return value.
4130 Condition is_smi = masm_->CheckSmi(object.reg());
4131 leave.Branch(is_smi, &value);
4132
4133 // It is a heap object - get its map.
4134 Result scratch = allocator_->Allocate();
4135 ASSERT(scratch.is_valid());
4136 // if (!object->IsJSValue()) return value.
4137 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
4138 leave.Branch(not_equal, &value);
4139
4140 // Store the value.
4141 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
4142 // Update the write barrier. Save the value as it will be
4143 // overwritten by the write barrier code and is needed afterward.
4144 Result duplicate_value = allocator_->Allocate();
4145 ASSERT(duplicate_value.is_valid());
4146 __ movq(duplicate_value.reg(), value.reg());
4147 // The object register is also overwritten by the write barrier and
4148 // possibly aliased in the frame.
4149 frame_->Spill(object.reg());
4150 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
4151 scratch.reg());
4152 object.Unuse();
4153 scratch.Unuse();
4154 duplicate_value.Unuse();
4155
4156 // Leave.
4157 leave.Bind(&value);
4158 frame_->Push(&value);
4159}
4160
4161
4162void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4163 ASSERT(args->length() == 1);
4164 JumpTarget leave;
4165 Load(args->at(0)); // Load the object.
4166 frame_->Dup();
4167 Result object = frame_->Pop();
4168 object.ToRegister();
4169 ASSERT(object.is_valid());
4170 // if (object->IsSmi()) return object.
4171 Condition is_smi = masm_->CheckSmi(object.reg());
4172 leave.Branch(is_smi);
4173 // It is a heap object - get map.
4174 Result temp = allocator()->Allocate();
4175 ASSERT(temp.is_valid());
4176 // if (!object->IsJSValue()) return object.
4177 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
4178 leave.Branch(not_equal);
4179 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
4180 object.Unuse();
4181 frame_->SetElementAt(0, &temp);
4182 leave.Bind();
4183}
4184
4185
4186// -----------------------------------------------------------------------------
4187// CodeGenerator implementation of Expressions
4188
4189void CodeGenerator::LoadAndSpill(Expression* expression) {
4190 // TODO(x64): No architecture-specific code. Move to shared location.
4191 ASSERT(in_spilled_code());
4192 set_in_spilled_code(false);
4193 Load(expression);
4194 frame_->SpillAll();
4195 set_in_spilled_code(true);
4196}
4197
4198
4199void CodeGenerator::Load(Expression* expr) {
4200#ifdef DEBUG
4201 int original_height = frame_->height();
4202#endif
4203 ASSERT(!in_spilled_code());
4204 JumpTarget true_target;
4205 JumpTarget false_target;
4206 ControlDestination dest(&true_target, &false_target, true);
4207 LoadCondition(expr, &dest, false);
4208
4209 if (dest.false_was_fall_through()) {
4210 // The false target was just bound.
4211 JumpTarget loaded;
4212 frame_->Push(Factory::false_value());
4213 // There may be dangling jumps to the true target.
4214 if (true_target.is_linked()) {
4215 loaded.Jump();
4216 true_target.Bind();
4217 frame_->Push(Factory::true_value());
4218 loaded.Bind();
4219 }
4220
4221 } else if (dest.is_used()) {
4222 // There is true, and possibly false, control flow (with true as
4223 // the fall through).
4224 JumpTarget loaded;
4225 frame_->Push(Factory::true_value());
4226 if (false_target.is_linked()) {
4227 loaded.Jump();
4228 false_target.Bind();
4229 frame_->Push(Factory::false_value());
4230 loaded.Bind();
4231 }
4232
4233 } else {
4234 // We have a valid value on top of the frame, but we still may
4235 // have dangling jumps to the true and false targets from nested
4236 // subexpressions (eg, the left subexpressions of the
4237 // short-circuited boolean operators).
4238 ASSERT(has_valid_frame());
4239 if (true_target.is_linked() || false_target.is_linked()) {
4240 JumpTarget loaded;
4241 loaded.Jump(); // Don't lose the current TOS.
4242 if (true_target.is_linked()) {
4243 true_target.Bind();
4244 frame_->Push(Factory::true_value());
4245 if (false_target.is_linked()) {
4246 loaded.Jump();
4247 }
4248 }
4249 if (false_target.is_linked()) {
4250 false_target.Bind();
4251 frame_->Push(Factory::false_value());
4252 }
4253 loaded.Bind();
4254 }
4255 }
4256
4257 ASSERT(has_valid_frame());
4258 ASSERT(frame_->height() == original_height + 1);
4259}
4260
4261
4262// Emit code to load the value of an expression to the top of the
4263// frame. If the expression is boolean-valued it may be compiled (or
4264// partially compiled) into control flow to the control destination.
4265// If force_control is true, control flow is forced.
4266void CodeGenerator::LoadCondition(Expression* x,
4267 ControlDestination* dest,
4268 bool force_control) {
4269 ASSERT(!in_spilled_code());
4270 int original_height = frame_->height();
4271
4272 { CodeGenState new_state(this, dest);
4273 Visit(x);
4274
4275 // If we hit a stack overflow, we may not have actually visited
4276 // the expression. In that case, we ensure that we have a
4277 // valid-looking frame state because we will continue to generate
4278 // code as we unwind the C++ stack.
4279 //
4280 // It's possible to have both a stack overflow and a valid frame
4281 // state (eg, a subexpression overflowed, visiting it returned
4282 // with a dummied frame state, and visiting this expression
4283 // returned with a normal-looking state).
4284 if (HasStackOverflow() &&
4285 !dest->is_used() &&
4286 frame_->height() == original_height) {
4287 dest->Goto(true);
4288 }
4289 }
4290
4291 if (force_control && !dest->is_used()) {
4292 // Convert the TOS value into flow to the control destination.
4293 // TODO(X64): Make control flow to control destinations work.
4294 ToBoolean(dest);
4295 }
4296
4297 ASSERT(!(force_control && !dest->is_used()));
4298 ASSERT(dest->is_used() || frame_->height() == original_height + 1);
4299}
4300
4301
4302// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
4303// convert it to a boolean in the condition code register or jump to
4304// 'false_target'/'true_target' as appropriate.
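// JavaScript example (illustrative): false, undefined, and the smi 0 go to
// the false target below, true and non-zero smis to the true target;
// everything else (strings, heap numbers, objects) falls through to
// ToBooleanStub.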
4305void CodeGenerator::ToBoolean(ControlDestination* dest) {
4306 Comment cmnt(masm_, "[ ToBoolean");
4307
4308 // The value to convert should be popped from the frame.
4309 Result value = frame_->Pop();
4310 value.ToRegister();
4311 // Fast case checks.
4312
4313 // 'false' => false.
4314 __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
4315 dest->false_target()->Branch(equal);
4316
4317 // 'true' => true.
4318 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
4319 dest->true_target()->Branch(equal);
4320
4321 // 'undefined' => false.
4322 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4323 dest->false_target()->Branch(equal);
4324
4325 // Smi => false iff zero.
4326 __ SmiCompare(value.reg(), Smi::FromInt(0));
4327 dest->false_target()->Branch(equal);
4328 Condition is_smi = masm_->CheckSmi(value.reg());
4329 dest->true_target()->Branch(is_smi);
4330
4331 // Call the stub for all other cases.
4332 frame_->Push(&value); // Undo the Pop() from above.
4333 ToBooleanStub stub;
4334 Result temp = frame_->CallStub(&stub, 1);
4335 // Convert the result to a condition code.
4336 __ testq(temp.reg(), temp.reg());
4337 temp.Unuse();
4338 dest->Split(not_equal);
4339}
4340
4341
4342void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
4343 UNIMPLEMENTED();
4344 // TODO(X64): Implement security policy for loads of smis.
4345}
4346
4347
4348bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
4349 return false;
4350}
4351
4352//------------------------------------------------------------------------------
4353// CodeGenerator implementation of variables, lookups, and stores.
4354
4355Reference::Reference(CodeGenerator* cgen, Expression* expression)
4356 : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
4357 cgen->LoadReference(this);
4358}
4359
4360
4361Reference::~Reference() {
4362 cgen_->UnloadReference(this);
4363}
4364
4365
4366void CodeGenerator::LoadReference(Reference* ref) {
4367 // References are loaded from both spilled and unspilled code. Set the
4368 // state to unspilled to allow that (and explicitly spill after
4369 // construction at the construction sites).
4370 bool was_in_spilled_code = in_spilled_code_;
4371 in_spilled_code_ = false;
4372
4373 Comment cmnt(masm_, "[ LoadReference");
4374 Expression* e = ref->expression();
4375 Property* property = e->AsProperty();
4376 Variable* var = e->AsVariableProxy()->AsVariable();
4377
4378 if (property != NULL) {
4379 // The expression is either a property or a variable proxy that rewrites
4380 // to a property.
4381 Load(property->obj());
4382 // We use a named reference if the key is a literal symbol, unless it is
4383 // a string that can be legally parsed as an integer. This is because
4384 // otherwise we will not get into the slow case code that handles [] on
4385 // String objects.
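    // JavaScript example (illustrative): 'o.length' and 'o["length"]' become
    // NAMED references, while 'o[0]' and 'o["0"]' stay KEYED so that indexing
    // into String objects still reaches the slow case.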
4386 Literal* literal = property->key()->AsLiteral();
4387 uint32_t dummy;
4388 if (literal != NULL &&
4389 literal->handle()->IsSymbol() &&
4390 !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
4391 ref->set_type(Reference::NAMED);
4392 } else {
4393 Load(property->key());
4394 ref->set_type(Reference::KEYED);
4395 }
4396 } else if (var != NULL) {
4397 // The expression is a variable proxy that does not rewrite to a
4398 // property. Global variables are treated as named property references.
4399 if (var->is_global()) {
4400 LoadGlobal();
4401 ref->set_type(Reference::NAMED);
4402 } else {
4403 ASSERT(var->slot() != NULL);
4404 ref->set_type(Reference::SLOT);
4405 }
4406 } else {
4407 // Anything else is a runtime error.
4408 Load(e);
4409 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
4410 }
4411
4412 in_spilled_code_ = was_in_spilled_code;
4413}
4414
4415
4416void CodeGenerator::UnloadReference(Reference* ref) {
4417 // Pop a reference from the stack while preserving TOS.
4418 Comment cmnt(masm_, "[ UnloadReference");
4419 frame_->Nip(ref->size());
4420}
4421
4422
4423Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
4424 // Currently, this assertion will fail if we try to assign to
4425 // a constant variable that is constant because it is read-only
4426 // (such as the variable referring to a named function expression).
4427 // We need to implement assignments to read-only variables.
4428 // Ideally, we should do this during AST generation (by converting
4429 // such assignments into expression statements); however, in general
4430 // we may not be able to make the decision until past AST generation,
4431 // that is when the entire program is known.
4432 ASSERT(slot != NULL);
4433 int index = slot->index();
4434 switch (slot->type()) {
4435 case Slot::PARAMETER:
4436 return frame_->ParameterAt(index);
4437
4438 case Slot::LOCAL:
4439 return frame_->LocalAt(index);
4440
4441 case Slot::CONTEXT: {
4442 // Follow the context chain if necessary.
4443 ASSERT(!tmp.is(rsi)); // do not overwrite context register
4444 Register context = rsi;
4445 int chain_length = scope()->ContextChainLength(slot->var()->scope());
4446 for (int i = 0; i < chain_length; i++) {
4447 // Load the closure.
4448 // (All contexts, even 'with' contexts, have a closure,
4449 // and it is the same for all contexts inside a function.
4450 // There is no need to go to the function context first.)
4451 __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
4452 // Load the function context (which is the incoming, outer context).
4453 __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
4454 context = tmp;
4455 }
4456 // We may have a 'with' context now. Get the function context.
4457 // (In fact this mov may never be needed, since the scope analysis
4458 // may not permit a direct context access in this case and thus we are
4459 // always at a function context. However it is safe to dereference be-
4460 // cause the function context of a function context is itself. Before
4461 // deleting this mov we should try to create a counter-example first,
4462 // though...)
4463 __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
4464 return ContextOperand(tmp, index);
4465 }
4466
4467 default:
4468 UNREACHABLE();
4469 return Operand(rsp, 0);
4470 }
4471}
4472
4473
4474Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
4475 Result tmp,
4476 JumpTarget* slow) {
4477 ASSERT(slot->type() == Slot::CONTEXT);
4478 ASSERT(tmp.is_register());
4479 Register context = rsi;
4480
4481 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
4482 if (s->num_heap_slots() > 0) {
4483 if (s->calls_eval()) {
4484 // Check that extension is NULL.
4485 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4486 Immediate(0));
4487 slow->Branch(not_equal, not_taken);
4488 }
4489 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4490 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4491 context = tmp.reg();
4492 }
4493 }
4494 // Check that last extension is NULL.
4495 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
4496 slow->Branch(not_equal, not_taken);
4497 __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
4498 return ContextOperand(tmp.reg(), slot->index());
4499}
4500
4501
4502void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
4503 if (slot->type() == Slot::LOOKUP) {
4504 ASSERT(slot->var()->is_dynamic());
4505
4506 JumpTarget slow;
4507 JumpTarget done;
4508 Result value;
4509
4510 // Generate fast-case code for variables that might be shadowed by
4511 // eval-introduced variables. Eval is used a lot without
4512 // introducing variables. In those cases, we do not want to
4513 // perform a runtime call for all variables in the scope
4514 // containing the eval.
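    // JavaScript example (illustrative): in 'function f(s) { eval(s); return x; }'
    // the lookup of 'x' may be shadowed by a variable the eval introduces, so
    // only a guarded fast path is emitted here.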
4515 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
4516 value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
4517 // If there was no control flow to slow, we can exit early.
4518 if (!slow.is_linked()) {
4519 frame_->Push(&value);
4520 return;
4521 }
4522
4523 done.Jump(&value);
4524
4525 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
4526 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
4527 // Only generate the fast case for locals that rewrite to slots.
4528 // This rules out argument loads.
4529 if (potential_slot != NULL) {
4530 // Allocate a fresh register to use as a temp in
4531 // ContextSlotOperandCheckExtensions and to hold the result
4532 // value.
4533 value = allocator_->Allocate();
4534 ASSERT(value.is_valid());
4535 __ movq(value.reg(),
4536 ContextSlotOperandCheckExtensions(potential_slot,
4537 value,
4538 &slow));
4539 if (potential_slot->var()->mode() == Variable::CONST) {
4540 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4541 done.Branch(not_equal, &value);
4542 __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4543 }
4544 // There is always control flow to slow from
4545 // ContextSlotOperandCheckExtensions so we have to jump around
4546 // it.
4547 done.Jump(&value);
4548 }
4549 }
4550
4551 slow.Bind();
4552 // A runtime call is inevitable. We eagerly sync frame elements
4553 // to memory so that we can push the arguments directly into place
4554 // on top of the frame.
4555 frame_->SyncRange(0, frame_->element_count() - 1);
4556 frame_->EmitPush(rsi);
4557 __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
4558 frame_->EmitPush(kScratchRegister);
4559 if (typeof_state == INSIDE_TYPEOF) {
4560 value =
4561 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
4562 } else {
4563 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4564 }
4565
4566 done.Bind(&value);
4567 frame_->Push(&value);
4568
4569 } else if (slot->var()->mode() == Variable::CONST) {
4570 // Const slots may contain 'the hole' value (the constant hasn't been
4571 // initialized yet) which needs to be converted into the 'undefined'
4572 // value.
4573 //
4574 // We currently spill the virtual frame because constants use the
4575 // potentially unsafe direct-frame access of SlotOperand.
4576 VirtualFrame::SpilledScope spilled_scope;
4577 Comment cmnt(masm_, "[ Load const");
4578 JumpTarget exit;
4579 __ movq(rcx, SlotOperand(slot, rcx));
4580 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4581 exit.Branch(not_equal);
4582 __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
4583 exit.Bind();
4584 frame_->EmitPush(rcx);
4585
4586 } else if (slot->type() == Slot::PARAMETER) {
4587 frame_->PushParameterAt(slot->index());
4588
4589 } else if (slot->type() == Slot::LOCAL) {
4590 frame_->PushLocalAt(slot->index());
4591
4592 } else {
4593 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
4594 // here.
4595 //
4596 // The use of SlotOperand below is safe for an unspilled frame
4597 // because it will always be a context slot.
4598 ASSERT(slot->type() == Slot::CONTEXT);
4599 Result temp = allocator_->Allocate();
4600 ASSERT(temp.is_valid());
4601 __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
4602 frame_->Push(&temp);
4603 }
4604}
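
// A minimal illustrative sketch (hypothetical integer stand-ins, not V8's
// tagged representation): the "[ Load const" path above reads a const slot
// and converts the hole sentinel, which marks a not-yet-initialized const,
// into 'undefined' before pushing the result.
static int SketchLoadConstSlot(int slot_value,
                               int hole_sentinel,
                               int undefined_value) {
  // An uninitialized const still holds the hole; the program must see
  // 'undefined' instead of the internal sentinel.
  return (slot_value == hole_sentinel) ? undefined_value : slot_value;
}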
4605
4606
4607void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
4608 TypeofState state) {
4609 LoadFromSlot(slot, state);
4610
4611 // Bail out quickly if we're not using lazy arguments allocation.
4612 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
4613
4614 // ... or if the slot isn't a non-parameter arguments slot.
4615 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
4616
4617 // Pop the loaded value from the stack.
4618 Result value = frame_->Pop();
4619
4620 // If the loaded value is a constant, we know whether the arguments
4621 // object has been lazily allocated yet.
4622 if (value.is_constant()) {
4623 if (value.handle()->IsTheHole()) {
4624 Result arguments = StoreArgumentsObject(false);
4625 frame_->Push(&arguments);
4626 } else {
4627 frame_->Push(&value);
4628 }
4629 return;
4630 }
4631
4632 // The loaded value is in a register. If it is the sentinel that
4633 // indicates that we haven't loaded the arguments object yet, we
4634 // need to do it now.
4635 JumpTarget exit;
4636 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4637 frame_->Push(&value);
4638 exit.Branch(not_equal);
4639 Result arguments = StoreArgumentsObject(false);
4640 frame_->SetElementAt(0, &arguments);
4641 exit.Bind();
4642}
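
// A minimal illustrative sketch (hypothetical names): under lazy arguments
// allocation the arguments slot starts out holding the hole as a sentinel,
// so loading it means "materialize the arguments object on first use",
// which is exactly the check-and-replace performed above.
static int SketchLoadLazyArguments(int* arguments_slot,
                                   int hole_sentinel,
                                   int (*allocate_arguments_object)()) {
  if (*arguments_slot == hole_sentinel) {
    // First use: build the arguments object and overwrite the sentinel so
    // later loads take the fast path.
    *arguments_slot = allocate_arguments_object();
  }
  return *arguments_slot;
}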
4643
4644
4645void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
4646 if (slot->type() == Slot::LOOKUP) {
4647 ASSERT(slot->var()->is_dynamic());
4648
4649 // For now, just do a runtime call. Since the call is inevitable,
4650 // we eagerly sync the virtual frame so we can directly push the
4651 // arguments into place.
4652 frame_->SyncRange(0, frame_->element_count() - 1);
4653
4654 frame_->EmitPush(rsi);
4655 frame_->EmitPush(slot->var()->name());
4656
4657 Result value;
4658 if (init_state == CONST_INIT) {
4659 // Same as the case for a normal store, but ignores the attributes
4660 // (e.g. READ_ONLY) of the context slot so that we can initialize const
4661 // properties (introduced via eval("const foo = (some expr);")). Also,
4662 // uses the current function context instead of the top context.
4663 //
4664 // Note that we must declare the foo upon entry of eval(), via a
4665 // context slot declaration, but we cannot initialize it at the same
4666 // time, because the const declaration may be at the end of the eval
4667 // code (sigh...) and the const variable may have been used before
4668 // (where its value is 'undefined'). Thus, we can only do the
4669 // initialization when we actually encounter the expression and when
4670 // the expression operands are defined and valid, and thus we need the
4671 // split into 2 operations: declaration of the context slot followed
4672 // by initialization.
4673 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4674 } else {
4675 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
4676 }
4677 // Storing a variable must keep the (new) value on the expression
4678 // stack. This is necessary for compiling chained assignment
4679 // expressions.
4680 frame_->Push(&value);
4681 } else {
4682 ASSERT(!slot->var()->is_dynamic());
4683
4684 JumpTarget exit;
4685 if (init_state == CONST_INIT) {
4686 ASSERT(slot->var()->mode() == Variable::CONST);
4687 // Only the first const initialization must be executed (the slot
4688 // still contains 'the hole' value). When the assignment is executed,
4689 // the code is identical to a normal store (see below).
4690 //
4691 // We spill the frame in the code below because the direct-frame
4692 // access of SlotOperand is potentially unsafe with an unspilled
4693 // frame.
4694 VirtualFrame::SpilledScope spilled_scope;
4695 Comment cmnt(masm_, "[ Init const");
4696 __ movq(rcx, SlotOperand(slot, rcx));
4697 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4698 exit.Branch(not_equal);
4699 }
4700
4701 // We must execute the store. Storing a variable must keep the (new)
4702 // value on the stack. This is necessary for compiling assignment
4703 // expressions.
4704 //
4705 // Note: We will reach here even with slot->var()->mode() ==
4706 // Variable::CONST because of const declarations which will initialize
4707 // consts to 'the hole' value and by doing so, end up calling this code.
4708 if (slot->type() == Slot::PARAMETER) {
4709 frame_->StoreToParameterAt(slot->index());
4710 } else if (slot->type() == Slot::LOCAL) {
4711 frame_->StoreToLocalAt(slot->index());
4712 } else {
4713 // The other slot types (LOOKUP and GLOBAL) cannot reach here.
4714 //
4715 // The use of SlotOperand below is safe for an unspilled frame
4716 // because the slot is a context slot.
4717 ASSERT(slot->type() == Slot::CONTEXT);
4718 frame_->Dup();
4719 Result value = frame_->Pop();
4720 value.ToRegister();
4721 Result start = allocator_->Allocate();
4722 ASSERT(start.is_valid());
4723 __ movq(SlotOperand(slot, start.reg()), value.reg());
4724 // RecordWrite may destroy the value registers.
4725 //
4726 // TODO(204): Avoid actually spilling when the value is not
4727 // needed (probably the common case).
4728 frame_->Spill(value.reg());
4729 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4730 Result temp = allocator_->Allocate();
4731 ASSERT(temp.is_valid());
4732 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
4733 // The results start, value, and temp are unused by going out of
4734 // scope.
4735 }
4736
4737 exit.Bind();
4738 }
4739}
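
// A minimal illustrative sketch (hypothetical integer stand-ins): the
// CONST_INIT case above only stores while the slot still holds the hole,
// so re-executing a const declaration cannot overwrite the value written
// by the first execution.
static void SketchInitConstSlot(int* slot, int hole_sentinel, int value) {
  if (*slot == hole_sentinel) {
    *slot = value;  // first, and only effective, initialization
  }
}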
4740
4741
4742Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
4743 Slot* slot,
4744 TypeofState typeof_state,
4745 JumpTarget* slow) {
4746 // Check that no extension objects have been created by calls to
4747 // eval from the current scope to the global scope.
4748 Register context = rsi;
4749 Result tmp = allocator_->Allocate();
4750 ASSERT(tmp.is_valid()); // All non-reserved registers were available.
4751
4752 Scope* s = scope();
4753 while (s != NULL) {
4754 if (s->num_heap_slots() > 0) {
4755 if (s->calls_eval()) {
4756 // Check that extension is NULL.
4757 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4758 Immediate(0));
4759 slow->Branch(not_equal, not_taken);
4760 }
4761 // Load next context in chain.
4762 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4763 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4764 context = tmp.reg();
4765 }
4766 // If no outer scope calls eval, we do not need to check more
4767 // context extensions. If we have reached an eval scope, we check
4768 // all extensions from this point.
4769 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
4770 s = s->outer_scope();
4771 }
4772
4773 if (s->is_eval_scope()) {
4774 // Loop up the context chain. There is no frame effect so it is
4775 // safe to use raw labels here.
4776 Label next, fast;
4777 if (!context.is(tmp.reg())) {
4778 __ movq(tmp.reg(), context);
4779 }
4780 // Load map for comparison into register, outside loop.
4781 __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
4782 __ bind(&next);
4783 // Terminate at global context.
4784 __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
4785 __ j(equal, &fast);
4786 // Check that extension is NULL.
4787 __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
4788 slow->Branch(not_equal);
4789 // Load next context in chain.
4790 __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
4791 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4792 __ jmp(&next);
4793 __ bind(&fast);
4794 }
4795 tmp.Unuse();
4796
4797 // All extension objects were empty and it is safe to use a global
4798 // load IC call.
4799 LoadGlobal();
4800 frame_->Push(slot->var()->name());
4801 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
4802 ? RelocInfo::CODE_TARGET
4803 : RelocInfo::CODE_TARGET_CONTEXT;
4804 Result answer = frame_->CallLoadIC(mode);
4805 // A test rax instruction following the call signals that the inobject
4806 // property case was inlined. Ensure that there is not a test rax
4807 // instruction here.
4808 masm_->nop();
4809 // Discard the global object. The result is in answer.
4810 frame_->Drop();
4811 return answer;
4812}
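
// A minimal illustrative sketch (hypothetical, simplified layout): inside
// an eval scope the number of contexts is not known at compile time, so
// the code above emits a run-time loop that walks the chain until it
// reaches the global context (the map check), bailing out as soon as a
// link carries an extension object.
struct SketchEvalContext {
  SketchEvalContext* next;  // context of the closure (next link up)
  bool is_global_context;   // terminates the walk (the map check above)
  void* extension;          // non-NULL if eval added bindings
};

static bool SketchNoExtensionsUpToGlobal(SketchEvalContext* context) {
  while (!context->is_global_context) {
    if (context->extension != NULL) return false;  // dynamic lookup needed
    context = context->next;
  }
  return true;  // safe to use a global load IC
}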
4813
4814
4815void CodeGenerator::LoadGlobal() {
4816 if (in_spilled_code()) {
4817 frame_->EmitPush(GlobalObject());
4818 } else {
4819 Result temp = allocator_->Allocate();
4820 __ movq(temp.reg(), GlobalObject());
4821 frame_->Push(&temp);
4822 }
4823}
4824
4825
4826void CodeGenerator::LoadGlobalReceiver() {
4827 Result temp = allocator_->Allocate();
4828 Register reg = temp.reg();
4829 __ movq(reg, GlobalObject());
4830 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
4831 frame_->Push(&temp);
4832}
4833
4834
4835ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
4836 if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
4837 ASSERT(scope_->arguments_shadow() != NULL);
4838 // We don't want to do lazy arguments allocation for functions that
4839 // have heap-allocated contexts, because it interferes with the
4840 // uninitialized const tracking in the context objects.
4841 return (scope_->num_heap_slots() > 0)
4842 ? EAGER_ARGUMENTS_ALLOCATION
4843 : LAZY_ARGUMENTS_ALLOCATION;
4844}
4845
4846
4847Result CodeGenerator::StoreArgumentsObject(bool initial) {
4848 ArgumentsAllocationMode mode = ArgumentsMode();
4849 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
4850
4851 Comment cmnt(masm_, "[ store arguments object");
4852 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
4853 // When using lazy arguments allocation, we store the hole value
4854 // as a sentinel indicating that the arguments object hasn't been
4855 // allocated yet.
4856 frame_->Push(Factory::the_hole_value());
4857 } else {
4858 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
4859 frame_->PushFunction();
4860 frame_->PushReceiverSlotAddress();
4861 frame_->Push(Smi::FromInt(scope_->num_parameters()));
4862 Result result = frame_->CallStub(&stub, 3);
4863 frame_->Push(&result);
4864 }
4865
4866 { Reference shadow_ref(this, scope_->arguments_shadow());
4867 Reference arguments_ref(this, scope_->arguments());
4868 ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
4869 // Here we rely on the convenient property that references to slots
4870 // take up zero space in the frame (i.e., it doesn't matter that the
4871 // stored value is actually below the reference on the frame).
4872 JumpTarget done;
4873 bool skip_arguments = false;
4874 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
4875 // We have to skip storing into the arguments slot if it has
4876 // already been written to. This can happen if the function
4877 // has a local variable named 'arguments'.
4878 LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
4879 Result arguments = frame_->Pop();
4880 if (arguments.is_constant()) {
4881 // We have to skip updating the arguments object if it has
4882 // been assigned a proper value.
4883 skip_arguments = !arguments.handle()->IsTheHole();
4884 } else {
4885 __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex);
4886 arguments.Unuse();
4887 done.Branch(not_equal);
4888 }
4889 }
4890 if (!skip_arguments) {
4891 arguments_ref.SetValue(NOT_CONST_INIT);
4892 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
4893 }
4894 shadow_ref.SetValue(NOT_CONST_INIT);
4895 }
4896 return frame_->Pop();
4897}
4898
4899
Steve Blockd0582a62009-12-15 09:54:21 +00004900void CodeGenerator::LoadTypeofExpression(Expression* expr) {
4901 // Special handling of identifiers as subexpressions of typeof.
4902 Variable* variable = expr->AsVariableProxy()->AsVariable();
Steve Blocka7e24c12009-10-30 11:49:00 +00004903 if (variable != NULL && !variable->is_this() && variable->is_global()) {
Steve Blockd0582a62009-12-15 09:54:21 +00004904 // For a global variable we build the property reference
4905 // <global>.<variable> and perform a (regular non-contextual) property
4906 // load to make sure we do not get reference errors.
Steve Blocka7e24c12009-10-30 11:49:00 +00004907 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
4908 Literal key(variable->name());
Steve Blocka7e24c12009-10-30 11:49:00 +00004909 Property property(&global, &key, RelocInfo::kNoPosition);
Steve Blockd0582a62009-12-15 09:54:21 +00004910 Reference ref(this, &property);
4911 ref.GetValue();
4912 } else if (variable != NULL && variable->slot() != NULL) {
4913 // For a variable that rewrites to a slot, we signal it is the immediate
4914 // subexpression of a typeof.
4915 LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00004916 } else {
Steve Blockd0582a62009-12-15 09:54:21 +00004917 // Anything else can be handled normally.
4918 Load(expr);
Steve Blocka7e24c12009-10-30 11:49:00 +00004919 }
4920}
4921
4922
4923void CodeGenerator::Comparison(Condition cc,
4924 bool strict,
4925 ControlDestination* dest) {
4926 // Strict only makes sense for equality comparisons.
4927 ASSERT(!strict || cc == equal);
4928
4929 Result left_side;
4930 Result right_side;
4931 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
4932 if (cc == greater || cc == less_equal) {
4933 cc = ReverseCondition(cc);
4934 left_side = frame_->Pop();
4935 right_side = frame_->Pop();
4936 } else {
4937 right_side = frame_->Pop();
4938 left_side = frame_->Pop();
4939 }
4940 ASSERT(cc == less || cc == equal || cc == greater_equal);
4941
4942 // If either side is a constant smi, optimize the comparison.
4943 bool left_side_constant_smi =
4944 left_side.is_constant() && left_side.handle()->IsSmi();
4945 bool right_side_constant_smi =
4946 right_side.is_constant() && right_side.handle()->IsSmi();
4947 bool left_side_constant_null =
4948 left_side.is_constant() && left_side.handle()->IsNull();
4949 bool right_side_constant_null =
4950 right_side.is_constant() && right_side.handle()->IsNull();
4951
4952 if (left_side_constant_smi || right_side_constant_smi) {
4953 if (left_side_constant_smi && right_side_constant_smi) {
4954 // Trivial case, comparing two constants.
4955 int left_value = Smi::cast(*left_side.handle())->value();
4956 int right_value = Smi::cast(*right_side.handle())->value();
4957 switch (cc) {
4958 case less:
4959 dest->Goto(left_value < right_value);
4960 break;
4961 case equal:
4962 dest->Goto(left_value == right_value);
4963 break;
4964 case greater_equal:
4965 dest->Goto(left_value >= right_value);
4966 break;
4967 default:
4968 UNREACHABLE();
4969 }
4970 } else { // Only one side is a constant Smi.
4971 // If left side is a constant Smi, reverse the operands.
4972 // Since one side is a constant Smi, conversion order does not matter.
4973 if (left_side_constant_smi) {
4974 Result temp = left_side;
4975 left_side = right_side;
4976 right_side = temp;
4977 cc = ReverseCondition(cc);
4978 // This may reintroduce greater or less_equal as the value of cc.
4979 // CompareStub and the inline code both support all values of cc.
4980 }
4981 // Implement comparison against a constant Smi, inlining the case
4982 // where both sides are Smis.
4983 left_side.ToRegister();
4984
4985 // Here we split control flow to the stub call and inlined cases
4986 // before finally splitting it to the control destination. We use
4987 // a jump target and branching to duplicate the virtual frame at
4988 // the first split. We manually handle the off-frame references
4989 // by reconstituting them on the non-fall-through path.
4990 JumpTarget is_smi;
4991 Register left_reg = left_side.reg();
4992 Handle<Object> right_val = right_side.handle();
4993
4994 Condition left_is_smi = masm_->CheckSmi(left_side.reg());
4995 is_smi.Branch(left_is_smi);
4996
4997 // Set up and call the compare stub.
4998 CompareStub stub(cc, strict);
4999 Result result = frame_->CallStub(&stub, &left_side, &right_side);
5000 result.ToRegister();
5001 __ testq(result.reg(), result.reg());
5002 result.Unuse();
5003 dest->true_target()->Branch(cc);
5004 dest->false_target()->Jump();
5005
5006 is_smi.Bind();
5007 left_side = Result(left_reg);
5008 right_side = Result(right_val);
5009 // Test smi equality and order with a signed integer comparison.
5010 // Both sides are known smis, so the constant smi can be compared directly.
Steve Block3ce2e202009-11-05 08:53:23 +00005011 __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
Steve Blocka7e24c12009-10-30 11:49:00 +00005012 left_side.Unuse();
5013 right_side.Unuse();
5014 dest->Split(cc);
5015 }
5016 } else if (cc == equal &&
5017 (left_side_constant_null || right_side_constant_null)) {
5018 // To make null checks efficient, we check if either the left side or
5019 // the right side is the constant 'null'.
5020 // If so, we optimize the code by inlining a null check instead of
5021 // calling the (very) general runtime routine for checking equality.
5022 Result operand = left_side_constant_null ? right_side : left_side;
5023 right_side.Unuse();
5024 left_side.Unuse();
5025 operand.ToRegister();
5026 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
5027 if (strict) {
5028 operand.Unuse();
5029 dest->Split(equal);
5030 } else {
5031 // The 'null' value is only equal to 'undefined' if using non-strict
5032 // comparisons.
5033 dest->true_target()->Branch(equal);
5034 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
5035 dest->true_target()->Branch(equal);
5036 Condition is_smi = masm_->CheckSmi(operand.reg());
5037 dest->false_target()->Branch(is_smi);
5038
5039 // It can be an undetectable object.
5040 // Use a scratch register in preference to spilling operand.reg().
5041 Result temp = allocator()->Allocate();
5042 ASSERT(temp.is_valid());
5043 __ movq(temp.reg(),
Steve Block3ce2e202009-11-05 08:53:23 +00005044 FieldOperand(operand.reg(), HeapObject::kMapOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00005045 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
5046 Immediate(1 << Map::kIsUndetectable));
5047 temp.Unuse();
5048 operand.Unuse();
5049 dest->Split(not_zero);
5050 }
5051 } else { // Neither side is a constant Smi or null.
5052 // If either side is a non-smi constant, skip the smi check.
5053 bool known_non_smi =
5054 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
5055 (right_side.is_constant() && !right_side.handle()->IsSmi());
5056 left_side.ToRegister();
5057 right_side.ToRegister();
5058
5059 if (known_non_smi) {
5060 // When non-smi, call out to the compare stub.
5061 CompareStub stub(cc, strict);
5062 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5063 // The result is a Smi, which is negative, zero, or positive.
Steve Block3ce2e202009-11-05 08:53:23 +00005064 __ SmiTest(answer.reg()); // Sets both zero and sign flag.
Steve Blocka7e24c12009-10-30 11:49:00 +00005065 answer.Unuse();
5066 dest->Split(cc);
5067 } else {
5068 // Here we split control flow to the stub call and inlined cases
5069 // before finally splitting it to the control destination. We use
5070 // a jump target and branching to duplicate the virtual frame at
5071 // the first split. We manually handle the off-frame references
5072 // by reconstituting them on the non-fall-through path.
5073 JumpTarget is_smi;
5074 Register left_reg = left_side.reg();
5075 Register right_reg = right_side.reg();
5076
5077 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
5078 is_smi.Branch(both_smi);
5079 // When non-smi, call out to the compare stub.
5080 CompareStub stub(cc, strict);
5081 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
Steve Block3ce2e202009-11-05 08:53:23 +00005082 __ SmiTest(answer.reg()); // Sets both zero and sign flags.
Steve Blocka7e24c12009-10-30 11:49:00 +00005083 answer.Unuse();
5084 dest->true_target()->Branch(cc);
5085 dest->false_target()->Jump();
5086
5087 is_smi.Bind();
5088 left_side = Result(left_reg);
5089 right_side = Result(right_reg);
Steve Block3ce2e202009-11-05 08:53:23 +00005090 __ SmiCompare(left_side.reg(), right_side.reg());
Steve Blocka7e24c12009-10-30 11:49:00 +00005091 right_side.Unuse();
5092 left_side.Unuse();
5093 dest->Split(cc);
5094 }
5095 }
5096}
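
// A minimal illustrative sketch of the condition reversal above: '>' and
// '<=' are never emitted directly; the operands are swapped and the
// condition mirrored so that only less, equal and greater_equal remain,
// which is what preserves the operand conversion order noted in the
// comment above (ECMA-262). Names below are hypothetical.
enum SketchCondition {
  kSketchLess,
  kSketchEqual,
  kSketchGreaterEqual,
  kSketchGreater,
  kSketchLessEqual
};

static bool SketchCompareInts(SketchCondition cc, int left, int right) {
  // Reduce 'greater' and 'less_equal' to the mirrored comparison with
  // swapped operands:  a > b  <=>  b < a,   a <= b  <=>  b >= a.
  if (cc == kSketchGreater) {
    return SketchCompareInts(kSketchLess, right, left);
  }
  if (cc == kSketchLessEqual) {
    return SketchCompareInts(kSketchGreaterEqual, right, left);
  }
  switch (cc) {
    case kSketchLess:  return left < right;
    case kSketchEqual: return left == right;
    default:           return left >= right;  // kSketchGreaterEqual
  }
}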
5097
5098
5099class DeferredInlineBinaryOperation: public DeferredCode {
5100 public:
5101 DeferredInlineBinaryOperation(Token::Value op,
5102 Register dst,
5103 Register left,
5104 Register right,
5105 OverwriteMode mode)
5106 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
5107 set_comment("[ DeferredInlineBinaryOperation");
5108 }
5109
5110 virtual void Generate();
5111
5112 private:
5113 Token::Value op_;
5114 Register dst_;
5115 Register left_;
5116 Register right_;
5117 OverwriteMode mode_;
5118};
5119
5120
5121void DeferredInlineBinaryOperation::Generate() {
Steve Blockd0582a62009-12-15 09:54:21 +00005122 GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
5123 stub.GenerateCall(masm_, left_, right_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005124 if (!dst_.is(rax)) __ movq(dst_, rax);
5125}
5126
5127
5128void CodeGenerator::GenericBinaryOperation(Token::Value op,
5129 SmiAnalysis* type,
5130 OverwriteMode overwrite_mode) {
5131 Comment cmnt(masm_, "[ BinaryOperation");
5132 Comment cmnt_token(masm_, Token::String(op));
5133
5134 if (op == Token::COMMA) {
5135 // Simply discard left value.
5136 frame_->Nip(1);
5137 return;
5138 }
5139
5140 // Set the flags based on the operation, type and loop nesting level.
5141 GenericBinaryFlags flags;
5142 switch (op) {
5143 case Token::BIT_OR:
5144 case Token::BIT_AND:
5145 case Token::BIT_XOR:
5146 case Token::SHL:
5147 case Token::SHR:
5148 case Token::SAR:
5149 // Bit operations always assume they likely operate on Smis. Still, only
5150 // generate the inline Smi check code if this operation is part of a loop.
5151 flags = (loop_nesting() > 0)
Steve Blockd0582a62009-12-15 09:54:21 +00005152 ? NO_SMI_CODE_IN_STUB
5153 : NO_GENERIC_BINARY_FLAGS;
Steve Blocka7e24c12009-10-30 11:49:00 +00005154 break;
5155
5156 default:
5157 // By default only inline the Smi check code for likely smis if this
5158 // operation is part of a loop.
5159 flags = ((loop_nesting() > 0) && type->IsLikelySmi())
Steve Blockd0582a62009-12-15 09:54:21 +00005160 ? NO_SMI_CODE_IN_STUB
5161 : NO_GENERIC_BINARY_FLAGS;
Steve Blocka7e24c12009-10-30 11:49:00 +00005162 break;
5163 }
5164
5165 Result right = frame_->Pop();
5166 Result left = frame_->Pop();
5167
5168 if (op == Token::ADD) {
5169 bool left_is_string = left.is_constant() && left.handle()->IsString();
5170 bool right_is_string = right.is_constant() && right.handle()->IsString();
5171 if (left_is_string || right_is_string) {
5172 frame_->Push(&left);
5173 frame_->Push(&right);
5174 Result answer;
5175 if (left_is_string) {
5176 if (right_is_string) {
5177 // TODO(lrn): if both are constant strings
5178 // -- do a compile time cons, if allocation during codegen is allowed.
5179 answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
5180 } else {
5181 answer =
5182 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
5183 }
5184 } else if (right_is_string) {
5185 answer =
5186 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
5187 }
5188 frame_->Push(&answer);
5189 return;
5190 }
5191 // Neither operand is known to be a string.
5192 }
5193
5194 bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
5195 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
5196 bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
5197 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
5198 bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
5199
5200 if (left_is_smi && right_is_smi) {
5201 // Compute the constant result at compile time, and leave it on the frame.
5202 int left_int = Smi::cast(*left.handle())->value();
5203 int right_int = Smi::cast(*right.handle())->value();
5204 if (FoldConstantSmis(op, left_int, right_int)) return;
5205 }
5206
5207 if (left_is_non_smi || right_is_non_smi) {
5208 // Set flag so that we go straight to the slow case, with no smi code.
5209 generate_no_smi_code = true;
5210 } else if (right_is_smi) {
5211 ConstantSmiBinaryOperation(op, &left, right.handle(),
5212 type, false, overwrite_mode);
5213 return;
5214 } else if (left_is_smi) {
5215 ConstantSmiBinaryOperation(op, &right, left.handle(),
5216 type, true, overwrite_mode);
5217 return;
5218 }
5219
Steve Blockd0582a62009-12-15 09:54:21 +00005220 if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005221 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
5222 } else {
5223 frame_->Push(&left);
5224 frame_->Push(&right);
5225 // If we know the arguments aren't smis, use the binary operation stub
5226 // that does not check for the fast smi case.
5227 // The same stub (with NO_SMI_CODE_IN_STUB) is used in both cases.
5228 if (generate_no_smi_code) {
Steve Blockd0582a62009-12-15 09:54:21 +00005229 flags = NO_SMI_CODE_IN_STUB;
Steve Blocka7e24c12009-10-30 11:49:00 +00005230 }
5231 GenericBinaryOpStub stub(op, overwrite_mode, flags);
5232 Result answer = frame_->CallStub(&stub, 2);
5233 frame_->Push(&answer);
5234 }
5235}
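
// A minimal illustrative sketch (the smi range below is an assumption for
// the sketch, not necessarily the x64 value): when both operands are
// constant smis, FoldConstantSmis lets the generator above compute the
// result at compile time and emit no arithmetic code at all. Folding must
// fail when the result would leave the smi range, so that case still
// reaches the generic stub.
static const int kSketchMaxSmiValue = (1 << 30) - 1;
static const int kSketchMinSmiValue = -(1 << 30);

static bool SketchFoldSmiAdd(int left, int right, int* result) {
  // Operands are assumed to already be in the smi range. Reject sums that
  // leave the range without ever overflowing int.
  if (right > 0 && left > kSketchMaxSmiValue - right) return false;
  if (right < 0 && left < kSketchMinSmiValue - right) return false;
  *result = left + right;
  return true;
}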
5236
5237
5238// Emit a LoadIC call to get the value from receiver and leave it in
5239// dst. The receiver register is restored after the call.
5240class DeferredReferenceGetNamedValue: public DeferredCode {
5241 public:
5242 DeferredReferenceGetNamedValue(Register dst,
5243 Register receiver,
5244 Handle<String> name)
5245 : dst_(dst), receiver_(receiver), name_(name) {
5246 set_comment("[ DeferredReferenceGetNamedValue");
5247 }
5248
5249 virtual void Generate();
5250
5251 Label* patch_site() { return &patch_site_; }
5252
5253 private:
5254 Label patch_site_;
5255 Register dst_;
5256 Register receiver_;
5257 Handle<String> name_;
5258};
5259
5260
5261void DeferredReferenceGetNamedValue::Generate() {
5262 __ push(receiver_);
5263 __ Move(rcx, name_);
5264 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5265 __ Call(ic, RelocInfo::CODE_TARGET);
5266 // The call must be followed by a test rax instruction to indicate
5267 // that the inobject property case was inlined.
5268 //
5269 // Store the delta to the map check instruction here in the test
5270 // instruction. Use masm_-> instead of the __ macro since the
5271 // latter can't return a value.
5272 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5273 // Here we use masm_-> instead of the __ macro because this is the
5274 // instruction that gets patched and coverage code gets in the way.
5275 masm_->testl(rax, Immediate(-delta_to_patch_site));
5276 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5277
5278 if (!dst_.is(rax)) __ movq(dst_, rax);
5279 __ pop(receiver_);
5280}
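
// A minimal illustrative sketch of the patching protocol above, using
// plain integer offsets rather than real code addresses: the distance
// from the map check (the patch site) to the test instruction emitted
// after the IC call is stored, negated, as the 32-bit immediate of that
// test instruction, so code that only knows the call's return address can
// locate the inlined map check again.
static int SketchEncodeDeltaAsImmediate(int delta_to_patch_site) {
  return -delta_to_patch_site;  // what testl(rax, Immediate(...)) stores
}

static int SketchRecoverPatchSite(int test_instruction_offset,
                                  int encoded_immediate) {
  // Walking backwards by the encoded (negated) delta lands back on the
  // map check instruction that needs a new map written into it.
  return test_instruction_offset + encoded_immediate;
}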
5281
5282
5283void DeferredInlineSmiAdd::Generate() {
Steve Blockd0582a62009-12-15 09:54:21 +00005284 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5285 igostub.GenerateCall(masm_, dst_, value_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005286 if (!dst_.is(rax)) __ movq(dst_, rax);
5287}
5288
5289
5290void DeferredInlineSmiAddReversed::Generate() {
Steve Blockd0582a62009-12-15 09:54:21 +00005291 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5292 igostub.GenerateCall(masm_, value_, dst_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005293 if (!dst_.is(rax)) __ movq(dst_, rax);
5294}
5295
5296
5297void DeferredInlineSmiSub::Generate() {
Steve Blockd0582a62009-12-15 09:54:21 +00005298 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5299 igostub.GenerateCall(masm_, dst_, value_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005300 if (!dst_.is(rax)) __ movq(dst_, rax);
5301}
5302
5303
5304void DeferredInlineSmiOperation::Generate() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005305 // For mod we don't generate all the Smi code inline.
5306 GenericBinaryOpStub stub(
5307 op_,
5308 overwrite_mode_,
Steve Blockd0582a62009-12-15 09:54:21 +00005309 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
5310 stub.GenerateCall(masm_, src_, value_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005311 if (!dst_.is(rax)) __ movq(dst_, rax);
5312}
5313
5314
5315void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
5316 Result* operand,
5317 Handle<Object> value,
5318 SmiAnalysis* type,
5319 bool reversed,
5320 OverwriteMode overwrite_mode) {
5321 // NOTE: This is an attempt to inline (a bit) more of the code for
5322 // some possible smi operations (like + and -) when (at least) one
5323 // of the operands is a constant smi.
5324 // Consumes the argument "operand".
5325
5326 // TODO(199): Optimize some special cases of operations involving a
5327 // smi literal (multiply by 2, shift by 0, etc.).
5328 if (IsUnsafeSmi(value)) {
5329 Result unsafe_operand(value);
5330 if (reversed) {
5331 LikelySmiBinaryOperation(op, &unsafe_operand, operand,
5332 overwrite_mode);
5333 } else {
5334 LikelySmiBinaryOperation(op, operand, &unsafe_operand,
5335 overwrite_mode);
5336 }
5337 ASSERT(!operand->is_valid());
5338 return;
5339 }
5340
5341 // Get the literal value.
5342 Smi* smi_value = Smi::cast(*value);
5343 int int_value = smi_value->value();
5344
5345 switch (op) {
5346 case Token::ADD: {
5347 operand->ToRegister();
5348 frame_->Spill(operand->reg());
5349 DeferredCode* deferred = NULL;
5350 if (reversed) {
5351 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
5352 smi_value,
5353 overwrite_mode);
5354 } else {
5355 deferred = new DeferredInlineSmiAdd(operand->reg(),
5356 smi_value,
5357 overwrite_mode);
5358 }
5359 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5360 __ SmiAddConstant(operand->reg(),
5361 operand->reg(),
Steve Block3ce2e202009-11-05 08:53:23 +00005362 smi_value,
Steve Blocka7e24c12009-10-30 11:49:00 +00005363 deferred->entry_label());
5364 deferred->BindExit();
5365 frame_->Push(operand);
5366 break;
5367 }
5368
5369 case Token::SUB: {
5370 if (reversed) {
5371 Result constant_operand(value);
5372 LikelySmiBinaryOperation(op, &constant_operand, operand,
5373 overwrite_mode);
5374 } else {
5375 operand->ToRegister();
5376 frame_->Spill(operand->reg());
5377 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
5378 smi_value,
5379 overwrite_mode);
5380 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5381 // A smi currently fits in a 32-bit Immediate.
5382 __ SmiSubConstant(operand->reg(),
5383 operand->reg(),
Steve Block3ce2e202009-11-05 08:53:23 +00005384 smi_value,
Steve Blocka7e24c12009-10-30 11:49:00 +00005385 deferred->entry_label());
5386 deferred->BindExit();
5387 frame_->Push(operand);
5388 }
5389 break;
5390 }
5391
5392 case Token::SAR:
5393 if (reversed) {
5394 Result constant_operand(value);
5395 LikelySmiBinaryOperation(op, &constant_operand, operand,
5396 overwrite_mode);
5397 } else {
5398 // Only the least significant 5 bits of the shift value are used.
5399 // In the slow case, this masking is done inside the runtime call.
5400 int shift_value = int_value & 0x1f;
5401 operand->ToRegister();
5402 frame_->Spill(operand->reg());
5403 DeferredInlineSmiOperation* deferred =
5404 new DeferredInlineSmiOperation(op,
5405 operand->reg(),
5406 operand->reg(),
5407 smi_value,
5408 overwrite_mode);
5409 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5410 __ SmiShiftArithmeticRightConstant(operand->reg(),
5411 operand->reg(),
5412 shift_value);
5413 deferred->BindExit();
5414 frame_->Push(operand);
5415 }
5416 break;
5417
5418 case Token::SHR:
5419 if (reversed) {
5420 Result constant_operand(value);
5421 LikelySmiBinaryOperation(op, &constant_operand, operand,
5422 overwrite_mode);
5423 } else {
5424 // Only the least significant 5 bits of the shift value are used.
5425 // In the slow case, this masking is done inside the runtime call.
5426 int shift_value = int_value & 0x1f;
5427 operand->ToRegister();
5428 Result answer = allocator()->Allocate();
5429 ASSERT(answer.is_valid());
5430 DeferredInlineSmiOperation* deferred =
5431 new DeferredInlineSmiOperation(op,
5432 answer.reg(),
5433 operand->reg(),
5434 smi_value,
5435 overwrite_mode);
5436 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5437 __ SmiShiftLogicalRightConstant(answer.reg(),
Steve Block3ce2e202009-11-05 08:53:23 +00005438 operand->reg(),
5439 shift_value,
5440 deferred->entry_label());
Steve Blocka7e24c12009-10-30 11:49:00 +00005441 deferred->BindExit();
5442 operand->Unuse();
5443 frame_->Push(&answer);
5444 }
5445 break;
5446
5447 case Token::SHL:
5448 if (reversed) {
5449 Result constant_operand(value);
5450 LikelySmiBinaryOperation(op, &constant_operand, operand,
5451 overwrite_mode);
5452 } else {
5453 // Only the least significant 5 bits of the shift value are used.
5454 // In the slow case, this masking is done inside the runtime call.
5455 int shift_value = int_value & 0x1f;
5456 operand->ToRegister();
5457 if (shift_value == 0) {
5458 // Spill operand so it can be overwritten in the slow case.
5459 frame_->Spill(operand->reg());
5460 DeferredInlineSmiOperation* deferred =
5461 new DeferredInlineSmiOperation(op,
5462 operand->reg(),
5463 operand->reg(),
5464 smi_value,
5465 overwrite_mode);
5466 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5467 deferred->BindExit();
5468 frame_->Push(operand);
5469 } else {
5470 // Use a fresh temporary for nonzero shift values.
5471 Result answer = allocator()->Allocate();
5472 ASSERT(answer.is_valid());
5473 DeferredInlineSmiOperation* deferred =
5474 new DeferredInlineSmiOperation(op,
5475 answer.reg(),
5476 operand->reg(),
5477 smi_value,
5478 overwrite_mode);
5479 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5480 __ SmiShiftLeftConstant(answer.reg(),
5481 operand->reg(),
5482 shift_value,
5483 deferred->entry_label());
5484 deferred->BindExit();
5485 operand->Unuse();
5486 frame_->Push(&answer);
5487 }
5488 }
5489 break;
5490
5491 case Token::BIT_OR:
5492 case Token::BIT_XOR:
5493 case Token::BIT_AND: {
5494 operand->ToRegister();
5495 frame_->Spill(operand->reg());
5496 if (reversed) {
5497 // Bit operations with a constant smi are commutative.
5498 // We can swap left and right operands with no problem.
5499 // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
5500 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
5501 }
5502 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
5503 operand->reg(),
5504 operand->reg(),
5505 smi_value,
5506 overwrite_mode);
5507 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5508 if (op == Token::BIT_AND) {
Steve Block3ce2e202009-11-05 08:53:23 +00005509 __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
Steve Blocka7e24c12009-10-30 11:49:00 +00005510 } else if (op == Token::BIT_XOR) {
5511 if (int_value != 0) {
Steve Block3ce2e202009-11-05 08:53:23 +00005512 __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
Steve Blocka7e24c12009-10-30 11:49:00 +00005513 }
5514 } else {
5515 ASSERT(op == Token::BIT_OR);
5516 if (int_value != 0) {
Steve Block3ce2e202009-11-05 08:53:23 +00005517 __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
Steve Blocka7e24c12009-10-30 11:49:00 +00005518 }
5519 }
5520 deferred->BindExit();
5521 frame_->Push(operand);
5522 break;
5523 }
5524
5525 // Generate inline code for mod of powers of 2 and negative powers of 2.
5526 case Token::MOD:
5527 if (!reversed &&
5528 int_value != 0 &&
5529 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
5530 operand->ToRegister();
5531 frame_->Spill(operand->reg());
Steve Block3ce2e202009-11-05 08:53:23 +00005532 DeferredCode* deferred =
5533 new DeferredInlineSmiOperation(op,
5534 operand->reg(),
5535 operand->reg(),
5536 smi_value,
5537 overwrite_mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00005538 // Check for negative or non-Smi left hand side.
5539 __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
5540 if (int_value < 0) int_value = -int_value;
5541 if (int_value == 1) {
Steve Block3ce2e202009-11-05 08:53:23 +00005542 __ Move(operand->reg(), Smi::FromInt(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00005543 } else {
Steve Block3ce2e202009-11-05 08:53:23 +00005544 __ SmiAndConstant(operand->reg(),
5545 operand->reg(),
5546 Smi::FromInt(int_value - 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00005547 }
5548 deferred->BindExit();
5549 frame_->Push(operand);
5550 break; // This break only applies if we generated code for MOD.
5551 }
5552 // Fall through if we did not find a power of 2 on the right hand side!
5553 // The next case must be the default.
5554
5555 default: {
5556 Result constant_operand(value);
5557 if (reversed) {
5558 LikelySmiBinaryOperation(op, &constant_operand, operand,
5559 overwrite_mode);
5560 } else {
5561 LikelySmiBinaryOperation(op, operand, &constant_operand,
5562 overwrite_mode);
5563 }
5564 break;
5565 }
5566 }
5567 ASSERT(!operand->is_valid());
5568}
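
// A minimal illustrative sketch of two arithmetic tricks used above.
// SketchModPowerOfTwo: for non-negative n and a (possibly negative)
// power-of-two modulus m,  n % m == n & (|m| - 1), which is why only a
// positive-smi check on the left operand is needed. SketchSwapOverwrite:
// (2 * mode) % 3 maps 0->0, 1->2, 2->1, swapping the "overwrite left" and
// "overwrite right" modes when the operands are swapped.
static int SketchModPowerOfTwo(int n, int m) {
  // Caller guarantees n >= 0 and that m or -m is a power of two.
  if (m < 0) m = -m;   // for n >= 0 the remainder only depends on |m|
  return n & (m - 1);  // m == 1 gives n & 0 == 0, as emitted above
}

static int SketchSwapOverwrite(int overwrite_mode) {
  // 0 (no overwrite) stays 0; 1 (overwrite left) becomes 2 (overwrite
  // right) and vice versa.
  return (2 * overwrite_mode) % 3;
}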
5569
5570void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
5571 Result* left,
5572 Result* right,
5573 OverwriteMode overwrite_mode) {
5574 // Special handling of div and mod because they use fixed registers.
5575 if (op == Token::DIV || op == Token::MOD) {
5576 // We need rax as the quotient register, rdx as the remainder
5577 // register, neither left nor right in rax or rdx, and left copied
5578 // to rax.
5579 Result quotient;
5580 Result remainder;
5581 bool left_is_in_rax = false;
5582 // Step 1: get rax for quotient.
5583 if ((left->is_register() && left->reg().is(rax)) ||
5584 (right->is_register() && right->reg().is(rax))) {
5585 // One or both is in rax. Use a fresh non-rdx register for
5586 // them.
5587 Result fresh = allocator_->Allocate();
5588 ASSERT(fresh.is_valid());
5589 if (fresh.reg().is(rdx)) {
5590 remainder = fresh;
5591 fresh = allocator_->Allocate();
5592 ASSERT(fresh.is_valid());
5593 }
5594 if (left->is_register() && left->reg().is(rax)) {
5595 quotient = *left;
5596 *left = fresh;
5597 left_is_in_rax = true;
5598 }
5599 if (right->is_register() && right->reg().is(rax)) {
5600 quotient = *right;
5601 *right = fresh;
5602 }
5603 __ movq(fresh.reg(), rax);
5604 } else {
5605 // Neither left nor right is in rax.
5606 quotient = allocator_->Allocate(rax);
5607 }
5608 ASSERT(quotient.is_register() && quotient.reg().is(rax));
5609 ASSERT(!(left->is_register() && left->reg().is(rax)));
5610 ASSERT(!(right->is_register() && right->reg().is(rax)));
5611
5612 // Step 2: get rdx for remainder if necessary.
5613 if (!remainder.is_valid()) {
5614 if ((left->is_register() && left->reg().is(rdx)) ||
5615 (right->is_register() && right->reg().is(rdx))) {
5616 Result fresh = allocator_->Allocate();
5617 ASSERT(fresh.is_valid());
5618 if (left->is_register() && left->reg().is(rdx)) {
5619 remainder = *left;
5620 *left = fresh;
5621 }
5622 if (right->is_register() && right->reg().is(rdx)) {
5623 remainder = *right;
5624 *right = fresh;
5625 }
5626 __ movq(fresh.reg(), rdx);
5627 } else {
5628 // Neither left nor right is in rdx.
5629 remainder = allocator_->Allocate(rdx);
5630 }
5631 }
5632 ASSERT(remainder.is_register() && remainder.reg().is(rdx));
5633 ASSERT(!(left->is_register() && left->reg().is(rdx)));
5634 ASSERT(!(right->is_register() && right->reg().is(rdx)));
5635
5636 left->ToRegister();
5637 right->ToRegister();
5638 frame_->Spill(rax);
5639 frame_->Spill(rdx);
5640
5641 // Check that left and right are smi tagged.
5642 DeferredInlineBinaryOperation* deferred =
5643 new DeferredInlineBinaryOperation(op,
5644 (op == Token::DIV) ? rax : rdx,
5645 left->reg(),
5646 right->reg(),
5647 overwrite_mode);
5648 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5649
5650 if (op == Token::DIV) {
5651 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
5652 deferred->BindExit();
5653 left->Unuse();
5654 right->Unuse();
5655 frame_->Push(&quotient);
5656 } else {
5657 ASSERT(op == Token::MOD);
5658 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
5659 deferred->BindExit();
5660 left->Unuse();
5661 right->Unuse();
5662 frame_->Push(&remainder);
5663 }
5664 return;
5665 }
5666
5667 // Special handling of shift operations because they use fixed
5668 // registers.
5669 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
5670 // Move left out of rcx if necessary.
5671 if (left->is_register() && left->reg().is(rcx)) {
5672 *left = allocator_->Allocate();
5673 ASSERT(left->is_valid());
5674 __ movq(left->reg(), rcx);
5675 }
5676 right->ToRegister(rcx);
5677 left->ToRegister();
5678 ASSERT(left->is_register() && !left->reg().is(rcx));
5679 ASSERT(right->is_register() && right->reg().is(rcx));
5680
5681 // We will modify right, it must be spilled.
5682 frame_->Spill(rcx);
5683
5684 // Use a fresh answer register to avoid spilling the left operand.
5685 Result answer = allocator_->Allocate();
5686 ASSERT(answer.is_valid());
5687 // Check that both operands are smis using the answer register as a
5688 // temporary.
5689 DeferredInlineBinaryOperation* deferred =
5690 new DeferredInlineBinaryOperation(op,
5691 answer.reg(),
5692 left->reg(),
5693 rcx,
5694 overwrite_mode);
5695 __ movq(answer.reg(), left->reg());
5696 __ or_(answer.reg(), rcx);
5697 __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
5698
5699 // Perform the operation.
5700 switch (op) {
5701 case Token::SAR:
5702 __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
5703 break;
5704 case Token::SHR: {
5705 __ SmiShiftLogicalRight(answer.reg(),
5706 left->reg(),
5707 rcx,
5708 deferred->entry_label());
5709 break;
5710 }
5711 case Token::SHL: {
5712 __ SmiShiftLeft(answer.reg(),
5713 left->reg(),
5714 rcx,
5715 deferred->entry_label());
5716 break;
5717 }
5718 default:
5719 UNREACHABLE();
5720 }
5721 deferred->BindExit();
5722 left->Unuse();
5723 right->Unuse();
5724 frame_->Push(&answer);
5725 return;
5726 }
5727
5728 // Handle the other binary operations.
5729 left->ToRegister();
5730 right->ToRegister();
5731 // A newly allocated register, answer, is used to hold the answer. The
5732 // registers containing left and right are not modified, so they don't
5733 // need to be spilled in the fast case.
5734 Result answer = allocator_->Allocate();
5735 ASSERT(answer.is_valid());
5736
5737 // Perform the smi tag check.
5738 DeferredInlineBinaryOperation* deferred =
5739 new DeferredInlineBinaryOperation(op,
5740 answer.reg(),
5741 left->reg(),
5742 right->reg(),
5743 overwrite_mode);
5744 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5745
5746 switch (op) {
5747 case Token::ADD:
5748 __ SmiAdd(answer.reg(),
5749 left->reg(),
5750 right->reg(),
5751 deferred->entry_label());
5752 break;
5753
5754 case Token::SUB:
5755 __ SmiSub(answer.reg(),
5756 left->reg(),
5757 right->reg(),
5758 deferred->entry_label());
5759 break;
5760
5761 case Token::MUL: {
5762 __ SmiMul(answer.reg(),
5763 left->reg(),
5764 right->reg(),
5765 deferred->entry_label());
5766 break;
5767 }
5768
5769 case Token::BIT_OR:
5770 __ SmiOr(answer.reg(), left->reg(), right->reg());
5771 break;
5772
5773 case Token::BIT_AND:
5774 __ SmiAnd(answer.reg(), left->reg(), right->reg());
5775 break;
5776
5777 case Token::BIT_XOR:
5778 __ SmiXor(answer.reg(), left->reg(), right->reg());
5779 break;
5780
5781 default:
5782 UNREACHABLE();
5783 break;
5784 }
5785 deferred->BindExit();
5786 left->Unuse();
5787 right->Unuse();
5788 frame_->Push(&answer);
5789}
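
// A minimal illustrative sketch (a hypothetical one-bit tag, not the
// exact x64 smi encoding): the combined check above ORs both operands
// into a scratch register and tests the result once. Any tag bit set in
// either operand survives the OR, so a single branch rejects the
// "not both smis" case.
static const unsigned kSketchNonSmiTagMask = 1u;  // assumed heap-object tag

static bool SketchBothSmiTagged(unsigned left, unsigned right) {
  return ((left | right) & kSketchNonSmiTagMask) == 0;
}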
5790
5791
5792#undef __
5793#define __ ACCESS_MASM(masm)
5794
5795
5796Handle<String> Reference::GetName() {
5797 ASSERT(type_ == NAMED);
5798 Property* property = expression_->AsProperty();
5799 if (property == NULL) {
5800 // Global variable reference treated as a named property reference.
5801 VariableProxy* proxy = expression_->AsVariableProxy();
5802 ASSERT(proxy->AsVariable() != NULL);
5803 ASSERT(proxy->AsVariable()->is_global());
5804 return proxy->name();
5805 } else {
5806 Literal* raw_name = property->key()->AsLiteral();
5807 ASSERT(raw_name != NULL);
5808 return Handle<String>(String::cast(*raw_name->handle()));
5809 }
5810}
5811
5812
Steve Blockd0582a62009-12-15 09:54:21 +00005813void Reference::GetValue() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005814 ASSERT(!cgen_->in_spilled_code());
5815 ASSERT(cgen_->HasValidEntryRegisters());
5816 ASSERT(!is_illegal());
5817 MacroAssembler* masm = cgen_->masm();
5818
5819 // Record the source position for the property load.
5820 Property* property = expression_->AsProperty();
5821 if (property != NULL) {
5822 cgen_->CodeForSourcePosition(property->position());
5823 }
5824
5825 switch (type_) {
5826 case SLOT: {
5827 Comment cmnt(masm, "[ Load from Slot");
5828 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
5829 ASSERT(slot != NULL);
Steve Blockd0582a62009-12-15 09:54:21 +00005830 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00005831 break;
5832 }
5833
5834 case NAMED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00005835 Variable* var = expression_->AsVariableProxy()->AsVariable();
5836 bool is_global = var != NULL;
5837 ASSERT(!is_global || var->is_global());
5838
5839 // Do not inline the inobject property case for loads from the global
5840 // object. Also do not inline for unoptimized code. This saves time
5841 // in the code generator. Unoptimized code is toplevel code or code
5842 // that is not in a loop.
5843 if (is_global ||
5844 cgen_->scope()->is_global_scope() ||
5845 cgen_->loop_nesting() == 0) {
5846 Comment cmnt(masm, "[ Load from named Property");
5847 cgen_->frame()->Push(GetName());
5848
5849 RelocInfo::Mode mode = is_global
5850 ? RelocInfo::CODE_TARGET_CONTEXT
5851 : RelocInfo::CODE_TARGET;
5852 Result answer = cgen_->frame()->CallLoadIC(mode);
5853 // A test rax instruction following the call signals that the
5854 // inobject property case was inlined. Ensure that there is not
5855 // a test rax instruction here.
5856 __ nop();
5857 cgen_->frame()->Push(&answer);
5858 } else {
5859 // Inline the inobject property case.
5860 Comment cmnt(masm, "[ Inlined named property load");
5861 Result receiver = cgen_->frame()->Pop();
5862 receiver.ToRegister();
5863 Result value = cgen_->allocator()->Allocate();
5864 ASSERT(value.is_valid());
5865 // Cannot use r12 for receiver, because that changes
5866 // the distance between a call and a fixup location,
5867 // due to a special encoding of r12 as r/m in a ModR/M byte.
5868 if (receiver.reg().is(r12)) {
5869 // Swap receiver and value.
5870 __ movq(value.reg(), receiver.reg());
5871 Result temp = receiver;
5872 receiver = value;
5873 value = temp;
5874 cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
5875 }
5876
5877 DeferredReferenceGetNamedValue* deferred =
5878 new DeferredReferenceGetNamedValue(value.reg(),
5879 receiver.reg(),
5880 GetName());
5881
5882 // Check that the receiver is a heap object.
5883 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5884
5885 __ bind(deferred->patch_site());
5886 // This is the map check instruction that will be patched (so we can't
5887 // use the double underscore macro that may insert instructions).
5888 // Initially use an invalid map to force a failure.
5889 masm->Move(kScratchRegister, Factory::null_value());
5890 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5891 kScratchRegister);
5892 // This branch is always a forwards branch so it's always a fixed
5893 // size which allows the assert below to succeed and patching to work.
5894 // Don't use deferred->Branch(...), since that might add coverage code.
5895 masm->j(not_equal, deferred->entry_label());
5896
5897 // The delta from the patch label to the load offset must be
5898 // statically known.
5899 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
5900 LoadIC::kOffsetToLoadInstruction);
5901 // The initial (invalid) offset has to be large enough to force
5902 // a 32-bit instruction encoding to allow patching with an
5903 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
5904 int offset = kMaxInt;
5905 masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
5906
5907 __ IncrementCounter(&Counters::named_load_inline, 1);
5908 deferred->BindExit();
5909 cgen_->frame()->Push(&receiver);
5910 cgen_->frame()->Push(&value);
5911 }
5912 break;
5913 }
5914
5915 case KEYED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00005916 Comment cmnt(masm, "[ Load from keyed Property");
5917 Variable* var = expression_->AsVariableProxy()->AsVariable();
5918 bool is_global = var != NULL;
5919 ASSERT(!is_global || var->is_global());
5920
5921 // Inline array load code if inside of a loop. We do not know
5922 // the receiver map yet, so we initially generate the code with
5923 // a check against an invalid map. In the inline cache code, we
5924 // patch the map check if appropriate.
5925 if (cgen_->loop_nesting() > 0) {
5926 Comment cmnt(masm, "[ Inlined load from keyed Property");
5927
5928 Result key = cgen_->frame()->Pop();
5929 Result receiver = cgen_->frame()->Pop();
5930 key.ToRegister();
5931 receiver.ToRegister();
5932
5933 // Use a fresh temporary to load the elements without destroying
5934 // the receiver which is needed for the deferred slow case.
5935 Result elements = cgen_->allocator()->Allocate();
5936 ASSERT(elements.is_valid());
5937
5938 // Use a fresh temporary for the index and later the loaded
5939 // value.
5940 Result index = cgen_->allocator()->Allocate();
5941 ASSERT(index.is_valid());
5942
5943 DeferredReferenceGetKeyedValue* deferred =
5944 new DeferredReferenceGetKeyedValue(index.reg(),
5945 receiver.reg(),
5946 key.reg(),
5947 is_global);
5948
5949 // Check that the receiver is not a smi (only needed if this
5950 // is not a load from the global context) and that it has the
5951 // expected map.
5952 if (!is_global) {
5953 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5954 }
5955
5956 // Initially, use an invalid map. The map is patched in the IC
5957 // initialization code.
5958 __ bind(deferred->patch_site());
5959 // Use masm-> here instead of the double underscore macro since extra
5960 // coverage code can interfere with the patching.
5961 masm->movq(kScratchRegister, Factory::null_value(),
5962 RelocInfo::EMBEDDED_OBJECT);
5963 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5964 kScratchRegister);
5965 deferred->Branch(not_equal);
5966
5967 // Check that the key is a non-negative smi.
5968 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
5969
5970 // Get the elements array from the receiver and check that it
5971 // is not a dictionary.
5972 __ movq(elements.reg(),
5973 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
5974 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
5975 Factory::fixed_array_map());
5976 deferred->Branch(not_equal);
5977
5978 // Shift the key to get the actual index value and check that
5979 // it is within bounds.
5980 __ SmiToInteger32(index.reg(), key.reg());
5981 __ cmpl(index.reg(),
5982 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
5983 deferred->Branch(above_equal);
5984
5985 // The index register holds the un-smi-tagged key. It has been
5986 // zero-extended to 64-bits, so it can be used directly as index in the
5987 // operand below.
5988 // Load and check that the result is not the hole. We could
5989 // reuse the index or elements register for the value.
5990 //
5991 // TODO(206): Consider whether it makes sense to try some
5992 // heuristic about which register to reuse. For example, if
5993 // one is rax, then we can reuse that one because the value
5994 // coming from the deferred code will be in rax.
5995 Result value = index;
5996 __ movq(value.reg(),
5997 Operand(elements.reg(),
5998 index.reg(),
5999 times_pointer_size,
6000 FixedArray::kHeaderSize - kHeapObjectTag));
6001 elements.Unuse();
6002 index.Unuse();
6003 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
6004 deferred->Branch(equal);
6005 __ IncrementCounter(&Counters::keyed_load_inline, 1);
6006
6007 deferred->BindExit();
6008 // Restore the receiver and key to the frame and push the
6009 // result on top of it.
6010 cgen_->frame()->Push(&receiver);
6011 cgen_->frame()->Push(&key);
6012 cgen_->frame()->Push(&value);
6013
6014 } else {
6015 Comment cmnt(masm, "[ Load from keyed Property");
6016 RelocInfo::Mode mode = is_global
6017 ? RelocInfo::CODE_TARGET_CONTEXT
6018 : RelocInfo::CODE_TARGET;
6019 Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
6020 // Make sure that we do not have a test instruction after the
6021 // call. A test instruction after the call is used to
6022 // indicate that we have generated an inline version of the
6023 // keyed load. The explicit nop instruction is here because
6024 // the push that follows might be peep-hole optimized away.
6025 __ nop();
6026 cgen_->frame()->Push(&answer);
6027 }
6028 break;
6029 }
6030
6031 default:
6032 UNREACHABLE();
6033 }
6034}
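
// A minimal illustrative sketch (hypothetical names): the inlined keyed
// load above boils down to a bounds-checked element read that also treats
// the hole sentinel as a miss, so deleted elements still fall back to the
// generic keyed load IC.
static bool SketchInlineKeyedLoad(const int* elements,
                                  int length,
                                  int key,
                                  int hole_sentinel,
                                  int* value) {
  if (key < 0 || key >= length) return false;        // slow path: bounds
  if (elements[key] == hole_sentinel) return false;  // slow path: hole
  *value = elements[key];
  return true;
}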
6035
6036
Steve Blockd0582a62009-12-15 09:54:21 +00006037void Reference::TakeValue() {
Steve Blocka7e24c12009-10-30 11:49:00 +00006038 // TODO(X64): This function is completely architecture independent. Move
6039 // it somewhere shared.
6040
6041 // For non-constant frame-allocated slots, we invalidate the value in the
6042 // slot. For all others, we fall back on GetValue.
6043 ASSERT(!cgen_->in_spilled_code());
6044 ASSERT(!is_illegal());
6045 if (type_ != SLOT) {
Steve Blockd0582a62009-12-15 09:54:21 +00006046 GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00006047 return;
6048 }
6049
6050 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6051 ASSERT(slot != NULL);
6052 if (slot->type() == Slot::LOOKUP ||
6053 slot->type() == Slot::CONTEXT ||
6054 slot->var()->mode() == Variable::CONST ||
6055 slot->is_arguments()) {
Steve Blockd0582a62009-12-15 09:54:21 +00006056 GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00006057 return;
6058 }
6059
6060 // Only non-constant, frame-allocated parameters and locals can reach
6061 // here. Be careful not to use the optimizations for arguments
6062 // object access since it may not have been initialized yet.
6063 ASSERT(!slot->is_arguments());
6064 if (slot->type() == Slot::PARAMETER) {
6065 cgen_->frame()->TakeParameterAt(slot->index());
6066 } else {
6067 ASSERT(slot->type() == Slot::LOCAL);
6068 cgen_->frame()->TakeLocalAt(slot->index());
6069 }
6070}
6071
6072
6073void Reference::SetValue(InitState init_state) {
6074 ASSERT(cgen_->HasValidEntryRegisters());
6075 ASSERT(!is_illegal());
6076 MacroAssembler* masm = cgen_->masm();
6077 switch (type_) {
6078 case SLOT: {
6079 Comment cmnt(masm, "[ Store to Slot");
6080 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6081 ASSERT(slot != NULL);
6082 cgen_->StoreToSlot(slot, init_state);
6083 break;
6084 }
6085
6086 case NAMED: {
6087 Comment cmnt(masm, "[ Store to named Property");
6088 cgen_->frame()->Push(GetName());
6089 Result answer = cgen_->frame()->CallStoreIC();
6090 cgen_->frame()->Push(&answer);
6091 break;
6092 }
6093
6094 case KEYED: {
6095 Comment cmnt(masm, "[ Store to keyed Property");
6096
6097 // Generate inlined version of the keyed store if the code is in
6098 // a loop and the key is likely to be a smi.
6099 Property* property = expression()->AsProperty();
6100 ASSERT(property != NULL);
6101 SmiAnalysis* key_smi_analysis = property->key()->type();
6102
6103 if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
6104 Comment cmnt(masm, "[ Inlined store to keyed Property");
6105
6106 // Get the receiver, key and value into registers.
6107 Result value = cgen_->frame()->Pop();
6108 Result key = cgen_->frame()->Pop();
6109 Result receiver = cgen_->frame()->Pop();
6110
6111 Result tmp = cgen_->allocator_->Allocate();
6112 ASSERT(tmp.is_valid());
6113
6114 // Determine whether the value is a constant before putting it
6115 // in a register.
6116 bool value_is_constant = value.is_constant();
6117
6118 // Make sure that value, key and receiver are in registers.
6119 value.ToRegister();
6120 key.ToRegister();
6121 receiver.ToRegister();
6122
6123 DeferredReferenceSetKeyedValue* deferred =
6124 new DeferredReferenceSetKeyedValue(value.reg(),
6125 key.reg(),
6126 receiver.reg());
6127
6128 // Check that the value is a smi if it is not a constant.
6129 // We can skip the write barrier for smis and constants.
6130 if (!value_is_constant) {
6131 __ JumpIfNotSmi(value.reg(), deferred->entry_label());
6132 }
6133
6134 // Check that the key is a non-negative smi.
6135 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
Steve Blocka7e24c12009-10-30 11:49:00 +00006136
6137 // Check that the receiver is not a smi.
6138 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6139
6140 // Check that the receiver is a JSArray.
6141 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
6142 deferred->Branch(not_equal);
6143
6144 // Check that the key is within bounds. Both the key and the
Steve Block3ce2e202009-11-05 08:53:23 +00006145 // length of the JSArray are smis.
6146 __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
6147 key.reg());
6148 deferred->Branch(less_equal);
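        // The branch is taken when length <= key, i.e. when the key is outside
        // the bounds of the array.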
Steve Blocka7e24c12009-10-30 11:49:00 +00006149
6150 // Get the elements array from the receiver and check that it
6151 // is a flat array (not a dictionary).
6152 __ movq(tmp.reg(),
6153 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6154 // Bind the deferred code patch site to be able to locate the
6155 // fixed array map comparison. When debugging, we patch this
6156 // comparison to always fail so that we will hit the IC call
6157 // in the deferred code which will allow the debugger to
6158 // break for fast case stores.
6159 __ bind(deferred->patch_site());
6160 // Avoid using __ to ensure the distance from patch_site
6161 // to the map address is always the same.
6162 masm->movq(kScratchRegister, Factory::fixed_array_map(),
6163 RelocInfo::EMBEDDED_OBJECT);
6164 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6165 kScratchRegister);
6166 deferred->Branch(not_equal);
6167
6168 // Store the value.
6169 SmiIndex index =
6170 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
6171 __ movq(Operand(tmp.reg(),
6172 index.reg,
6173 index.scale,
6174 FixedArray::kHeaderSize - kHeapObjectTag),
6175 value.reg());
6176 __ IncrementCounter(&Counters::keyed_store_inline, 1);
6177
6178 deferred->BindExit();
6179
6180 cgen_->frame()->Push(&receiver);
6181 cgen_->frame()->Push(&key);
6182 cgen_->frame()->Push(&value);
6183 } else {
6184 Result answer = cgen_->frame()->CallKeyedStoreIC();
6185 // Make sure that we do not have a test instruction after the
6186 // call. A test instruction after the call is used to
6187 // indicate that we have generated an inline version of the
6188 // keyed store.
6189 masm->nop();
6190 cgen_->frame()->Push(&answer);
6191 }
6192 break;
6193 }
6194
6195 default:
6196 UNREACHABLE();
6197 }
6198}
6199
6200
6201void ToBooleanStub::Generate(MacroAssembler* masm) {
6202 Label false_result, true_result, not_string;
6203 __ movq(rax, Operand(rsp, 1 * kPointerSize));
6204
6205 // 'null' => false.
6206 __ CompareRoot(rax, Heap::kNullValueRootIndex);
6207 __ j(equal, &false_result);
6208
6209 // Get the map and type of the heap object.
6210 // We don't use CmpObjectType because we manipulate the type field.
6211 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6212 __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
6213
6214 // Undetectable => false.
6215 __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
6216 __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
6217 __ j(not_zero, &false_result);
6218
6219 // JavaScript object => true.
6220 __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
6221 __ j(above_equal, &true_result);
6222
6223 // String value => false iff empty.
6224 __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
6225 __ j(above_equal, &not_string);
Steve Blocka7e24c12009-10-30 11:49:00 +00006226 __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
Steve Blockd0582a62009-12-15 09:54:21 +00006227 __ testl(rdx, rdx);
Steve Blocka7e24c12009-10-30 11:49:00 +00006228 __ j(zero, &false_result);
6229 __ jmp(&true_result);
6230
6231 __ bind(&not_string);
6232 // HeapNumber => false iff +0, -0, or NaN.
6233 // These three cases set C3 when compared to zero in the FPU.
6234 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6235 __ j(not_equal, &true_result);
Steve Blocka7e24c12009-10-30 11:49:00 +00006236 __ fldz(); // Load zero onto fp stack
6237 // Load heap-number double value onto fp stack
6238 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
Steve Block3ce2e202009-11-05 08:53:23 +00006239 __ FCmp();
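  // FCmp leaves the zero flag set both when the value compares equal to zero
  // and when the comparison is unordered (NaN), so this single branch covers
  // +0, -0 and NaN.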
6240 __ j(zero, &false_result);
Steve Blocka7e24c12009-10-30 11:49:00 +00006241 // Fall through to |true_result|.
6242
6243 // Return 1/0 for true/false in rax.
6244 __ bind(&true_result);
6245 __ movq(rax, Immediate(1));
6246 __ ret(1 * kPointerSize);
6247 __ bind(&false_result);
6248 __ xor_(rax, rax);
6249 __ ret(1 * kPointerSize);
6250}
6251
6252
6253bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
6254 // TODO(X64): This method is identical to the ia32 version.
6255 // Either find a reason to change it, or move it somewhere where it can be
6256 // shared. (Notice: It assumes that a Smi can fit in an int).
6257
6258 Object* answer_object = Heap::undefined_value();
6259 switch (op) {
6260 case Token::ADD:
6261 if (Smi::IsValid(left + right)) {
6262 answer_object = Smi::FromInt(left + right);
6263 }
6264 break;
6265 case Token::SUB:
6266 if (Smi::IsValid(left - right)) {
6267 answer_object = Smi::FromInt(left - right);
6268 }
6269 break;
6270 case Token::MUL: {
6271 double answer = static_cast<double>(left) * right;
6272 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
6273 // If the product is zero and the non-zero factor is negative,
6274 // the spec requires us to return floating point negative zero.
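      // For example, -3 * 0 must yield -0.0, which cannot be represented as a
      // smi, so that case is left unfolded.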
6275 if (answer != 0 || (left + right) >= 0) {
6276 answer_object = Smi::FromInt(static_cast<int>(answer));
6277 }
6278 }
6279 }
6280 break;
6281 case Token::DIV:
6282 case Token::MOD:
6283 break;
6284 case Token::BIT_OR:
6285 answer_object = Smi::FromInt(left | right);
6286 break;
6287 case Token::BIT_AND:
6288 answer_object = Smi::FromInt(left & right);
6289 break;
6290 case Token::BIT_XOR:
6291 answer_object = Smi::FromInt(left ^ right);
6292 break;
6293
6294 case Token::SHL: {
6295 int shift_amount = right & 0x1F;
6296 if (Smi::IsValid(left << shift_amount)) {
6297 answer_object = Smi::FromInt(left << shift_amount);
6298 }
6299 break;
6300 }
6301 case Token::SHR: {
6302 int shift_amount = right & 0x1F;
6303 unsigned int unsigned_left = left;
6304 unsigned_left >>= shift_amount;
6305 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
6306 answer_object = Smi::FromInt(unsigned_left);
6307 }
6308 break;
6309 }
6310 case Token::SAR: {
6311 int shift_amount = right & 0x1F;
6312 unsigned int unsigned_left = left;
6313 if (left < 0) {
6314 // Perform arithmetic shift of a negative number by
6315 // complementing number, logical shifting, complementing again.
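        // For example, with left = -8 and shift_amount = 1:
        // ~(-8) = 7, 7 >> 1 = 3, ~3 = -4, which equals -8 >> 1 (arithmetic).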
6316 unsigned_left = ~unsigned_left;
6317 unsigned_left >>= shift_amount;
6318 unsigned_left = ~unsigned_left;
6319 } else {
6320 unsigned_left >>= shift_amount;
6321 }
6322 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
6323 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
6324 break;
6325 }
6326 default:
6327 UNREACHABLE();
6328 break;
6329 }
6330 if (answer_object == Heap::undefined_value()) {
6331 return false;
6332 }
6333 frame_->Push(Handle<Object>(answer_object));
6334 return true;
6335}
6336
6337
6338// End of CodeGenerator implementation.
6339
6340void UnarySubStub::Generate(MacroAssembler* masm) {
6341 Label slow;
6342 Label done;
6343 Label try_float;
Steve Blocka7e24c12009-10-30 11:49:00 +00006344 // Check whether the value is a smi.
6345 __ JumpIfNotSmi(rax, &try_float);
6346
6347 // Enter runtime system if the value of the smi is zero
6348 // to make sure that we switch between 0 and -0.
Steve Block3ce2e202009-11-05 08:53:23 +00006349 // Also enter it if the value of the smi is Smi::kMinValue.
6350 __ SmiNeg(rax, rax, &done);
Steve Blocka7e24c12009-10-30 11:49:00 +00006351
Steve Block3ce2e202009-11-05 08:53:23 +00006352 // Either zero or Smi::kMinValue, neither of which becomes a smi when negated.
6353 __ SmiCompare(rax, Smi::FromInt(0));
6354 __ j(not_equal, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00006355 __ Move(rax, Factory::minus_zero_value());
6356 __ jmp(&done);
6357
6358 // Enter runtime system.
6359 __ bind(&slow);
6360 __ pop(rcx); // pop return address
6361 __ push(rax);
6362 __ push(rcx); // push return address
6363 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
6364 __ jmp(&done);
6365
6366 // Try floating point case.
6367 __ bind(&try_float);
6368 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6369 __ Cmp(rdx, Factory::heap_number_map());
6370 __ j(not_equal, &slow);
6371 // Operand is a float, negate its value by flipping sign bit.
6372 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
6373 __ movq(kScratchRegister, Immediate(0x01));
6374 __ shl(kScratchRegister, Immediate(63));
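  // kScratchRegister now holds only bit 63, the IEEE 754 sign bit, so the xor
  // below negates the double without touching its exponent or mantissa.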
6375 __ xor_(rdx, kScratchRegister); // Flip sign.
6376 // rdx is value to store.
6377 if (overwrite_) {
6378 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
6379 } else {
Steve Block3ce2e202009-11-05 08:53:23 +00006380 __ AllocateHeapNumber(rcx, rbx, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00006381 // rcx: allocated 'empty' number
6382 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
6383 __ movq(rax, rcx);
6384 }
6385
6386 __ bind(&done);
6387 __ StubReturn(1);
6388}
6389
6390
6391void CompareStub::Generate(MacroAssembler* masm) {
6392 Label call_builtin, done;
6393
6394 // NOTICE! This code is only reached after a smi-fast-case check, so
6395 // it is certain that at least one operand isn't a smi.
6396
6397 if (cc_ == equal) { // Both strict and non-strict.
6398 Label slow; // Fallthrough label.
6399 // Equality is almost reflexive (everything but NaN), so start by testing
6400 // for "identity and not NaN".
6401 {
6402 Label not_identical;
6403 __ cmpq(rax, rdx);
6404 __ j(not_equal, &not_identical);
6405 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
6406 // so we do the second best thing - test it ourselves.
6407
6408 Label return_equal;
6409 Label heap_number;
6410 // If it's not a heap number, then return equal.
6411 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
6412 Factory::heap_number_map());
6413 __ j(equal, &heap_number);
6414 __ bind(&return_equal);
6415 __ xor_(rax, rax);
6416 __ ret(0);
6417
6418 __ bind(&heap_number);
6419 // It is a heap number, so return non-equal if it's NaN and equal if it's
6420 // not NaN.
6421 // The representation of NaN values has all exponent bits (52..62) set,
6422 // and not all mantissa bits (0..51) clear.
Steve Blockd0582a62009-12-15 09:54:21 +00006423 // We only allow QNaNs, which have bit 51 set (which also rules out
6424 // the value being Infinity).
6425
6426 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
6427 // all bits in the mask are set. We only need to check the word
6428 // that contains the exponent and high bit of the mantissa.
6429 ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
6430 __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
6431 __ xorl(rax, rax);
6432 __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
6433 __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
6434 __ setcc(above_equal, rax);
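      // above_equal means all exponent bits and the high mantissa bit are set,
      // i.e. the value is a (quiet) NaN, so rax becomes 1 and the stub reports
      // "not equal".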
Steve Blocka7e24c12009-10-30 11:49:00 +00006435 __ ret(0);
6436
6437 __ bind(&not_identical);
6438 }
6439
6440 // If we're doing a strict equality comparison, we don't have to do
6441 // type conversion, so we generate code to do fast comparison for objects
6442 // and oddballs. Non-smi numbers and strings still go through the usual
6443 // slow-case code.
6444 if (strict_) {
6445 // If either is a Smi (we know that not both are), then they can only
6446 // be equal if the other is a HeapNumber. If so, use the slow case.
6447 {
6448 Label not_smis;
6449 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
6450
6451 // Check if the non-smi operand is a heap number.
6452 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
6453 Factory::heap_number_map());
6454 // If heap number, handle it in the slow case.
6455 __ j(equal, &slow);
6456 // Return non-equal. ebx (the lower half of rbx) is not zero.
6457 __ movq(rax, rbx);
6458 __ ret(0);
6459
6460 __ bind(&not_smis);
6461 }
6462
6463 // If either operand is a JSObject or an oddball value, then they are not
6464 // equal since their pointers are different.
6465 // There is no test for undetectability in strict equality.
6466
6467 // If the first object is a JS object, we have done pointer comparison.
6468 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
6469 Label first_non_object;
6470 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
6471 __ j(below, &first_non_object);
6472 // Return non-zero (eax (not rax) is not zero)
6473 Label return_not_equal;
6474 ASSERT(kHeapObjectTag != 0);
6475 __ bind(&return_not_equal);
6476 __ ret(0);
6477
6478 __ bind(&first_non_object);
6479 // Check for oddballs: true, false, null, undefined.
6480 __ CmpInstanceType(rcx, ODDBALL_TYPE);
6481 __ j(equal, &return_not_equal);
6482
6483 __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
6484 __ j(above_equal, &return_not_equal);
6485
6486 // Check for oddballs: true, false, null, undefined.
6487 __ CmpInstanceType(rcx, ODDBALL_TYPE);
6488 __ j(equal, &return_not_equal);
6489
6490 // Fall through to the general case.
6491 }
6492 __ bind(&slow);
6493 }
6494
6495 // Push arguments below the return address to prepare jump to builtin.
6496 __ pop(rcx);
6497 __ push(rax);
6498 __ push(rdx);
6499 __ push(rcx);
6500
6501 // Inlined floating point compare.
6502 // Call builtin if operands are not floating point or smi.
6503 Label check_for_symbols;
6504 // Push arguments on stack, for helper functions.
Steve Block3ce2e202009-11-05 08:53:23 +00006505 FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
Steve Blocka7e24c12009-10-30 11:49:00 +00006506 FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
6507 __ FCmp();
6508
6509 // Jump to builtin for NaN.
6510 __ j(parity_even, &call_builtin);
6511
6512 // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
6513 Label below_lbl, above_lbl;
6514 // use rdx, rax to convert unsigned to signed comparison
6515 __ j(below, &below_lbl);
6516 __ j(above, &above_lbl);
6517
6518 __ xor_(rax, rax); // equal
6519 __ ret(2 * kPointerSize);
6520
6521 __ bind(&below_lbl);
6522 __ movq(rax, Immediate(-1));
6523 __ ret(2 * kPointerSize);
6524
6525 __ bind(&above_lbl);
6526 __ movq(rax, Immediate(1));
6527 __ ret(2 * kPointerSize); // rax, rdx were pushed
6528
6529 // Fast negative check for symbol-to-symbol equality.
6530 __ bind(&check_for_symbols);
6531 if (cc_ == equal) {
6532 BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister);
6533 BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister);
6534
6535 // We've already checked for object identity, so if both operands
6536 // are symbols they aren't equal. Register eax (not rax) already holds a
6537 // non-zero value, which indicates not equal, so just return.
6538 __ ret(2 * kPointerSize);
6539 }
6540
6541 __ bind(&call_builtin);
6542 // must swap argument order
6543 __ pop(rcx);
6544 __ pop(rdx);
6545 __ pop(rax);
6546 __ push(rdx);
6547 __ push(rax);
6548
6549 // Figure out which native to call and setup the arguments.
6550 Builtins::JavaScript builtin;
6551 if (cc_ == equal) {
6552 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
6553 } else {
6554 builtin = Builtins::COMPARE;
6555 int ncr; // NaN compare result
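      // If either operand is NaN the COMPARE builtin returns ncr, which is
      // chosen so that the comparison being compiled evaluates to false.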
6556 if (cc_ == less || cc_ == less_equal) {
6557 ncr = GREATER;
6558 } else {
6559 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
6560 ncr = LESS;
6561 }
Steve Block3ce2e202009-11-05 08:53:23 +00006562 __ Push(Smi::FromInt(ncr));
Steve Blocka7e24c12009-10-30 11:49:00 +00006563 }
6564
6565 // Restore return address on the stack.
6566 __ push(rcx);
6567
6568 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
6569 // tagged as a small integer.
6570 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
6571}
6572
6573
6574void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
6575 Label* label,
6576 Register object,
6577 Register scratch) {
6578 __ JumpIfSmi(object, label);
6579 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
6580 __ movzxbq(scratch,
6581 FieldOperand(scratch, Map::kInstanceTypeOffset));
6582 __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
6583 __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
6584 __ j(not_equal, label);
6585}
6586
6587
6588// Call the function just below TOS on the stack with the given
6589// arguments. The receiver is the TOS.
6590void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
6591 int position) {
6592 // Push the arguments ("left-to-right") on the stack.
6593 int arg_count = args->length();
6594 for (int i = 0; i < arg_count; i++) {
6595 Load(args->at(i));
6596 }
6597
6598 // Record the position for debugging purposes.
6599 CodeForSourcePosition(position);
6600
6601 // Use the shared code stub to call the function.
6602 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
6603 CallFunctionStub call_function(arg_count, in_loop);
6604 Result answer = frame_->CallStub(&call_function, arg_count + 1);
6605 // Restore context and replace function on the stack with the
6606 // result of the stub invocation.
6607 frame_->RestoreContextRegister();
6608 frame_->SetElementAt(0, &answer);
6609}
6610
6611
6612void InstanceofStub::Generate(MacroAssembler* masm) {
6613 // Implements "value instanceof function" operator.
6614 // Expected input state:
6615 // rsp[0] : return address
6616 // rsp[1] : function pointer
6617 // rsp[2] : value
6618
6619 // Get the object - go slow case if it's a smi.
6620 Label slow;
6621 __ movq(rax, Operand(rsp, 2 * kPointerSize));
6622 __ JumpIfSmi(rax, &slow);
6623
6624 // Check that the left hand is a JS object. Leave its map in rax.
6625 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
6626 __ j(below, &slow);
6627 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
6628 __ j(above, &slow);
6629
6630 // Get the prototype of the function.
6631 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6632 __ TryGetFunctionPrototype(rdx, rbx, &slow);
6633
6634 // Check that the function prototype is a JS object.
6635 __ JumpIfSmi(rbx, &slow);
6636 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
6637 __ j(below, &slow);
6638 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
6639 __ j(above, &slow);
6640
6641 // Register mapping: rax is object map and rbx is function prototype.
6642 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
6643
6644 // Loop through the prototype chain looking for the function prototype.
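  // The chain is terminated by null, so reaching null means the object is not
  // an instance.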
6645 Label loop, is_instance, is_not_instance;
6646 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
6647 __ bind(&loop);
6648 __ cmpq(rcx, rbx);
6649 __ j(equal, &is_instance);
6650 __ cmpq(rcx, kScratchRegister);
6651 __ j(equal, &is_not_instance);
6652 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
6653 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
6654 __ jmp(&loop);
6655
6656 __ bind(&is_instance);
Steve Blockd0582a62009-12-15 09:54:21 +00006657 __ xorl(rax, rax);
Steve Blocka7e24c12009-10-30 11:49:00 +00006658 __ ret(2 * kPointerSize);
6659
6660 __ bind(&is_not_instance);
Steve Blockd0582a62009-12-15 09:54:21 +00006661 __ movl(rax, Immediate(1));
Steve Blocka7e24c12009-10-30 11:49:00 +00006662 __ ret(2 * kPointerSize);
6663
6664 // Slow-case: Go through the JavaScript implementation.
6665 __ bind(&slow);
6666 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
6667}
6668
6669
6670void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
6671 // The displacement is used for skipping the return address and the
6672 // frame pointer on the stack. It is the offset of the last
6673 // parameter (if any) relative to the frame pointer.
6674 static const int kDisplacement = 2 * kPointerSize;
6675
6676 // Check if the calling frame is an arguments adaptor frame.
6677 Label runtime;
6678 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
Steve Block3ce2e202009-11-05 08:53:23 +00006679 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
6680 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
Steve Blocka7e24c12009-10-30 11:49:00 +00006681 __ j(not_equal, &runtime);
6682 // Value in rcx is Smi encoded.
6683
6684 // Patch the arguments.length and the parameters pointer.
6685 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6686 __ movq(Operand(rsp, 1 * kPointerSize), rcx);
6687 SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
6688 __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
6689 __ movq(Operand(rsp, 2 * kPointerSize), rdx);
6690
6691 // Do the runtime call to allocate the arguments object.
6692 __ bind(&runtime);
6693 Runtime::Function* f = Runtime::FunctionForId(Runtime::kNewArgumentsFast);
6694 __ TailCallRuntime(ExternalReference(f), 3, f->result_size);
6695}
6696
6697
6698void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6699 // The key is in rdx and the parameter count is in rax.
6700
6701 // The displacement is used for skipping the frame pointer on the
6702 // stack. It is the offset of the last parameter (if any) relative
6703 // to the frame pointer.
6704 static const int kDisplacement = 1 * kPointerSize;
6705
6706 // Check that the key is a smi.
6707 Label slow;
6708 __ JumpIfNotSmi(rdx, &slow);
6709
6710 // Check if the calling frame is an arguments adaptor frame.
6711 Label adaptor;
6712 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
Steve Block3ce2e202009-11-05 08:53:23 +00006713 __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
6714 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
Steve Blocka7e24c12009-10-30 11:49:00 +00006715 __ j(equal, &adaptor);
6716
6717 // Check index against formal parameters count limit passed in
6718 // through register rax. Use unsigned comparison to get negative
6719 // check for free.
6720 __ cmpq(rdx, rax);
6721 __ j(above_equal, &slow);
6722
6723 // Read the argument from the stack and return it.
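  // rbx is set to point just past the last parameter, and the negated,
  // smi-scaled key then indexes back from that point to the requested argument.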
6724 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
6725 __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
6726 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
6727 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
6728 __ Ret();
6729
6730 // Arguments adaptor case: Check index against actual arguments
6731 // limit found in the arguments adaptor frame. Use unsigned
6732 // comparison to get negative check for free.
6733 __ bind(&adaptor);
6734 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6735 __ cmpq(rdx, rcx);
6736 __ j(above_equal, &slow);
6737
6738 // Read the argument from the stack and return it.
6739 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
6740 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
6741 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
6742 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
6743 __ Ret();
6744
6745 // Slow-case: Handle non-smi or out-of-bounds access to arguments
6746 // by calling the runtime system.
6747 __ bind(&slow);
6748 __ pop(rbx); // Return address.
6749 __ push(rdx);
6750 __ push(rbx);
6751 Runtime::Function* f =
6752 Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
6753 __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
6754}
6755
6756
6757void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
6758 // Check if the calling frame is an arguments adaptor frame.
6759 Label adaptor;
6760 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
Steve Block3ce2e202009-11-05 08:53:23 +00006761 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
6762 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
Steve Blocka7e24c12009-10-30 11:49:00 +00006763 __ j(equal, &adaptor);
6764
6765 // Nothing to do: The formal number of parameters has already been
6766 // passed in register rax by calling function. Just return it.
6767 __ ret(0);
6768
6769 // Arguments adaptor case: Read the arguments length from the
6770 // adaptor frame and return it.
6771 __ bind(&adaptor);
6772 __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6773 __ ret(0);
6774}
6775
6776
6777int CEntryStub::MinorKey() {
6778 ASSERT(result_size_ <= 2);
6779#ifdef _WIN64
6780 // Simple results returned in rax (using default code).
6781 // Complex results must be written to address passed as first argument.
6782 // Use even numbers for minor keys, reserving the odd numbers for
6783 // CEntryDebugBreakStub.
6784 return (result_size_ < 2) ? 0 : result_size_ * 2;
6785#else
6786 // Single results returned in rax (both AMD64 and Win64 calling conventions)
6787 // and a struct of two pointers in rax+rdx (AMD64 calling convention only)
6788 // by default.
6789 return 0;
6790#endif
6791}
6792
6793
6794void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
6795 // Check that stack should contain next handler, frame pointer, state and
6796 // return address in that order.
6797 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
6798 StackHandlerConstants::kStateOffset);
6799 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
6800 StackHandlerConstants::kPCOffset);
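  // The handler fields are laid out from lower to higher addresses as:
  // next handler, frame pointer, state, return address. They are consumed
  // below in exactly that order, ending with the ret through the saved pc.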
6801
6802 ExternalReference handler_address(Top::k_handler_address);
6803 __ movq(kScratchRegister, handler_address);
6804 __ movq(rsp, Operand(kScratchRegister, 0));
6805 // get next in chain
6806 __ pop(rcx);
6807 __ movq(Operand(kScratchRegister, 0), rcx);
6808 __ pop(rbp); // pop frame pointer
6809 __ pop(rdx); // remove state
6810
6811 // Before returning we restore the context from the frame pointer if not NULL.
6812 // The frame pointer is NULL in the exception handler of a JS entry frame.
6813 __ xor_(rsi, rsi); // tentatively set context pointer to NULL
6814 Label skip;
6815 __ cmpq(rbp, Immediate(0));
6816 __ j(equal, &skip);
6817 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
6818 __ bind(&skip);
6819 __ ret(0);
6820}
6821
6822
6823void CEntryStub::GenerateCore(MacroAssembler* masm,
6824 Label* throw_normal_exception,
6825 Label* throw_termination_exception,
6826 Label* throw_out_of_memory_exception,
Steve Blockd0582a62009-12-15 09:54:21 +00006827 ExitFrame::Mode mode,
Steve Blocka7e24c12009-10-30 11:49:00 +00006828 bool do_gc,
6829 bool always_allocate_scope) {
6830 // rax: result parameter for PerformGC, if any.
6831 // rbx: pointer to C function (C callee-saved).
6832 // rbp: frame pointer (restored after C call).
6833 // rsp: stack pointer (restored after C call).
6834 // r14: number of arguments including receiver (C callee-saved).
6835 // r15: pointer to the first argument (C callee-saved).
6836 // This pointer is reused in LeaveExitFrame(), so it is stored in a
6837 // callee-saved register.
6838
6839 if (do_gc) {
6840 // Pass failure code returned from last attempt as first argument to GC.
6841#ifdef _WIN64
6842 __ movq(rcx, rax);
6843#else // ! defined(_WIN64)
6844 __ movq(rdi, rax);
6845#endif
6846 __ movq(kScratchRegister,
6847 FUNCTION_ADDR(Runtime::PerformGC),
6848 RelocInfo::RUNTIME_ENTRY);
6849 __ call(kScratchRegister);
6850 }
6851
6852 ExternalReference scope_depth =
6853 ExternalReference::heap_always_allocate_scope_depth();
6854 if (always_allocate_scope) {
6855 __ movq(kScratchRegister, scope_depth);
6856 __ incl(Operand(kScratchRegister, 0));
6857 }
6858
6859 // Call C function.
6860#ifdef _WIN64
6861 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
6862 // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
6863 __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
6864 __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
6865 if (result_size_ < 2) {
6866 // Pass a pointer to the Arguments object as the first argument.
6867 // Return result in single register (rax).
6868 __ lea(rcx, Operand(rsp, 4 * kPointerSize));
6869 } else {
6870 ASSERT_EQ(2, result_size_);
6871 // Pass a pointer to the result location as the first argument.
6872 __ lea(rcx, Operand(rsp, 6 * kPointerSize));
6873 // Pass a pointer to the Arguments object as the second argument.
6874 __ lea(rdx, Operand(rsp, 4 * kPointerSize));
6875 }
6876
6877#else // ! defined(_WIN64)
6878 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
6879 __ movq(rdi, r14); // argc.
6880 __ movq(rsi, r15); // argv.
6881#endif
6882 __ call(rbx);
6883 // Result is in rax - do not destroy this register!
6884
6885 if (always_allocate_scope) {
6886 __ movq(kScratchRegister, scope_depth);
6887 __ decl(Operand(kScratchRegister, 0));
6888 }
6889
6890 // Check for failure result.
6891 Label failure_returned;
6892 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
Steve Block3ce2e202009-11-05 08:53:23 +00006893#ifdef _WIN64
6894 // If return value is on the stack, pop it to registers.
6895 if (result_size_ > 1) {
6896 ASSERT_EQ(2, result_size_);
Steve Blockd0582a62009-12-15 09:54:21 +00006897 // Read result values stored on stack. Result is stored
6898 // above the four argument mirror slots and the two
6899 // Arguments object slots.
Steve Block3ce2e202009-11-05 08:53:23 +00006900 __ movq(rax, Operand(rsp, 6 * kPointerSize));
6901 __ movq(rdx, Operand(rsp, 7 * kPointerSize));
6902 }
6903#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00006904 __ lea(rcx, Operand(rax, 1));
6905 // Lower 2 bits of rcx are 0 iff rax has failure tag.
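  // This relies on the ASSERT above: adding one only carries out of the low
  // tag bits when all failure tag bits are set, so a zero test on rcx detects
  // failure results without clobbering rax.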
6906 __ testl(rcx, Immediate(kFailureTagMask));
6907 __ j(zero, &failure_returned);
6908
6909 // Exit the JavaScript to C++ exit frame.
Steve Blockd0582a62009-12-15 09:54:21 +00006910 __ LeaveExitFrame(mode, result_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006911 __ ret(0);
6912
6913 // Handling of failure.
6914 __ bind(&failure_returned);
6915
6916 Label retry;
6917 // If the returned exception is RETRY_AFTER_GC continue at retry label
6918 ASSERT(Failure::RETRY_AFTER_GC == 0);
6919 __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
6920 __ j(zero, &retry);
6921
6922 // Special handling of out of memory exceptions.
6923 __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
6924 __ cmpq(rax, kScratchRegister);
6925 __ j(equal, throw_out_of_memory_exception);
6926
6927 // Retrieve the pending exception and clear the variable.
6928 ExternalReference pending_exception_address(Top::k_pending_exception_address);
6929 __ movq(kScratchRegister, pending_exception_address);
6930 __ movq(rax, Operand(kScratchRegister, 0));
6931 __ movq(rdx, ExternalReference::the_hole_value_location());
6932 __ movq(rdx, Operand(rdx, 0));
6933 __ movq(Operand(kScratchRegister, 0), rdx);
6934
6935 // Special handling of termination exceptions which are uncatchable
6936 // by javascript code.
6937 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
6938 __ j(equal, throw_termination_exception);
6939
6940 // Handle normal exception.
6941 __ jmp(throw_normal_exception);
6942
6943 // Retry.
6944 __ bind(&retry);
6945}
6946
6947
6948void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
6949 UncatchableExceptionType type) {
6950 // Fetch top stack handler.
6951 ExternalReference handler_address(Top::k_handler_address);
6952 __ movq(kScratchRegister, handler_address);
6953 __ movq(rsp, Operand(kScratchRegister, 0));
6954
6955 // Unwind the handlers until the ENTRY handler is found.
6956 Label loop, done;
6957 __ bind(&loop);
6958 // Load the type of the current stack handler.
6959 const int kStateOffset = StackHandlerConstants::kStateOffset;
6960 __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
6961 __ j(equal, &done);
6962 // Fetch the next handler in the list.
6963 const int kNextOffset = StackHandlerConstants::kNextOffset;
6964 __ movq(rsp, Operand(rsp, kNextOffset));
6965 __ jmp(&loop);
6966 __ bind(&done);
6967
6968 // Set the top handler address to next handler past the current ENTRY handler.
6969 __ movq(kScratchRegister, handler_address);
6970 __ pop(Operand(kScratchRegister, 0));
6971
6972 if (type == OUT_OF_MEMORY) {
6973 // Set external caught exception to false.
6974 ExternalReference external_caught(Top::k_external_caught_exception_address);
6975 __ movq(rax, Immediate(false));
6976 __ store_rax(external_caught);
6977
6978 // Set pending exception and rax to out of memory exception.
6979 ExternalReference pending_exception(Top::k_pending_exception_address);
6980 __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
6981 __ store_rax(pending_exception);
6982 }
6983
6984 // Clear the context pointer.
6985 __ xor_(rsi, rsi);
6986
6987 // Restore registers from handler.
6988 ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
6989 StackHandlerConstants::kFPOffset);
6990 __ pop(rbp); // FP
6991 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
6992 StackHandlerConstants::kStateOffset);
6993 __ pop(rdx); // State
6994
6995 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
6996 StackHandlerConstants::kPCOffset);
6997 __ ret(0);
6998}
6999
7000
7001void CallFunctionStub::Generate(MacroAssembler* masm) {
7002 Label slow;
7003
7004 // Get the function to call from the stack.
7005 // +2 ~ receiver, return address
7006 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
7007
7008 // Check that the function really is a JavaScript function.
7009 __ JumpIfSmi(rdi, &slow);
7010 // Goto slow case if we do not have a function.
7011 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
7012 __ j(not_equal, &slow);
7013
7014 // Fast-case: Just invoke the function.
7015 ParameterCount actual(argc_);
7016 __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
7017
7018 // Slow-case: Non-function called.
7019 __ bind(&slow);
7020 __ Set(rax, argc_);
7021 __ Set(rbx, 0);
7022 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
7023 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
7024 __ Jump(adaptor, RelocInfo::CODE_TARGET);
7025}
7026
7027
7028void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
7029 // rax: number of arguments including receiver
7030 // rbx: pointer to C function (C callee-saved)
7031 // rbp: frame pointer of calling JS frame (restored after C call)
7032 // rsp: stack pointer (restored after C call)
7033 // rsi: current context (restored)
7034
7035 // NOTE: Invocations of builtins may return failure objects
7036 // instead of a proper result. The builtin entry handles
7037 // this by performing a garbage collection and retrying the
7038 // builtin once.
7039
Steve Blockd0582a62009-12-15 09:54:21 +00007040 ExitFrame::Mode mode = is_debug_break ?
7041 ExitFrame::MODE_DEBUG :
7042 ExitFrame::MODE_NORMAL;
Steve Blocka7e24c12009-10-30 11:49:00 +00007043
7044 // Enter the exit frame that transitions from JavaScript to C++.
Steve Blockd0582a62009-12-15 09:54:21 +00007045 __ EnterExitFrame(mode, result_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00007046
7047 // rax: Holds the context at this point, but should not be used.
7048 // On entry to code generated by GenerateCore, it must hold
7049 // a failure result if the collect_garbage argument to GenerateCore
7050 // is true. This failure result can be the result of code
7051 // generated by a previous call to GenerateCore. The value
7052 // of rax is then passed to Runtime::PerformGC.
7053 // rbx: pointer to builtin function (C callee-saved).
7054 // rbp: frame pointer of exit frame (restored after C call).
7055 // rsp: stack pointer (restored after C call).
7056 // r14: number of arguments including receiver (C callee-saved).
7057 // r15: argv pointer (C callee-saved).
7058
7059 Label throw_normal_exception;
7060 Label throw_termination_exception;
7061 Label throw_out_of_memory_exception;
7062
7063 // Call into the runtime system.
7064 GenerateCore(masm,
7065 &throw_normal_exception,
7066 &throw_termination_exception,
7067 &throw_out_of_memory_exception,
Steve Blockd0582a62009-12-15 09:54:21 +00007068 mode,
Steve Blocka7e24c12009-10-30 11:49:00 +00007069 false,
7070 false);
7071
7072 // Do space-specific GC and retry runtime call.
7073 GenerateCore(masm,
7074 &throw_normal_exception,
7075 &throw_termination_exception,
7076 &throw_out_of_memory_exception,
Steve Blockd0582a62009-12-15 09:54:21 +00007077 mode,
Steve Blocka7e24c12009-10-30 11:49:00 +00007078 true,
7079 false);
7080
7081 // Do full GC and retry runtime call one final time.
7082 Failure* failure = Failure::InternalError();
7083 __ movq(rax, failure, RelocInfo::NONE);
7084 GenerateCore(masm,
7085 &throw_normal_exception,
7086 &throw_termination_exception,
7087 &throw_out_of_memory_exception,
Steve Blockd0582a62009-12-15 09:54:21 +00007088 mode,
Steve Blocka7e24c12009-10-30 11:49:00 +00007089 true,
7090 true);
7091
7092 __ bind(&throw_out_of_memory_exception);
7093 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
7094
7095 __ bind(&throw_termination_exception);
7096 GenerateThrowUncatchable(masm, TERMINATION);
7097
7098 __ bind(&throw_normal_exception);
7099 GenerateThrowTOS(masm);
7100}
7101
7102
Steve Blockd0582a62009-12-15 09:54:21 +00007103void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
7104 UNREACHABLE();
7105}
7106
7107
Steve Blocka7e24c12009-10-30 11:49:00 +00007108void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
7109 Label invoke, exit;
7110#ifdef ENABLE_LOGGING_AND_PROFILING
7111 Label not_outermost_js, not_outermost_js_2;
7112#endif
7113
7114 // Setup frame.
7115 __ push(rbp);
7116 __ movq(rbp, rsp);
7117
7118 // Push the stack frame type marker twice.
7119 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
Steve Block3ce2e202009-11-05 08:53:23 +00007120 __ Push(Smi::FromInt(marker)); // context slot
7121 __ Push(Smi::FromInt(marker)); // function slot
Steve Blocka7e24c12009-10-30 11:49:00 +00007122 // Save callee-saved registers (X64 calling conventions).
7123 __ push(r12);
7124 __ push(r13);
7125 __ push(r14);
7126 __ push(r15);
7127 __ push(rdi);
7128 __ push(rsi);
7129 __ push(rbx);
7130 // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
7131 // callee-save in JS code as well.
7132
7133 // Save copies of the top frame descriptor on the stack.
7134 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
7135 __ load_rax(c_entry_fp);
7136 __ push(rax);
7137
7138#ifdef ENABLE_LOGGING_AND_PROFILING
7139 // If this is the outermost JS call, set js_entry_sp value.
7140 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
7141 __ load_rax(js_entry_sp);
7142 __ testq(rax, rax);
7143 __ j(not_zero, &not_outermost_js);
7144 __ movq(rax, rbp);
7145 __ store_rax(js_entry_sp);
7146 __ bind(&not_outermost_js);
7147#endif
7148
7149 // Call a faked try-block that does the invoke.
7150 __ call(&invoke);
7151
7152 // Caught exception: Store result (exception) in the pending
7153 // exception field in the JSEnv and return a failure sentinel.
7154 ExternalReference pending_exception(Top::k_pending_exception_address);
7155 __ store_rax(pending_exception);
7156 __ movq(rax, Failure::Exception(), RelocInfo::NONE);
7157 __ jmp(&exit);
7158
7159 // Invoke: Link this frame into the handler chain.
7160 __ bind(&invoke);
7161 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
7162
7163 // Clear any pending exceptions.
7164 __ load_rax(ExternalReference::the_hole_value_location());
7165 __ store_rax(pending_exception);
7166
7167 // Fake a receiver (NULL).
7168 __ push(Immediate(0)); // receiver
7169
7170 // Invoke the function by calling through JS entry trampoline
7171 // builtin and pop the faked function when we return. We load the address
7172 // from an external reference instead of inlining the call target address
7173 // directly in the code, because the builtin stubs may not have been
7174 // generated yet at the time this code is generated.
7175 if (is_construct) {
7176 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
7177 __ load_rax(construct_entry);
7178 } else {
7179 ExternalReference entry(Builtins::JSEntryTrampoline);
7180 __ load_rax(entry);
7181 }
7182 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
7183 __ call(kScratchRegister);
7184
7185 // Unlink this frame from the handler chain.
7186 __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
7187 __ pop(Operand(kScratchRegister, 0));
7188 // Pop next_sp.
7189 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
7190
7191#ifdef ENABLE_LOGGING_AND_PROFILING
7192 // If the current rbp value is the same as the js_entry_sp value, it means that
7193 // the current function is the outermost.
7194 __ movq(kScratchRegister, js_entry_sp);
7195 __ cmpq(rbp, Operand(kScratchRegister, 0));
7196 __ j(not_equal, &not_outermost_js_2);
7197 __ movq(Operand(kScratchRegister, 0), Immediate(0));
7198 __ bind(&not_outermost_js_2);
7199#endif
7200
7201 // Restore the top frame descriptor from the stack.
7202 __ bind(&exit);
7203 __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
7204 __ pop(Operand(kScratchRegister, 0));
7205
7206 // Restore callee-saved registers (X64 conventions).
7207 __ pop(rbx);
7208 __ pop(rsi);
7209 __ pop(rdi);
7210 __ pop(r15);
7211 __ pop(r14);
7212 __ pop(r13);
7213 __ pop(r12);
7214 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
7215
7216 // Restore frame pointer and return.
7217 __ pop(rbp);
7218 __ ret(0);
7219}
7220
7221
7222// -----------------------------------------------------------------------------
7223// Implementation of stubs.
7224
7225// Stub classes have public member named masm, not masm_.
7226
7227void StackCheckStub::Generate(MacroAssembler* masm) {
7228 // Because builtins always remove the receiver from the stack, we
7229 // have to fake one to avoid underflowing the stack. The receiver
7230 // must be inserted below the return address on the stack so we
7231 // temporarily store that in a register.
7232 __ pop(rax);
Steve Block3ce2e202009-11-05 08:53:23 +00007233 __ Push(Smi::FromInt(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00007234 __ push(rax);
7235
7236 // Do tail-call to runtime routine.
7237 Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
7238 __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
7239}
7240
7241
Steve Blocka7e24c12009-10-30 11:49:00 +00007242void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7243 Register number) {
7244 Label load_smi, done;
7245
7246 __ JumpIfSmi(number, &load_smi);
7247 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
7248 __ jmp(&done);
7249
7250 __ bind(&load_smi);
7251 __ SmiToInteger32(number, number);
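  // fild_s only accepts a memory operand, so the untagged integer is spilled
  // to the stack, loaded into the FPU from there, and the slot is popped again.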
7252 __ push(number);
7253 __ fild_s(Operand(rsp, 0));
7254 __ pop(number);
7255
7256 __ bind(&done);
7257}
7258
7259
7260void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7261 Register src,
7262 XMMRegister dst) {
7263 Label load_smi, done;
7264
7265 __ JumpIfSmi(src, &load_smi);
7266 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
7267 __ jmp(&done);
7268
7269 __ bind(&load_smi);
7270 __ SmiToInteger32(src, src);
7271 __ cvtlsi2sd(dst, src);
7272
7273 __ bind(&done);
7274}
7275
7276
7277void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7278 XMMRegister dst1,
7279 XMMRegister dst2) {
7280 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7281 LoadFloatOperand(masm, kScratchRegister, dst1);
7282 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7283 LoadFloatOperand(masm, kScratchRegister, dst2);
7284}
7285
7286
7287void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
7288 const Operand& src,
7289 Register dst) {
7290 // TODO(X64): Convert number operands to int32 values.
7291 // Don't convert a Smi to a double first.
7292 UNIMPLEMENTED();
7293}
7294
7295
7296void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
7297 Label load_smi_1, load_smi_2, done_load_1, done;
7298 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7299 __ JumpIfSmi(kScratchRegister, &load_smi_1);
7300 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7301 __ bind(&done_load_1);
7302
7303 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7304 __ JumpIfSmi(kScratchRegister, &load_smi_2);
7305 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7306 __ jmp(&done);
7307
7308 __ bind(&load_smi_1);
7309 __ SmiToInteger32(kScratchRegister, kScratchRegister);
7310 __ push(kScratchRegister);
7311 __ fild_s(Operand(rsp, 0));
7312 __ pop(kScratchRegister);
7313 __ jmp(&done_load_1);
7314
7315 __ bind(&load_smi_2);
7316 __ SmiToInteger32(kScratchRegister, kScratchRegister);
7317 __ push(kScratchRegister);
7318 __ fild_s(Operand(rsp, 0));
7319 __ pop(kScratchRegister);
7320
7321 __ bind(&done);
7322}
7323
7324
7325void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7326 Register lhs,
7327 Register rhs) {
7328 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
7329 __ JumpIfSmi(lhs, &load_smi_lhs);
7330 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
7331 __ bind(&done_load_lhs);
7332
7333 __ JumpIfSmi(rhs, &load_smi_rhs);
7334 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
7335 __ jmp(&done);
7336
7337 __ bind(&load_smi_lhs);
7338 __ SmiToInteger64(kScratchRegister, lhs);
7339 __ push(kScratchRegister);
7340 __ fild_d(Operand(rsp, 0));
7341 __ pop(kScratchRegister);
7342 __ jmp(&done_load_lhs);
7343
7344 __ bind(&load_smi_rhs);
7345 __ SmiToInteger64(kScratchRegister, rhs);
7346 __ push(kScratchRegister);
7347 __ fild_d(Operand(rsp, 0));
7348 __ pop(kScratchRegister);
7349
7350 __ bind(&done);
7351}
7352
7353
Steve Block3ce2e202009-11-05 08:53:23 +00007354void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
7355 Label* non_float) {
Steve Blocka7e24c12009-10-30 11:49:00 +00007356 Label test_other, done;
7357 // Test if both operands are numbers (heap_numbers or smis).
7358 // If not, jump to label non_float.
7359 __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
7360 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
7361 __ j(not_equal, non_float); // The argument in rdx is not a number.
7362
7363 __ bind(&test_other);
7364 __ JumpIfSmi(rax, &done); // argument in rax is OK
7365 __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
7366 __ j(not_equal, non_float); // The argument in rax is not a number.
7367
7368 // Fall-through: Both operands are numbers.
7369 __ bind(&done);
7370}
7371
7372
7373const char* GenericBinaryOpStub::GetName() {
7374 switch (op_) {
7375 case Token::ADD: return "GenericBinaryOpStub_ADD";
7376 case Token::SUB: return "GenericBinaryOpStub_SUB";
7377 case Token::MUL: return "GenericBinaryOpStub_MUL";
7378 case Token::DIV: return "GenericBinaryOpStub_DIV";
7379 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
7380 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
7381 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
7382 case Token::SAR: return "GenericBinaryOpStub_SAR";
7383 case Token::SHL: return "GenericBinaryOpStub_SHL";
7384 case Token::SHR: return "GenericBinaryOpStub_SHR";
7385 default: return "GenericBinaryOpStub";
7386 }
7387}
7388
7389
Steve Blockd0582a62009-12-15 09:54:21 +00007390void GenericBinaryOpStub::GenerateCall(
7391 MacroAssembler* masm,
7392 Register left,
7393 Register right) {
7394 if (!ArgsInRegistersSupported()) {
7395 // Pass arguments on the stack.
7396 __ push(left);
7397 __ push(right);
7398 } else {
7399 // The calling convention with registers is left in rdx and right in rax.
7400 Register left_arg = rdx;
7401 Register right_arg = rax;
7402 if (!(left.is(left_arg) && right.is(right_arg))) {
7403 if (left.is(right_arg) && right.is(left_arg)) {
7404 if (IsOperationCommutative()) {
7405 SetArgsReversed();
7406 } else {
7407 __ xchg(left, right);
7408 }
7409 } else if (left.is(left_arg)) {
7410 __ movq(right_arg, right);
7411 } else if (left.is(right_arg)) {
7412 if (IsOperationCommutative()) {
7413 __ movq(left_arg, right);
7414 SetArgsReversed();
7415 } else {
7416 // Order of moves important to avoid destroying left argument.
7417 __ movq(left_arg, left);
7418 __ movq(right_arg, right);
7419 }
7420 } else if (right.is(left_arg)) {
7421 if (IsOperationCommutative()) {
7422 __ movq(right_arg, left);
7423 SetArgsReversed();
7424 } else {
7425 // Order of moves important to avoid destroying right argument.
7426 __ movq(right_arg, right);
7427 __ movq(left_arg, left);
7428 }
7429 } else if (right.is(right_arg)) {
7430 __ movq(left_arg, left);
7431 } else {
7432 // Order of moves is not important.
7433 __ movq(left_arg, left);
7434 __ movq(right_arg, right);
7435 }
7436 }
7437
7438 // Update flags to indicate that arguments are in registers.
7439 SetArgsInRegisters();
7440 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7441 }
7442
7443 // Call the stub.
7444 __ CallStub(this);
7445}
7446
7447
7448void GenericBinaryOpStub::GenerateCall(
7449 MacroAssembler* masm,
7450 Register left,
7451 Smi* right) {
7452 if (!ArgsInRegistersSupported()) {
7453 // Pass arguments on the stack.
7454 __ push(left);
7455 __ Push(right);
7456 } else {
7457 // The calling convention with registers is left in rdx and right in rax.
7458 Register left_arg = rdx;
7459 Register right_arg = rax;
7460 if (left.is(left_arg)) {
7461 __ Move(right_arg, right);
7462 } else if (left.is(right_arg) && IsOperationCommutative()) {
7463 __ Move(left_arg, right);
7464 SetArgsReversed();
7465 } else {
7466 __ movq(left_arg, left);
7467 __ Move(right_arg, right);
7468 }
7469
7470 // Update flags to indicate that arguments are in registers.
7471 SetArgsInRegisters();
7472 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7473 }
7474
7475 // Call the stub.
7476 __ CallStub(this);
7477}
7478
7479
7480void GenericBinaryOpStub::GenerateCall(
7481 MacroAssembler* masm,
7482 Smi* left,
7483 Register right) {
7484 if (!ArgsInRegistersSupported()) {
7485 // Pass arguments on the stack.
7486 __ Push(left);
7487 __ push(right);
7488 } else {
7489 // The calling convention with registers is left in rdx and right in rax.
7490 Register left_arg = rdx;
7491 Register right_arg = rax;
7492 if (right.is(right_arg)) {
7493 __ Move(left_arg, left);
7494 } else if (right.is(left_arg) && IsOperationCommutative()) {
7495 __ Move(right_arg, left);
7496 SetArgsReversed();
7497 } else {
7498 __ Move(left_arg, left);
7499 __ movq(right_arg, right);
7500 }
7501 // Update flags to indicate that arguments are in registers.
7502 SetArgsInRegisters();
7503 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7504 }
7505
7506 // Call the stub.
7507 __ CallStub(this);
7508}
7509
7510
Steve Blocka7e24c12009-10-30 11:49:00 +00007511void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
7512 // Perform fast-case smi code for the operation (rax <op> rbx) and
7513 // leave result in register rax.
7514
7515 // Smi check both operands.
7516 __ JumpIfNotBothSmi(rax, rbx, slow);
7517
7518 switch (op_) {
7519 case Token::ADD: {
7520 __ SmiAdd(rax, rax, rbx, slow);
7521 break;
7522 }
7523
7524 case Token::SUB: {
7525 __ SmiSub(rax, rax, rbx, slow);
7526 break;
7527 }
7528
7529 case Token::MUL:
7530 __ SmiMul(rax, rax, rbx, slow);
7531 break;
7532
7533 case Token::DIV:
7534 __ SmiDiv(rax, rax, rbx, slow);
7535 break;
7536
7537 case Token::MOD:
7538 __ SmiMod(rax, rax, rbx, slow);
7539 break;
7540
7541 case Token::BIT_OR:
7542 __ SmiOr(rax, rax, rbx);
7543 break;
7544
7545 case Token::BIT_AND:
7546 __ SmiAnd(rax, rax, rbx);
7547 break;
7548
7549 case Token::BIT_XOR:
7550 __ SmiXor(rax, rax, rbx);
7551 break;
7552
7553 case Token::SHL:
7554 case Token::SHR:
7555 case Token::SAR:
7556 // Move the second operand into register rcx.
Steve Block3ce2e202009-11-05 08:53:23 +00007557 __ movq(rcx, rbx);
Steve Blocka7e24c12009-10-30 11:49:00 +00007558 // Perform the operation.
7559 switch (op_) {
7560 case Token::SAR:
Steve Block3ce2e202009-11-05 08:53:23 +00007561 __ SmiShiftArithmeticRight(rax, rax, rcx);
Steve Blocka7e24c12009-10-30 11:49:00 +00007562 break;
7563 case Token::SHR:
Steve Block3ce2e202009-11-05 08:53:23 +00007564 __ SmiShiftLogicalRight(rax, rax, rcx, slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007565 break;
7566 case Token::SHL:
Steve Block3ce2e202009-11-05 08:53:23 +00007567 __ SmiShiftLeft(rax, rax, rcx, slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007568 break;
7569 default:
7570 UNREACHABLE();
7571 }
7572 break;
7573
7574 default:
7575 UNREACHABLE();
7576 break;
7577 }
7578}
7579
7580
7581void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
7582 Label call_runtime;
Steve Blockd0582a62009-12-15 09:54:21 +00007583 if (HasSmiCodeInStub()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00007584 // The fast case smi code wasn't inlined in the stub caller
7585 // code. Generate it here to speed up common operations.
7586 Label slow;
7587 __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
7588 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
7589 GenerateSmiCode(masm, &slow);
Steve Blockd0582a62009-12-15 09:54:21 +00007590 GenerateReturn(masm);
Steve Blocka7e24c12009-10-30 11:49:00 +00007591
7592 // Too bad. The fast case smi code didn't succeed.
7593 __ bind(&slow);
7594 }
7595
Steve Blockd0582a62009-12-15 09:54:21 +00007596 // Make sure the arguments are in rdx and rax.
7597 GenerateLoadArguments(masm);
Steve Blocka7e24c12009-10-30 11:49:00 +00007598
7599 // Floating point case.
7600 switch (op_) {
7601 case Token::ADD:
7602 case Token::SUB:
7603 case Token::MUL:
7604 case Token::DIV: {
7605 // rax: y
7606 // rdx: x
Steve Block3ce2e202009-11-05 08:53:23 +00007607 FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
Steve Blocka7e24c12009-10-30 11:49:00 +00007608 // Fast-case: Both operands are numbers.
7609 // Allocate a heap number, if needed.
7610 Label skip_allocation;
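      // The OVERWRITE_* modes reuse an operand's heap number for the result
      // when that operand is already a heap object; a fresh heap number is
      // only allocated when it is a smi (or in NO_OVERWRITE mode).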
7611 switch (mode_) {
7612 case OVERWRITE_LEFT:
7613 __ movq(rax, rdx);
7614 // Fall through!
7615 case OVERWRITE_RIGHT:
7616 // If the argument in rax is already an object, we skip the
7617 // allocation of a heap number.
7618 __ JumpIfNotSmi(rax, &skip_allocation);
7619 // Fall through!
7620 case NO_OVERWRITE:
Steve Blockd0582a62009-12-15 09:54:21 +00007621 // Allocate a heap number for the result. Keep rax and rdx intact
7622 // for the possible runtime call.
7623 __ AllocateHeapNumber(rbx, rcx, &call_runtime);
7624 __ movq(rax, rbx);
Steve Blocka7e24c12009-10-30 11:49:00 +00007625 __ bind(&skip_allocation);
7626 break;
7627 default: UNREACHABLE();
7628 }
7629 // xmm4 and xmm5 are volatile XMM registers.
7630 FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
7631
      switch (op_) {
        case Token::ADD: __ addsd(xmm4, xmm5); break;
        case Token::SUB: __ subsd(xmm4, xmm5); break;
        case Token::MUL: __ mulsd(xmm4, xmm5); break;
        case Token::DIV: __ divsd(xmm4, xmm5); break;
        default: UNREACHABLE();
      }
      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
      GenerateReturn(masm);
    }
    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
      // TODO(X64): Don't convert a Smi to float and then back to int32
      // afterwards.
      FloatingPointHelper::LoadFloatOperands(masm);

      Label skip_allocation, non_smi_result, operand_conversion_failure;

      // Reserve space for converted numbers.
      __ subq(rsp, Immediate(2 * kPointerSize));

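      // Convert both operands to 32-bit integers. With SSE3, fisttp truncates
      // directly and the invalid-operation flag tells us if a value did not
      // fit; without SSE3 we store with fist and compare the reloaded value
      // against the original to detect non-int32 operands.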
      if (use_sse3_) {
        // Truncate the operands to 32-bit integers and check for
        // exceptions in doing so.
        CpuFeatures::Scope scope(SSE3);
        __ fisttp_s(Operand(rsp, 0 * kPointerSize));
        __ fisttp_s(Operand(rsp, 1 * kPointerSize));
        __ fnstsw_ax();
        __ testl(rax, Immediate(1));
        __ j(not_zero, &operand_conversion_failure);
      } else {
        // Check if right operand is int32.
        __ fist_s(Operand(rsp, 0 * kPointerSize));
        __ fild_s(Operand(rsp, 0 * kPointerSize));
        __ FCmp();
        __ j(not_zero, &operand_conversion_failure);
        __ j(parity_even, &operand_conversion_failure);

        // Check if left operand is int32.
        __ fist_s(Operand(rsp, 1 * kPointerSize));
        __ fild_s(Operand(rsp, 1 * kPointerSize));
        __ FCmp();
        __ j(not_zero, &operand_conversion_failure);
        __ j(parity_even, &operand_conversion_failure);
      }

      // Get int32 operands and perform bitop.
      __ pop(rcx);
      __ pop(rax);
      switch (op_) {
        case Token::BIT_OR:  __ orl(rax, rcx); break;
        case Token::BIT_AND: __ andl(rax, rcx); break;
        case Token::BIT_XOR: __ xorl(rax, rcx); break;
        case Token::SAR: __ sarl_cl(rax); break;
        case Token::SHL: __ shll_cl(rax); break;
        case Token::SHR: __ shrl_cl(rax); break;
        default: UNREACHABLE();
      }
      if (op_ == Token::SHR) {
        // The result can be negative only for a shift by zero, which also
        // does not update the sign flag, so test the result explicitly.
        __ testl(rax, rax);
        __ j(negative, &non_smi_result);
      }
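      // Results that do not fit in a smi are boxed in a heap number below.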
      __ JumpIfNotValidSmiValue(rax, &non_smi_result);
      // Tag smi result, if possible, and return.
      __ Integer32ToSmi(rax, rax);
      GenerateReturn(masm);

      // All ops except SHR return a signed int32 that we load in a HeapNumber.
      if (op_ != Token::SHR && non_smi_result.is_linked()) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
                                 1 * kPointerSize : 2 * kPointerSize));
            __ JumpIfNotSmi(rax, &skip_allocation);
            // Fall through!
          case NO_OVERWRITE:
            __ AllocateHeapNumber(rax, rcx, &call_runtime);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        __ movq(Operand(rsp, 1 * kPointerSize), rbx);
        __ fild_s(Operand(rsp, 1 * kPointerSize));
        __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
        GenerateReturn(masm);
      }

      // Clear the FPU exception flag and reset the stack before calling
      // the runtime system.
      __ bind(&operand_conversion_failure);
      __ addq(rsp, Immediate(2 * kPointerSize));
      if (use_sse3_) {
        // If we've used the SSE3 instructions for truncating the
        // floating point values to integers and it failed, we have a
        // pending #IA exception. Clear it.
        __ fnclex();
      } else {
        // The non-SSE3 variant does early bailout if the right
        // operand isn't a 32-bit integer, so we may have a single
        // value on the FPU stack we need to get rid of.
        __ ffree(0);
      }

      // SHR should return uint32 - go to runtime for non-smi/negative result.
      if (op_ == Token::SHR) {
        __ bind(&non_smi_result);
      }
      __ movq(rax, Operand(rsp, 1 * kPointerSize));
      __ movq(rdx, Operand(rsp, 2 * kPointerSize));
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If all else fails, use the runtime system to get the correct
  // result. If the arguments were passed in registers, place them on the
  // stack in the correct order below the return address.
  __ bind(&call_runtime);
  if (HasArgumentsInRegisters()) {
    __ pop(rcx);
    if (HasArgumentsReversed()) {
      __ push(rax);
      __ push(rdx);
    } else {
      __ push(rdx);
      __ push(rax);
    }
    __ push(rcx);
  }
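  // At this point both operands are on the stack just below the return
  // address, which is the layout the builtins and the runtime expect.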
  switch (op_) {
    case Token::ADD: {
      // Test for string arguments before calling runtime.
      Label not_strings, both_strings, not_string1, string1;
      Condition is_smi;
      Result answer;
      __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // First argument.
      __ movq(rax, Operand(rsp, 1 * kPointerSize));  // Second argument.
      is_smi = masm->CheckSmi(rdx);
      __ j(is_smi, &not_string1);
      __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
      __ j(above_equal, &not_string1);

      // First argument is a string, test second.
      is_smi = masm->CheckSmi(rax);
      __ j(is_smi, &string1);
      __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
      __ j(above_equal, &string1);

      // First and second arguments are strings.
      Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
      __ TailCallRuntime(ExternalReference(f), 2, f->result_size);

      // Only first argument is a string.
      __ bind(&string1);
      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);

      // First argument was not a string, test second.
      __ bind(&not_string1);
      is_smi = masm->CheckSmi(rax);
      __ j(is_smi, &not_strings);
      __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
      __ j(above_equal, &not_strings);

      // Only second argument is a string.
      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);

      __ bind(&not_strings);
      // Neither argument is a string.
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    }
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


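// GenerateLoadArguments ensures the operands are in rdx (x, the left operand)
// and rax (y, the right operand), loading them from the stack when the stub
// was not called with its arguments in registers.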
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
  // If arguments are not passed in registers, read them from the stack.
  if (!HasArgumentsInRegisters()) {
    __ movq(rax, Operand(rsp, 1 * kPointerSize));
    __ movq(rdx, Operand(rsp, 2 * kPointerSize));
  }
}


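// The stub leaves its result in rax. If the arguments were passed on the
// stack, the return instruction also removes them.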
void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
  // If arguments are not passed in registers, remove them from the stack
  // before returning.
  if (!HasArgumentsInRegisters()) {
    __ ret(2 * kPointerSize);  // Remove both operands.
  } else {
    __ ret(0);
  }
}


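// The minor key packs the stub parameters into a small integer: bit 0 holds
// the strict-equality flag and the remaining bits hold the condition code.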
int CompareStub::MinorKey() {
  // Encode the two parameters in a unique 16 bit value.
  ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
  return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
}

#undef __

#define __ masm.

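// On Windows 64 the modulo operation is handled by a small piece of generated
// code: the two doubles are spilled to the argument backing store, reduced
// with the x87 fprem instruction, and the result (or NaN on invalid input)
// is returned in xmm0.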
#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
                                                 &actual_size,
                                                 true));
  CHECK(buffer);
  Assembler masm(buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // The Windows 64 ABI passes double arguments in xmm0, xmm1 and
  // returns the result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.

  // Compute x mod y.
  // Load y and x (use argument backing store as temporary storage).
  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
  __ movsd(Operand(rsp, kPointerSize), xmm0);
  __ fld_d(Operand(rsp, kPointerSize * 2));
  __ fld_d(Operand(rsp, kPointerSize));
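  // The FPU stack now holds x in st(0) and y in st(1), which is the operand
  // order fprem expects for computing x mod y.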

  // Clear exception flags before operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if Illegal Operand or Zero Division exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }

  // Compute st(0) % st(1)
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, computation only has partial result. Loop to
    // continue computation.
    __ j(not_zero, &partial_remainder_loop);
  }

  Label valid_result;
  Label return_result;
  // If Invalid Operand or Zero Division exceptions are set,
  // return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop result in st(0).
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue, RelocInfo::NONE);
  __ movq(Operand(rsp, kPointerSize), rcx);
  __ movsd(xmm0, Operand(rsp, kPointerSize));
  __ jmp(&return_result);

  // If result is valid, return that.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kPointerSize));
  __ movsd(xmm0, Operand(rsp, kPointerSize));

  // Clean up FPU stack and exceptions and return xmm0.
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any Exception */));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  // Call the function from C++.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}

#endif

#undef __

} }  // namespace v8::internal