blob: 0e314b9fcd85e8db39ffe5711e03c1f61e65ea11 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright 2006-2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "bootstrapper.h"
31#include "codegen-inl.h"
32#include "debug.h"
33#include "ic-inl.h"
34#include "parser.h"
35#include "register-allocator-inl.h"
36#include "runtime.h"
37#include "scopes.h"
38
39namespace v8 {
40namespace internal {
41
42#define __ ACCESS_MASM(masm_)
43
44// -------------------------------------------------------------------------
45// Platform-specific DeferredCode functions.
46
47void DeferredCode::SaveRegisters() {
48 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
49 int action = registers_[i];
50 if (action == kPush) {
51 __ push(RegisterAllocator::ToRegister(i));
52 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
53 __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
54 }
55 }
56}
57
58
59void DeferredCode::RestoreRegisters() {
60 // Restore registers in reverse order due to the stack.
61 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
62 int action = registers_[i];
63 if (action == kPush) {
64 __ pop(RegisterAllocator::ToRegister(i));
65 } else if (action != kIgnore) {
66 action &= ~kSyncedFlag;
67 __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
68 }
69 }
70}
71
72
73// -------------------------------------------------------------------------
74// CodeGenState implementation.
75
76CodeGenState::CodeGenState(CodeGenerator* owner)
77 : owner_(owner),
78 typeof_state_(NOT_INSIDE_TYPEOF),
79 destination_(NULL),
80 previous_(NULL) {
81 owner_->set_state(this);
82}
83
84
85CodeGenState::CodeGenState(CodeGenerator* owner,
86 TypeofState typeof_state,
87 ControlDestination* destination)
88 : owner_(owner),
89 typeof_state_(typeof_state),
90 destination_(destination),
91 previous_(owner->state()) {
92 owner_->set_state(this);
93}
94
95
96CodeGenState::~CodeGenState() {
97 ASSERT(owner_->state() == this);
98 owner_->set_state(previous_);
99}
100
101
102// -------------------------------------------------------------------------
103// CodeGenerator implementation
104
105CodeGenerator::CodeGenerator(int buffer_size,
106 Handle<Script> script,
107 bool is_eval)
108 : is_eval_(is_eval),
109 script_(script),
110 deferred_(8),
111 masm_(new MacroAssembler(NULL, buffer_size)),
112 scope_(NULL),
113 frame_(NULL),
114 allocator_(NULL),
115 state_(NULL),
116 loop_nesting_(0),
117 function_return_is_shadowed_(false),
118 in_spilled_code_(false) {
119}
120
121
122// Calling conventions:
123// ebp: caller's frame pointer
124// esp: stack pointer
125// edi: called JS function
126// esi: callee's context
127
128void CodeGenerator::GenCode(FunctionLiteral* fun) {
129 // Record the position for debugging purposes.
130 CodeForFunctionPosition(fun);
131
132 ZoneList<Statement*>* body = fun->body();
133
134 // Initialize state.
135 ASSERT(scope_ == NULL);
136 scope_ = fun->scope();
137 ASSERT(allocator_ == NULL);
138 RegisterAllocator register_allocator(this);
139 allocator_ = &register_allocator;
140 ASSERT(frame_ == NULL);
141 frame_ = new VirtualFrame();
142 set_in_spilled_code(false);
143
144 // Adjust for function-level loop nesting.
145 loop_nesting_ += fun->loop_nesting();
146
147 JumpTarget::set_compiling_deferred_code(false);
148
149#ifdef DEBUG
150 if (strlen(FLAG_stop_at) > 0 &&
151 fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
152 frame_->SpillAll();
153 __ int3();
154 }
155#endif
156
157 // New scope to get automatic timing calculation.
158 { // NOLINT
159 HistogramTimerScope codegen_timer(&Counters::code_generation);
160 CodeGenState state(this);
161
162 // Entry:
163 // Stack: receiver, arguments, return address.
164 // ebp: caller's frame pointer
165 // esp: stack pointer
166 // edi: called JS function
167 // esi: callee's context
168 allocator_->Initialize();
169 frame_->Enter();
170
171 // Allocate space for locals and initialize them.
172 frame_->AllocateStackSlots();
173 // Initialize the function return target after the locals are set
174 // up, because it needs the expected frame height from the frame.
175 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
176 function_return_is_shadowed_ = false;
177
178 // Allocate the local context if needed.
179 if (scope_->num_heap_slots() > 0) {
180 Comment cmnt(masm_, "[ allocate local context");
181 // Allocate local context.
182 // Get outer context and create a new context based on it.
183 frame_->PushFunction();
184 Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
185
186 // Update context local.
187 frame_->SaveContextRegister();
188
189 // Verify that the runtime call result and esi agree.
190 if (FLAG_debug_code) {
191 __ cmp(context.reg(), Operand(esi));
192 __ Assert(equal, "Runtime::NewContext should end up in esi");
193 }
194 }
195
196 // TODO(1241774): Improve this code:
197 // 1) only needed if we have a context
198 // 2) no need to recompute context ptr every single time
199 // 3) don't copy parameter operand code from SlotOperand!
200 {
201 Comment cmnt2(masm_, "[ copy context parameters into .context");
202
203 // Note that iteration order is relevant here! If we have the same
204 // parameter twice (e.g., function (x, y, x)), and that parameter
205 // needs to be copied into the context, it must be the last argument
206 // passed to the parameter that needs to be copied. This is a rare
207 // case so we don't check for it, instead we rely on the copying
208 // order: such a parameter is copied repeatedly into the same
209 // context location and thus the last value is what is seen inside
210 // the function.
211 for (int i = 0; i < scope_->num_parameters(); i++) {
212 Variable* par = scope_->parameter(i);
213 Slot* slot = par->slot();
214 if (slot != NULL && slot->type() == Slot::CONTEXT) {
215 // The use of SlotOperand below is safe in unspilled code
216 // because the slot is guaranteed to be a context slot.
217 //
218 // There are no parameters in the global scope.
219 ASSERT(!scope_->is_global_scope());
220 frame_->PushParameterAt(i);
221 Result value = frame_->Pop();
222 value.ToRegister();
223
224 // SlotOperand loads context.reg() with the context object
225 // stored to, used below in RecordWrite.
226 Result context = allocator_->Allocate();
227 ASSERT(context.is_valid());
228 __ mov(SlotOperand(slot, context.reg()), value.reg());
229 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
230 Result scratch = allocator_->Allocate();
231 ASSERT(scratch.is_valid());
232 frame_->Spill(context.reg());
233 frame_->Spill(value.reg());
234 __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
235 }
236 }
237 }
238
239 // Store the arguments object. This must happen after context
240 // initialization because the arguments object may be stored in
241 // the context.
242 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
243 StoreArgumentsObject(true);
244 }
245
246 // Generate code to 'execute' declarations and initialize functions
247 // (source elements). In case of an illegal redeclaration we need to
248 // handle that instead of processing the declarations.
249 if (scope_->HasIllegalRedeclaration()) {
250 Comment cmnt(masm_, "[ illegal redeclarations");
251 scope_->VisitIllegalRedeclaration(this);
252 } else {
253 Comment cmnt(masm_, "[ declarations");
254 ProcessDeclarations(scope_->declarations());
255 // Bail out if a stack-overflow exception occurred when processing
256 // declarations.
257 if (HasStackOverflow()) return;
258 }
259
260 if (FLAG_trace) {
261 frame_->CallRuntime(Runtime::kTraceEnter, 0);
262 // Ignore the return value.
263 }
264 CheckStack();
265
266 // Compile the body of the function in a vanilla state. Don't
267 // bother compiling all the code if the scope has an illegal
268 // redeclaration.
269 if (!scope_->HasIllegalRedeclaration()) {
270 Comment cmnt(masm_, "[ function body");
271#ifdef DEBUG
272 bool is_builtin = Bootstrapper::IsActive();
273 bool should_trace =
274 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
275 if (should_trace) {
276 frame_->CallRuntime(Runtime::kDebugTrace, 0);
277 // Ignore the return value.
278 }
279#endif
280 VisitStatements(body);
281
282 // Handle the return from the function.
283 if (has_valid_frame()) {
284 // If there is a valid frame, control flow can fall off the end of
285 // the body. In that case there is an implicit return statement.
286 ASSERT(!function_return_is_shadowed_);
287 CodeForReturnPosition(fun);
288 frame_->PrepareForReturn();
289 Result undefined(Factory::undefined_value());
290 if (function_return_.is_bound()) {
291 function_return_.Jump(&undefined);
292 } else {
293 function_return_.Bind(&undefined);
294 GenerateReturnSequence(&undefined);
295 }
296 } else if (function_return_.is_linked()) {
297 // If the return target has dangling jumps to it, then we have not
298 // yet generated the return sequence. This can happen when (a)
299 // control does not flow off the end of the body so we did not
300 // compile an artificial return statement just above, and (b) there
301 // are return statements in the body but (c) they are all shadowed.
302 Result return_value;
303 function_return_.Bind(&return_value);
304 GenerateReturnSequence(&return_value);
305 }
306 }
307 }
308
309 // Adjust for function-level loop nesting.
310 loop_nesting_ -= fun->loop_nesting();
311
312 // Code generation state must be reset.
313 ASSERT(state_ == NULL);
314 ASSERT(loop_nesting() == 0);
315 ASSERT(!function_return_is_shadowed_);
316 function_return_.Unuse();
317 DeleteFrame();
318
319 // Process any deferred code using the register allocator.
320 if (!HasStackOverflow()) {
321 HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
322 JumpTarget::set_compiling_deferred_code(true);
323 ProcessDeferred();
324 JumpTarget::set_compiling_deferred_code(false);
325 }
326
327 // There is no need to delete the register allocator, it is a
328 // stack-allocated local.
329 allocator_ = NULL;
330 scope_ = NULL;
331}
332
333
334Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
335 // Currently, this assertion will fail if we try to assign to
336 // a constant variable that is constant because it is read-only
337 // (such as the variable referring to a named function expression).
338 // We need to implement assignments to read-only variables.
339 // Ideally, we should do this during AST generation (by converting
340 // such assignments into expression statements); however, in general
341 // we may not be able to make the decision until past AST generation,
342 // that is when the entire program is known.
343 ASSERT(slot != NULL);
344 int index = slot->index();
345 switch (slot->type()) {
346 case Slot::PARAMETER:
347 return frame_->ParameterAt(index);
348
349 case Slot::LOCAL:
350 return frame_->LocalAt(index);
351
352 case Slot::CONTEXT: {
353 // Follow the context chain if necessary.
354 ASSERT(!tmp.is(esi)); // do not overwrite context register
355 Register context = esi;
356 int chain_length = scope()->ContextChainLength(slot->var()->scope());
357 for (int i = 0; i < chain_length; i++) {
358 // Load the closure.
359 // (All contexts, even 'with' contexts, have a closure,
360 // and it is the same for all contexts inside a function.
361 // There is no need to go to the function context first.)
362 __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
363 // Load the function context (which is the incoming, outer context).
364 __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
365 context = tmp;
366 }
367 // We may have a 'with' context now. Get the function context.
368 // (In fact this mov may never be the needed, since the scope analysis
369 // may not permit a direct context access in this case and thus we are
370 // always at a function context. However it is safe to dereference be-
371 // cause the function context of a function context is itself. Before
372 // deleting this mov we should try to create a counter-example first,
373 // though...)
374 __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
375 return ContextOperand(tmp, index);
376 }
377
378 default:
379 UNREACHABLE();
380 return Operand(eax);
381 }
382}
383
384
385Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
386 Result tmp,
387 JumpTarget* slow) {
388 ASSERT(slot->type() == Slot::CONTEXT);
389 ASSERT(tmp.is_register());
390 Register context = esi;
391
392 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
393 if (s->num_heap_slots() > 0) {
394 if (s->calls_eval()) {
395 // Check that extension is NULL.
396 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
397 Immediate(0));
398 slow->Branch(not_equal, not_taken);
399 }
400 __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
401 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
402 context = tmp.reg();
403 }
404 }
405 // Check that last extension is NULL.
406 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
407 slow->Branch(not_equal, not_taken);
408 __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
409 return ContextOperand(tmp.reg(), slot->index());
410}
411
412
413// Emit code to load the value of an expression to the top of the
414// frame. If the expression is boolean-valued it may be compiled (or
415// partially compiled) into control flow to the control destination.
416// If force_control is true, control flow is forced.
417void CodeGenerator::LoadCondition(Expression* x,
418 TypeofState typeof_state,
419 ControlDestination* dest,
420 bool force_control) {
421 ASSERT(!in_spilled_code());
422 int original_height = frame_->height();
423
424 { CodeGenState new_state(this, typeof_state, dest);
425 Visit(x);
426
427 // If we hit a stack overflow, we may not have actually visited
428 // the expression. In that case, we ensure that we have a
429 // valid-looking frame state because we will continue to generate
430 // code as we unwind the C++ stack.
431 //
432 // It's possible to have both a stack overflow and a valid frame
433 // state (eg, a subexpression overflowed, visiting it returned
434 // with a dummied frame state, and visiting this expression
435 // returned with a normal-looking state).
436 if (HasStackOverflow() &&
437 !dest->is_used() &&
438 frame_->height() == original_height) {
439 dest->Goto(true);
440 }
441 }
442
443 if (force_control && !dest->is_used()) {
444 // Convert the TOS value into flow to the control destination.
445 ToBoolean(dest);
446 }
447
448 ASSERT(!(force_control && !dest->is_used()));
449 ASSERT(dest->is_used() || frame_->height() == original_height + 1);
450}
451
452
453void CodeGenerator::LoadAndSpill(Expression* expression,
454 TypeofState typeof_state) {
455 ASSERT(in_spilled_code());
456 set_in_spilled_code(false);
457 Load(expression, typeof_state);
458 frame_->SpillAll();
459 set_in_spilled_code(true);
460}
461
462
463void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
464#ifdef DEBUG
465 int original_height = frame_->height();
466#endif
467 ASSERT(!in_spilled_code());
468 JumpTarget true_target;
469 JumpTarget false_target;
470 ControlDestination dest(&true_target, &false_target, true);
471 LoadCondition(x, typeof_state, &dest, false);
472
473 if (dest.false_was_fall_through()) {
474 // The false target was just bound.
475 JumpTarget loaded;
476 frame_->Push(Factory::false_value());
477 // There may be dangling jumps to the true target.
478 if (true_target.is_linked()) {
479 loaded.Jump();
480 true_target.Bind();
481 frame_->Push(Factory::true_value());
482 loaded.Bind();
483 }
484
485 } else if (dest.is_used()) {
486 // There is true, and possibly false, control flow (with true as
487 // the fall through).
488 JumpTarget loaded;
489 frame_->Push(Factory::true_value());
490 if (false_target.is_linked()) {
491 loaded.Jump();
492 false_target.Bind();
493 frame_->Push(Factory::false_value());
494 loaded.Bind();
495 }
496
497 } else {
498 // We have a valid value on top of the frame, but we still may
499 // have dangling jumps to the true and false targets from nested
500 // subexpressions (eg, the left subexpressions of the
501 // short-circuited boolean operators).
502 ASSERT(has_valid_frame());
503 if (true_target.is_linked() || false_target.is_linked()) {
504 JumpTarget loaded;
505 loaded.Jump(); // Don't lose the current TOS.
506 if (true_target.is_linked()) {
507 true_target.Bind();
508 frame_->Push(Factory::true_value());
509 if (false_target.is_linked()) {
510 loaded.Jump();
511 }
512 }
513 if (false_target.is_linked()) {
514 false_target.Bind();
515 frame_->Push(Factory::false_value());
516 }
517 loaded.Bind();
518 }
519 }
520
521 ASSERT(has_valid_frame());
522 ASSERT(frame_->height() == original_height + 1);
523}
524
525
526void CodeGenerator::LoadGlobal() {
527 if (in_spilled_code()) {
528 frame_->EmitPush(GlobalObject());
529 } else {
530 Result temp = allocator_->Allocate();
531 __ mov(temp.reg(), GlobalObject());
532 frame_->Push(&temp);
533 }
534}
535
536
537void CodeGenerator::LoadGlobalReceiver() {
538 Result temp = allocator_->Allocate();
539 Register reg = temp.reg();
540 __ mov(reg, GlobalObject());
541 __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
542 frame_->Push(&temp);
543}
544
545
546// TODO(1241834): Get rid of this function in favor of just using Load, now
547// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
548// variables w/o reference errors elsewhere.
549void CodeGenerator::LoadTypeofExpression(Expression* x) {
550 Variable* variable = x->AsVariableProxy()->AsVariable();
551 if (variable != NULL && !variable->is_this() && variable->is_global()) {
552 // NOTE: This is somewhat nasty. We force the compiler to load
553 // the variable as if through '<global>.<variable>' to make sure we
554 // do not get reference errors.
555 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
556 Literal key(variable->name());
557 // TODO(1241834): Fetch the position from the variable instead of using
558 // no position.
559 Property property(&global, &key, RelocInfo::kNoPosition);
560 Load(&property);
561 } else {
562 Load(x, INSIDE_TYPEOF);
563 }
564}
565
566
567ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
568 if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
569 ASSERT(scope_->arguments_shadow() != NULL);
570 // We don't want to do lazy arguments allocation for functions that
571 // have heap-allocated contexts, because it interfers with the
572 // uninitialized const tracking in the context objects.
573 return (scope_->num_heap_slots() > 0)
574 ? EAGER_ARGUMENTS_ALLOCATION
575 : LAZY_ARGUMENTS_ALLOCATION;
576}
577
578
579Result CodeGenerator::StoreArgumentsObject(bool initial) {
580 ArgumentsAllocationMode mode = ArgumentsMode();
581 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
582
583 Comment cmnt(masm_, "[ store arguments object");
584 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
585 // When using lazy arguments allocation, we store the hole value
586 // as a sentinel indicating that the arguments object hasn't been
587 // allocated yet.
588 frame_->Push(Factory::the_hole_value());
589 } else {
590 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
591 frame_->PushFunction();
592 frame_->PushReceiverSlotAddress();
593 frame_->Push(Smi::FromInt(scope_->num_parameters()));
594 Result result = frame_->CallStub(&stub, 3);
595 frame_->Push(&result);
596 }
597
598 { Reference shadow_ref(this, scope_->arguments_shadow());
599 Reference arguments_ref(this, scope_->arguments());
600 ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
601 // Here we rely on the convenient property that references to slot
602 // take up zero space in the frame (ie, it doesn't matter that the
603 // stored value is actually below the reference on the frame).
604 JumpTarget done;
605 bool skip_arguments = false;
606 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
607 // We have to skip storing into the arguments slot if it has
608 // already been written to. This can happen if the a function
609 // has a local variable named 'arguments'.
610 LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
611 Result arguments = frame_->Pop();
612 if (arguments.is_constant()) {
613 // We have to skip updating the arguments object if it has
614 // been assigned a proper value.
615 skip_arguments = !arguments.handle()->IsTheHole();
616 } else {
617 __ cmp(Operand(arguments.reg()), Immediate(Factory::the_hole_value()));
618 arguments.Unuse();
619 done.Branch(not_equal);
620 }
621 }
622 if (!skip_arguments) {
623 arguments_ref.SetValue(NOT_CONST_INIT);
624 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
625 }
626 shadow_ref.SetValue(NOT_CONST_INIT);
627 }
628 return frame_->Pop();
629}
630
631
632Reference::Reference(CodeGenerator* cgen, Expression* expression)
633 : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
634 cgen->LoadReference(this);
635}
636
637
638Reference::~Reference() {
639 cgen_->UnloadReference(this);
640}
641
642
643void CodeGenerator::LoadReference(Reference* ref) {
644 // References are loaded from both spilled and unspilled code. Set the
645 // state to unspilled to allow that (and explicitly spill after
646 // construction at the construction sites).
647 bool was_in_spilled_code = in_spilled_code_;
648 in_spilled_code_ = false;
649
650 Comment cmnt(masm_, "[ LoadReference");
651 Expression* e = ref->expression();
652 Property* property = e->AsProperty();
653 Variable* var = e->AsVariableProxy()->AsVariable();
654
655 if (property != NULL) {
656 // The expression is either a property or a variable proxy that rewrites
657 // to a property.
658 Load(property->obj());
659 // We use a named reference if the key is a literal symbol, unless it is
660 // a string that can be legally parsed as an integer. This is because
661 // otherwise we will not get into the slow case code that handles [] on
662 // String objects.
663 Literal* literal = property->key()->AsLiteral();
664 uint32_t dummy;
665 if (literal != NULL &&
666 literal->handle()->IsSymbol() &&
667 !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
668 ref->set_type(Reference::NAMED);
669 } else {
670 Load(property->key());
671 ref->set_type(Reference::KEYED);
672 }
673 } else if (var != NULL) {
674 // The expression is a variable proxy that does not rewrite to a
675 // property. Global variables are treated as named property references.
676 if (var->is_global()) {
677 LoadGlobal();
678 ref->set_type(Reference::NAMED);
679 } else {
680 ASSERT(var->slot() != NULL);
681 ref->set_type(Reference::SLOT);
682 }
683 } else {
684 // Anything else is a runtime error.
685 Load(e);
686 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
687 }
688
689 in_spilled_code_ = was_in_spilled_code;
690}
691
692
693void CodeGenerator::UnloadReference(Reference* ref) {
694 // Pop a reference from the stack while preserving TOS.
695 Comment cmnt(masm_, "[ UnloadReference");
696 frame_->Nip(ref->size());
697}
698
699
700class ToBooleanStub: public CodeStub {
701 public:
702 ToBooleanStub() { }
703
704 void Generate(MacroAssembler* masm);
705
706 private:
707 Major MajorKey() { return ToBoolean; }
708 int MinorKey() { return 0; }
709};
710
711
712// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
713// convert it to a boolean in the condition code register or jump to
714// 'false_target'/'true_target' as appropriate.
715void CodeGenerator::ToBoolean(ControlDestination* dest) {
716 Comment cmnt(masm_, "[ ToBoolean");
717
718 // The value to convert should be popped from the frame.
719 Result value = frame_->Pop();
720 value.ToRegister();
721 // Fast case checks.
722
723 // 'false' => false.
724 __ cmp(value.reg(), Factory::false_value());
725 dest->false_target()->Branch(equal);
726
727 // 'true' => true.
728 __ cmp(value.reg(), Factory::true_value());
729 dest->true_target()->Branch(equal);
730
731 // 'undefined' => false.
732 __ cmp(value.reg(), Factory::undefined_value());
733 dest->false_target()->Branch(equal);
734
735 // Smi => false iff zero.
736 ASSERT(kSmiTag == 0);
737 __ test(value.reg(), Operand(value.reg()));
738 dest->false_target()->Branch(zero);
739 __ test(value.reg(), Immediate(kSmiTagMask));
740 dest->true_target()->Branch(zero);
741
742 // Call the stub for all other cases.
743 frame_->Push(&value); // Undo the Pop() from above.
744 ToBooleanStub stub;
745 Result temp = frame_->CallStub(&stub, 1);
746 // Convert the result to a condition code.
747 __ test(temp.reg(), Operand(temp.reg()));
748 temp.Unuse();
749 dest->Split(not_equal);
750}
751
752
753class FloatingPointHelper : public AllStatic {
754 public:
755 // Code pattern for loading a floating point value. Input value must
756 // be either a smi or a heap number object (fp value). Requirements:
757 // operand in register number. Returns operand as floating point number
758 // on FPU stack.
759 static void LoadFloatOperand(MacroAssembler* masm, Register number);
760 // Code pattern for loading floating point values. Input values must
761 // be either smi or heap number objects (fp values). Requirements:
762 // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
763 // floating point numbers on FPU stack.
764 static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
765 // Test if operands are smi or number objects (fp). Requirements:
766 // operand_1 in eax, operand_2 in edx; falls through on float
767 // operands, jumps to the non_float label otherwise.
768 static void CheckFloatOperands(MacroAssembler* masm,
769 Label* non_float,
770 Register scratch);
771 // Test if operands are numbers (smi or HeapNumber objects), and load
772 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
773 // either operand is not a number. Operands are in edx and eax.
774 // Leaves operands unchanged.
775 static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
776 // Allocate a heap number in new space with undefined value.
777 // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
778 static void AllocateHeapNumber(MacroAssembler* masm,
779 Label* need_gc,
780 Register scratch1,
781 Register scratch2,
782 Register result);
783};
784
785
786const char* GenericBinaryOpStub::GetName() {
787 switch (op_) {
788 case Token::ADD: return "GenericBinaryOpStub_ADD";
789 case Token::SUB: return "GenericBinaryOpStub_SUB";
790 case Token::MUL: return "GenericBinaryOpStub_MUL";
791 case Token::DIV: return "GenericBinaryOpStub_DIV";
792 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
793 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
794 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
795 case Token::SAR: return "GenericBinaryOpStub_SAR";
796 case Token::SHL: return "GenericBinaryOpStub_SHL";
797 case Token::SHR: return "GenericBinaryOpStub_SHR";
798 default: return "GenericBinaryOpStub";
799 }
800}
801
802
803// Call the specialized stub for a binary operation.
804class DeferredInlineBinaryOperation: public DeferredCode {
805 public:
806 DeferredInlineBinaryOperation(Token::Value op,
807 Register dst,
808 Register left,
809 Register right,
810 OverwriteMode mode)
811 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
812 set_comment("[ DeferredInlineBinaryOperation");
813 }
814
815 virtual void Generate();
816
817 private:
818 Token::Value op_;
819 Register dst_;
820 Register left_;
821 Register right_;
822 OverwriteMode mode_;
823};
824
825
826void DeferredInlineBinaryOperation::Generate() {
827 __ push(left_);
828 __ push(right_);
829 GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
830 __ CallStub(&stub);
831 if (!dst_.is(eax)) __ mov(dst_, eax);
832}
833
834
835void CodeGenerator::GenericBinaryOperation(Token::Value op,
836 SmiAnalysis* type,
837 OverwriteMode overwrite_mode) {
838 Comment cmnt(masm_, "[ BinaryOperation");
839 Comment cmnt_token(masm_, Token::String(op));
840
841 if (op == Token::COMMA) {
842 // Simply discard left value.
843 frame_->Nip(1);
844 return;
845 }
846
847 // Set the flags based on the operation, type and loop nesting level.
848 GenericBinaryFlags flags;
849 switch (op) {
850 case Token::BIT_OR:
851 case Token::BIT_AND:
852 case Token::BIT_XOR:
853 case Token::SHL:
854 case Token::SHR:
855 case Token::SAR:
856 // Bit operations always assume they likely operate on Smis. Still only
857 // generate the inline Smi check code if this operation is part of a loop.
858 flags = (loop_nesting() > 0)
859 ? SMI_CODE_INLINED
860 : SMI_CODE_IN_STUB;
861 break;
862
863 default:
864 // By default only inline the Smi check code for likely smis if this
865 // operation is part of a loop.
866 flags = ((loop_nesting() > 0) && type->IsLikelySmi())
867 ? SMI_CODE_INLINED
868 : SMI_CODE_IN_STUB;
869 break;
870 }
871
872 Result right = frame_->Pop();
873 Result left = frame_->Pop();
874
875 if (op == Token::ADD) {
876 bool left_is_string = left.is_constant() && left.handle()->IsString();
877 bool right_is_string = right.is_constant() && right.handle()->IsString();
878 if (left_is_string || right_is_string) {
879 frame_->Push(&left);
880 frame_->Push(&right);
881 Result answer;
882 if (left_is_string) {
883 if (right_is_string) {
884 // TODO(lrn): if both are constant strings
885 // -- do a compile time cons, if allocation during codegen is allowed.
886 answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
887 } else {
888 answer =
889 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
890 }
891 } else if (right_is_string) {
892 answer =
893 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
894 }
895 frame_->Push(&answer);
896 return;
897 }
898 // Neither operand is known to be a string.
899 }
900
901 bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
902 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
903 bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
904 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
905 bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
906
907 if (left_is_smi && right_is_smi) {
908 // Compute the constant result at compile time, and leave it on the frame.
909 int left_int = Smi::cast(*left.handle())->value();
910 int right_int = Smi::cast(*right.handle())->value();
911 if (FoldConstantSmis(op, left_int, right_int)) return;
912 }
913
914 if (left_is_non_smi || right_is_non_smi) {
915 // Set flag so that we go straight to the slow case, with no smi code.
916 generate_no_smi_code = true;
917 } else if (right_is_smi) {
918 ConstantSmiBinaryOperation(op, &left, right.handle(),
919 type, false, overwrite_mode);
920 return;
921 } else if (left_is_smi) {
922 ConstantSmiBinaryOperation(op, &right, left.handle(),
923 type, true, overwrite_mode);
924 return;
925 }
926
927 if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
928 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
929 } else {
930 frame_->Push(&left);
931 frame_->Push(&right);
932 // If we know the arguments aren't smis, use the binary operation stub
933 // that does not check for the fast smi case.
934 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
935 if (generate_no_smi_code) {
936 flags = SMI_CODE_INLINED;
937 }
938 GenericBinaryOpStub stub(op, overwrite_mode, flags);
939 Result answer = frame_->CallStub(&stub, 2);
940 frame_->Push(&answer);
941 }
942}
943
944
945bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
946 Object* answer_object = Heap::undefined_value();
947 switch (op) {
948 case Token::ADD:
949 if (Smi::IsValid(left + right)) {
950 answer_object = Smi::FromInt(left + right);
951 }
952 break;
953 case Token::SUB:
954 if (Smi::IsValid(left - right)) {
955 answer_object = Smi::FromInt(left - right);
956 }
957 break;
958 case Token::MUL: {
959 double answer = static_cast<double>(left) * right;
960 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
961 // If the product is zero and the non-zero factor is negative,
962 // the spec requires us to return floating point negative zero.
963 if (answer != 0 || (left >= 0 && right >= 0)) {
964 answer_object = Smi::FromInt(static_cast<int>(answer));
965 }
966 }
967 }
968 break;
969 case Token::DIV:
970 case Token::MOD:
971 break;
972 case Token::BIT_OR:
973 answer_object = Smi::FromInt(left | right);
974 break;
975 case Token::BIT_AND:
976 answer_object = Smi::FromInt(left & right);
977 break;
978 case Token::BIT_XOR:
979 answer_object = Smi::FromInt(left ^ right);
980 break;
981
982 case Token::SHL: {
983 int shift_amount = right & 0x1F;
984 if (Smi::IsValid(left << shift_amount)) {
985 answer_object = Smi::FromInt(left << shift_amount);
986 }
987 break;
988 }
989 case Token::SHR: {
990 int shift_amount = right & 0x1F;
991 unsigned int unsigned_left = left;
992 unsigned_left >>= shift_amount;
993 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
994 answer_object = Smi::FromInt(unsigned_left);
995 }
996 break;
997 }
998 case Token::SAR: {
999 int shift_amount = right & 0x1F;
1000 unsigned int unsigned_left = left;
1001 if (left < 0) {
1002 // Perform arithmetic shift of a negative number by
1003 // complementing number, logical shifting, complementing again.
1004 unsigned_left = ~unsigned_left;
1005 unsigned_left >>= shift_amount;
1006 unsigned_left = ~unsigned_left;
1007 } else {
1008 unsigned_left >>= shift_amount;
1009 }
1010 ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed.
1011 answer_object = Smi::FromInt(unsigned_left); // Converted to signed.
1012 break;
1013 }
1014 default:
1015 UNREACHABLE();
1016 break;
1017 }
1018 if (answer_object == Heap::undefined_value()) {
1019 return false;
1020 }
1021 frame_->Push(Handle<Object>(answer_object));
1022 return true;
1023}
1024
1025
1026// Implements a binary operation using a deferred code object and some
1027// inline code to operate on smis quickly.
1028void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
1029 Result* left,
1030 Result* right,
1031 OverwriteMode overwrite_mode) {
1032 // Special handling of div and mod because they use fixed registers.
1033 if (op == Token::DIV || op == Token::MOD) {
1034 // We need eax as the quotient register, edx as the remainder
1035 // register, neither left nor right in eax or edx, and left copied
1036 // to eax.
1037 Result quotient;
1038 Result remainder;
1039 bool left_is_in_eax = false;
1040 // Step 1: get eax for quotient.
1041 if ((left->is_register() && left->reg().is(eax)) ||
1042 (right->is_register() && right->reg().is(eax))) {
1043 // One or both is in eax. Use a fresh non-edx register for
1044 // them.
1045 Result fresh = allocator_->Allocate();
1046 ASSERT(fresh.is_valid());
1047 if (fresh.reg().is(edx)) {
1048 remainder = fresh;
1049 fresh = allocator_->Allocate();
1050 ASSERT(fresh.is_valid());
1051 }
1052 if (left->is_register() && left->reg().is(eax)) {
1053 quotient = *left;
1054 *left = fresh;
1055 left_is_in_eax = true;
1056 }
1057 if (right->is_register() && right->reg().is(eax)) {
1058 quotient = *right;
1059 *right = fresh;
1060 }
1061 __ mov(fresh.reg(), eax);
1062 } else {
1063 // Neither left nor right is in eax.
1064 quotient = allocator_->Allocate(eax);
1065 }
1066 ASSERT(quotient.is_register() && quotient.reg().is(eax));
1067 ASSERT(!(left->is_register() && left->reg().is(eax)));
1068 ASSERT(!(right->is_register() && right->reg().is(eax)));
1069
1070 // Step 2: get edx for remainder if necessary.
1071 if (!remainder.is_valid()) {
1072 if ((left->is_register() && left->reg().is(edx)) ||
1073 (right->is_register() && right->reg().is(edx))) {
1074 Result fresh = allocator_->Allocate();
1075 ASSERT(fresh.is_valid());
1076 if (left->is_register() && left->reg().is(edx)) {
1077 remainder = *left;
1078 *left = fresh;
1079 }
1080 if (right->is_register() && right->reg().is(edx)) {
1081 remainder = *right;
1082 *right = fresh;
1083 }
1084 __ mov(fresh.reg(), edx);
1085 } else {
1086 // Neither left nor right is in edx.
1087 remainder = allocator_->Allocate(edx);
1088 }
1089 }
1090 ASSERT(remainder.is_register() && remainder.reg().is(edx));
1091 ASSERT(!(left->is_register() && left->reg().is(edx)));
1092 ASSERT(!(right->is_register() && right->reg().is(edx)));
1093
1094 left->ToRegister();
1095 right->ToRegister();
1096 frame_->Spill(eax);
1097 frame_->Spill(edx);
1098
1099 // Check that left and right are smi tagged.
1100 DeferredInlineBinaryOperation* deferred =
1101 new DeferredInlineBinaryOperation(op,
1102 (op == Token::DIV) ? eax : edx,
1103 left->reg(),
1104 right->reg(),
1105 overwrite_mode);
1106 if (left->reg().is(right->reg())) {
1107 __ test(left->reg(), Immediate(kSmiTagMask));
1108 } else {
1109 // Use the quotient register as a scratch for the tag check.
1110 if (!left_is_in_eax) __ mov(eax, left->reg());
1111 left_is_in_eax = false; // About to destroy the value in eax.
1112 __ or_(eax, Operand(right->reg()));
1113 ASSERT(kSmiTag == 0); // Adjust test if not the case.
1114 __ test(eax, Immediate(kSmiTagMask));
1115 }
1116 deferred->Branch(not_zero);
1117
1118 if (!left_is_in_eax) __ mov(eax, left->reg());
1119 // Sign extend eax into edx:eax.
1120 __ cdq();
1121 // Check for 0 divisor.
1122 __ test(right->reg(), Operand(right->reg()));
1123 deferred->Branch(zero);
1124 // Divide edx:eax by the right operand.
1125 __ idiv(right->reg());
1126
1127 // Complete the operation.
1128 if (op == Token::DIV) {
1129 // Check for negative zero result. If result is zero, and divisor
1130 // is negative, return a floating point negative zero. The
1131 // virtual frame is unchanged in this block, so local control flow
1132 // can use a Label rather than a JumpTarget.
1133 Label non_zero_result;
1134 __ test(left->reg(), Operand(left->reg()));
1135 __ j(not_zero, &non_zero_result);
1136 __ test(right->reg(), Operand(right->reg()));
1137 deferred->Branch(negative);
1138 __ bind(&non_zero_result);
1139 // Check for the corner case of dividing the most negative smi by
1140 // -1. We cannot use the overflow flag, since it is not set by
1141 // idiv instruction.
1142 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1143 __ cmp(eax, 0x40000000);
1144 deferred->Branch(equal);
1145 // Check that the remainder is zero.
1146 __ test(edx, Operand(edx));
1147 deferred->Branch(not_zero);
1148 // Tag the result and store it in the quotient register.
1149 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
1150 __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
1151 deferred->BindExit();
1152 left->Unuse();
1153 right->Unuse();
1154 frame_->Push(&quotient);
1155 } else {
1156 ASSERT(op == Token::MOD);
1157 // Check for a negative zero result. If the result is zero, and
1158 // the dividend is negative, return a floating point negative
1159 // zero. The frame is unchanged in this block, so local control
1160 // flow can use a Label rather than a JumpTarget.
1161 Label non_zero_result;
1162 __ test(edx, Operand(edx));
1163 __ j(not_zero, &non_zero_result, taken);
1164 __ test(left->reg(), Operand(left->reg()));
1165 deferred->Branch(negative);
1166 __ bind(&non_zero_result);
1167 deferred->BindExit();
1168 left->Unuse();
1169 right->Unuse();
1170 frame_->Push(&remainder);
1171 }
1172 return;
1173 }
1174
1175 // Special handling of shift operations because they use fixed
1176 // registers.
1177 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
1178 // Move left out of ecx if necessary.
1179 if (left->is_register() && left->reg().is(ecx)) {
1180 *left = allocator_->Allocate();
1181 ASSERT(left->is_valid());
1182 __ mov(left->reg(), ecx);
1183 }
1184 right->ToRegister(ecx);
1185 left->ToRegister();
1186 ASSERT(left->is_register() && !left->reg().is(ecx));
1187 ASSERT(right->is_register() && right->reg().is(ecx));
1188
1189 // We will modify right, it must be spilled.
1190 frame_->Spill(ecx);
1191
1192 // Use a fresh answer register to avoid spilling the left operand.
1193 Result answer = allocator_->Allocate();
1194 ASSERT(answer.is_valid());
1195 // Check that both operands are smis using the answer register as a
1196 // temporary.
1197 DeferredInlineBinaryOperation* deferred =
1198 new DeferredInlineBinaryOperation(op,
1199 answer.reg(),
1200 left->reg(),
1201 ecx,
1202 overwrite_mode);
1203 __ mov(answer.reg(), left->reg());
1204 __ or_(answer.reg(), Operand(ecx));
1205 __ test(answer.reg(), Immediate(kSmiTagMask));
1206 deferred->Branch(not_zero);
1207
1208 // Untag both operands.
1209 __ mov(answer.reg(), left->reg());
1210 __ sar(answer.reg(), kSmiTagSize);
1211 __ sar(ecx, kSmiTagSize);
1212 // Perform the operation.
1213 switch (op) {
1214 case Token::SAR:
1215 __ sar(answer.reg());
1216 // No checks of result necessary
1217 break;
1218 case Token::SHR: {
1219 Label result_ok;
1220 __ shr(answer.reg());
1221 // Check that the *unsigned* result fits in a smi. Neither of
1222 // the two high-order bits can be set:
1223 // * 0x80000000: high bit would be lost when smi tagging.
1224 // * 0x40000000: this number would convert to negative when smi
1225 // tagging.
1226 // These two cases can only happen with shifts by 0 or 1 when
1227 // handed a valid smi. If the answer cannot be represented by a
1228 // smi, restore the left and right arguments, and jump to slow
1229 // case. The low bit of the left argument may be lost, but only
1230 // in a case where it is dropped anyway.
1231 __ test(answer.reg(), Immediate(0xc0000000));
1232 __ j(zero, &result_ok);
1233 ASSERT(kSmiTag == 0);
1234 __ shl(ecx, kSmiTagSize);
1235 deferred->Jump();
1236 __ bind(&result_ok);
1237 break;
1238 }
1239 case Token::SHL: {
1240 Label result_ok;
1241 __ shl(answer.reg());
1242 // Check that the *signed* result fits in a smi.
1243 __ cmp(answer.reg(), 0xc0000000);
1244 __ j(positive, &result_ok);
1245 ASSERT(kSmiTag == 0);
1246 __ shl(ecx, kSmiTagSize);
1247 deferred->Jump();
1248 __ bind(&result_ok);
1249 break;
1250 }
1251 default:
1252 UNREACHABLE();
1253 }
1254 // Smi-tag the result in answer.
1255 ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
1256 __ lea(answer.reg(),
1257 Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
1258 deferred->BindExit();
1259 left->Unuse();
1260 right->Unuse();
1261 frame_->Push(&answer);
1262 return;
1263 }
1264
1265 // Handle the other binary operations.
1266 left->ToRegister();
1267 right->ToRegister();
1268 // A newly allocated register answer is used to hold the answer. The
1269 // registers containing left and right are not modified so they don't
1270 // need to be spilled in the fast case.
1271 Result answer = allocator_->Allocate();
1272 ASSERT(answer.is_valid());
1273
1274 // Perform the smi tag check.
1275 DeferredInlineBinaryOperation* deferred =
1276 new DeferredInlineBinaryOperation(op,
1277 answer.reg(),
1278 left->reg(),
1279 right->reg(),
1280 overwrite_mode);
1281 if (left->reg().is(right->reg())) {
1282 __ test(left->reg(), Immediate(kSmiTagMask));
1283 } else {
1284 __ mov(answer.reg(), left->reg());
1285 __ or_(answer.reg(), Operand(right->reg()));
1286 ASSERT(kSmiTag == 0); // Adjust test if not the case.
1287 __ test(answer.reg(), Immediate(kSmiTagMask));
1288 }
1289 deferred->Branch(not_zero);
1290 __ mov(answer.reg(), left->reg());
1291 switch (op) {
1292 case Token::ADD:
1293 __ add(answer.reg(), Operand(right->reg())); // Add optimistically.
1294 deferred->Branch(overflow);
1295 break;
1296
1297 case Token::SUB:
1298 __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically.
1299 deferred->Branch(overflow);
1300 break;
1301
1302 case Token::MUL: {
1303 // If the smi tag is 0 we can just leave the tag on one operand.
1304 ASSERT(kSmiTag == 0); // Adjust code below if not the case.
1305 // Remove smi tag from the left operand (but keep sign).
1306 // Left-hand operand has been copied into answer.
1307 __ sar(answer.reg(), kSmiTagSize);
1308 // Do multiplication of smis, leaving result in answer.
1309 __ imul(answer.reg(), Operand(right->reg()));
1310 // Go slow on overflows.
1311 deferred->Branch(overflow);
1312 // Check for negative zero result. If product is zero, and one
1313 // argument is negative, go to slow case. The frame is unchanged
1314 // in this block, so local control flow can use a Label rather
1315 // than a JumpTarget.
1316 Label non_zero_result;
1317 __ test(answer.reg(), Operand(answer.reg()));
1318 __ j(not_zero, &non_zero_result, taken);
1319 __ mov(answer.reg(), left->reg());
1320 __ or_(answer.reg(), Operand(right->reg()));
1321 deferred->Branch(negative);
1322 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
1323 __ bind(&non_zero_result);
1324 break;
1325 }
1326
1327 case Token::BIT_OR:
1328 __ or_(answer.reg(), Operand(right->reg()));
1329 break;
1330
1331 case Token::BIT_AND:
1332 __ and_(answer.reg(), Operand(right->reg()));
1333 break;
1334
1335 case Token::BIT_XOR:
1336 __ xor_(answer.reg(), Operand(right->reg()));
1337 break;
1338
1339 default:
1340 UNREACHABLE();
1341 break;
1342 }
1343 deferred->BindExit();
1344 left->Unuse();
1345 right->Unuse();
1346 frame_->Push(&answer);
1347}
1348
1349
1350// Call the appropriate binary operation stub to compute src op value
1351// and leave the result in dst.
1352class DeferredInlineSmiOperation: public DeferredCode {
1353 public:
1354 DeferredInlineSmiOperation(Token::Value op,
1355 Register dst,
1356 Register src,
1357 Smi* value,
1358 OverwriteMode overwrite_mode)
1359 : op_(op),
1360 dst_(dst),
1361 src_(src),
1362 value_(value),
1363 overwrite_mode_(overwrite_mode) {
1364 set_comment("[ DeferredInlineSmiOperation");
1365 }
1366
1367 virtual void Generate();
1368
1369 private:
1370 Token::Value op_;
1371 Register dst_;
1372 Register src_;
1373 Smi* value_;
1374 OverwriteMode overwrite_mode_;
1375};
1376
1377
1378void DeferredInlineSmiOperation::Generate() {
1379 __ push(src_);
1380 __ push(Immediate(value_));
1381 // For mod we don't generate all the Smi code inline.
1382 GenericBinaryOpStub stub(
1383 op_,
1384 overwrite_mode_,
1385 (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
1386 __ CallStub(&stub);
1387 if (!dst_.is(eax)) __ mov(dst_, eax);
1388}
1389
1390
1391// Call the appropriate binary operation stub to compute value op src
1392// and leave the result in dst.
1393class DeferredInlineSmiOperationReversed: public DeferredCode {
1394 public:
1395 DeferredInlineSmiOperationReversed(Token::Value op,
1396 Register dst,
1397 Smi* value,
1398 Register src,
1399 OverwriteMode overwrite_mode)
1400 : op_(op),
1401 dst_(dst),
1402 value_(value),
1403 src_(src),
1404 overwrite_mode_(overwrite_mode) {
1405 set_comment("[ DeferredInlineSmiOperationReversed");
1406 }
1407
1408 virtual void Generate();
1409
1410 private:
1411 Token::Value op_;
1412 Register dst_;
1413 Smi* value_;
1414 Register src_;
1415 OverwriteMode overwrite_mode_;
1416};
1417
1418
1419void DeferredInlineSmiOperationReversed::Generate() {
1420 __ push(Immediate(value_));
1421 __ push(src_);
1422 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
1423 __ CallStub(&igostub);
1424 if (!dst_.is(eax)) __ mov(dst_, eax);
1425}
1426
1427
1428// The result of src + value is in dst. It either overflowed or was not
1429// smi tagged. Undo the speculative addition and call the appropriate
1430// specialized stub for add. The result is left in dst.
1431class DeferredInlineSmiAdd: public DeferredCode {
1432 public:
1433 DeferredInlineSmiAdd(Register dst,
1434 Smi* value,
1435 OverwriteMode overwrite_mode)
1436 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1437 set_comment("[ DeferredInlineSmiAdd");
1438 }
1439
1440 virtual void Generate();
1441
1442 private:
1443 Register dst_;
1444 Smi* value_;
1445 OverwriteMode overwrite_mode_;
1446};
1447
1448
1449void DeferredInlineSmiAdd::Generate() {
1450 // Undo the optimistic add operation and call the shared stub.
1451 __ sub(Operand(dst_), Immediate(value_));
1452 __ push(dst_);
1453 __ push(Immediate(value_));
1454 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
1455 __ CallStub(&igostub);
1456 if (!dst_.is(eax)) __ mov(dst_, eax);
1457}
1458
1459
1460// The result of value + src is in dst. It either overflowed or was not
1461// smi tagged. Undo the speculative addition and call the appropriate
1462// specialized stub for add. The result is left in dst.
1463class DeferredInlineSmiAddReversed: public DeferredCode {
1464 public:
1465 DeferredInlineSmiAddReversed(Register dst,
1466 Smi* value,
1467 OverwriteMode overwrite_mode)
1468 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1469 set_comment("[ DeferredInlineSmiAddReversed");
1470 }
1471
1472 virtual void Generate();
1473
1474 private:
1475 Register dst_;
1476 Smi* value_;
1477 OverwriteMode overwrite_mode_;
1478};
1479
1480
1481void DeferredInlineSmiAddReversed::Generate() {
1482 // Undo the optimistic add operation and call the shared stub.
1483 __ sub(Operand(dst_), Immediate(value_));
1484 __ push(Immediate(value_));
1485 __ push(dst_);
1486 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
1487 __ CallStub(&igostub);
1488 if (!dst_.is(eax)) __ mov(dst_, eax);
1489}
1490
1491
1492// The result of src - value is in dst. It either overflowed or was not
1493// smi tagged. Undo the speculative subtraction and call the
1494// appropriate specialized stub for subtract. The result is left in
1495// dst.
1496class DeferredInlineSmiSub: public DeferredCode {
1497 public:
1498 DeferredInlineSmiSub(Register dst,
1499 Smi* value,
1500 OverwriteMode overwrite_mode)
1501 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1502 set_comment("[ DeferredInlineSmiSub");
1503 }
1504
1505 virtual void Generate();
1506
1507 private:
1508 Register dst_;
1509 Smi* value_;
1510 OverwriteMode overwrite_mode_;
1511};
1512
1513
1514void DeferredInlineSmiSub::Generate() {
1515 // Undo the optimistic sub operation and call the shared stub.
1516 __ add(Operand(dst_), Immediate(value_));
1517 __ push(dst_);
1518 __ push(Immediate(value_));
1519 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
1520 __ CallStub(&igostub);
1521 if (!dst_.is(eax)) __ mov(dst_, eax);
1522}
1523
1524
1525void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
1526 Result* operand,
1527 Handle<Object> value,
1528 SmiAnalysis* type,
1529 bool reversed,
1530 OverwriteMode overwrite_mode) {
1531 // NOTE: This is an attempt to inline (a bit) more of the code for
1532 // some possible smi operations (like + and -) when (at least) one
1533 // of the operands is a constant smi.
1534 // Consumes the argument "operand".
1535
1536 // TODO(199): Optimize some special cases of operations involving a
1537 // smi literal (multiply by 2, shift by 0, etc.).
1538 if (IsUnsafeSmi(value)) {
1539 Result unsafe_operand(value);
1540 if (reversed) {
1541 LikelySmiBinaryOperation(op, &unsafe_operand, operand,
1542 overwrite_mode);
1543 } else {
1544 LikelySmiBinaryOperation(op, operand, &unsafe_operand,
1545 overwrite_mode);
1546 }
1547 ASSERT(!operand->is_valid());
1548 return;
1549 }
1550
1551 // Get the literal value.
1552 Smi* smi_value = Smi::cast(*value);
1553 int int_value = smi_value->value();
1554
1555 switch (op) {
1556 case Token::ADD: {
1557 operand->ToRegister();
1558 frame_->Spill(operand->reg());
1559
1560 // Optimistically add. Call the specialized add stub if the
1561 // result is not a smi or overflows.
1562 DeferredCode* deferred = NULL;
1563 if (reversed) {
1564 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
1565 smi_value,
1566 overwrite_mode);
1567 } else {
1568 deferred = new DeferredInlineSmiAdd(operand->reg(),
1569 smi_value,
1570 overwrite_mode);
1571 }
1572 __ add(Operand(operand->reg()), Immediate(value));
1573 deferred->Branch(overflow);
1574 __ test(operand->reg(), Immediate(kSmiTagMask));
1575 deferred->Branch(not_zero);
1576 deferred->BindExit();
1577 frame_->Push(operand);
1578 break;
1579 }
1580
1581 case Token::SUB: {
1582 DeferredCode* deferred = NULL;
1583 Result answer; // Only allocate a new register if reversed.
1584 if (reversed) {
1585 // The reversed case is only hit when the right operand is not a
1586 // constant.
1587 ASSERT(operand->is_register());
1588 answer = allocator()->Allocate();
1589 ASSERT(answer.is_valid());
1590 __ Set(answer.reg(), Immediate(value));
1591 deferred = new DeferredInlineSmiOperationReversed(op,
1592 answer.reg(),
1593 smi_value,
1594 operand->reg(),
1595 overwrite_mode);
1596 __ sub(answer.reg(), Operand(operand->reg()));
1597 } else {
1598 operand->ToRegister();
1599 frame_->Spill(operand->reg());
1600 answer = *operand;
1601 deferred = new DeferredInlineSmiSub(operand->reg(),
1602 smi_value,
1603 overwrite_mode);
1604 __ sub(Operand(operand->reg()), Immediate(value));
1605 }
1606 deferred->Branch(overflow);
1607 __ test(answer.reg(), Immediate(kSmiTagMask));
1608 deferred->Branch(not_zero);
1609 deferred->BindExit();
1610 operand->Unuse();
1611 frame_->Push(&answer);
1612 break;
1613 }
1614
1615 case Token::SAR:
1616 if (reversed) {
1617 Result constant_operand(value);
1618 LikelySmiBinaryOperation(op, &constant_operand, operand,
1619 overwrite_mode);
1620 } else {
1621 // Only the least significant 5 bits of the shift value are used.
1622 // In the slow case, this masking is done inside the runtime call.
1623 int shift_value = int_value & 0x1f;
1624 operand->ToRegister();
1625 frame_->Spill(operand->reg());
1626 DeferredInlineSmiOperation* deferred =
1627 new DeferredInlineSmiOperation(op,
1628 operand->reg(),
1629 operand->reg(),
1630 smi_value,
1631 overwrite_mode);
1632 __ test(operand->reg(), Immediate(kSmiTagMask));
1633 deferred->Branch(not_zero);
1634 if (shift_value > 0) {
1635 __ sar(operand->reg(), shift_value);
1636 __ and_(operand->reg(), ~kSmiTagMask);
1637 }
1638 deferred->BindExit();
1639 frame_->Push(operand);
1640 }
1641 break;
1642
1643 case Token::SHR:
1644 if (reversed) {
1645 Result constant_operand(value);
1646 LikelySmiBinaryOperation(op, &constant_operand, operand,
1647 overwrite_mode);
1648 } else {
1649 // Only the least significant 5 bits of the shift value are used.
1650 // In the slow case, this masking is done inside the runtime call.
1651 int shift_value = int_value & 0x1f;
1652 operand->ToRegister();
1653 Result answer = allocator()->Allocate();
1654 ASSERT(answer.is_valid());
1655 DeferredInlineSmiOperation* deferred =
1656 new DeferredInlineSmiOperation(op,
1657 answer.reg(),
1658 operand->reg(),
1659 smi_value,
1660 overwrite_mode);
1661 __ test(operand->reg(), Immediate(kSmiTagMask));
1662 deferred->Branch(not_zero);
1663 __ mov(answer.reg(), operand->reg());
1664 __ sar(answer.reg(), kSmiTagSize);
1665 __ shr(answer.reg(), shift_value);
1666 // A negative Smi shifted right two is in the positive Smi range.
1667 if (shift_value < 2) {
1668 __ test(answer.reg(), Immediate(0xc0000000));
1669 deferred->Branch(not_zero);
1670 }
1671 operand->Unuse();
1672 ASSERT(kSmiTagSize == times_2); // Adjust the code if not true.
1673 __ lea(answer.reg(),
1674 Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
1675 deferred->BindExit();
1676 frame_->Push(&answer);
1677 }
1678 break;
1679
1680 case Token::SHL:
1681 if (reversed) {
1682 Result constant_operand(value);
1683 LikelySmiBinaryOperation(op, &constant_operand, operand,
1684 overwrite_mode);
1685 } else {
1686 // Only the least significant 5 bits of the shift value are used.
1687 // In the slow case, this masking is done inside the runtime call.
1688 int shift_value = int_value & 0x1f;
1689 operand->ToRegister();
1690 if (shift_value == 0) {
1691 // Spill operand so it can be overwritten in the slow case.
1692 frame_->Spill(operand->reg());
1693 DeferredInlineSmiOperation* deferred =
1694 new DeferredInlineSmiOperation(op,
1695 operand->reg(),
1696 operand->reg(),
1697 smi_value,
1698 overwrite_mode);
1699 __ test(operand->reg(), Immediate(kSmiTagMask));
1700 deferred->Branch(not_zero);
1701 deferred->BindExit();
1702 frame_->Push(operand);
1703 } else {
1704 // Use a fresh temporary for nonzero shift values.
1705 Result answer = allocator()->Allocate();
1706 ASSERT(answer.is_valid());
1707 DeferredInlineSmiOperation* deferred =
1708 new DeferredInlineSmiOperation(op,
1709 answer.reg(),
1710 operand->reg(),
1711 smi_value,
1712 overwrite_mode);
1713 __ test(operand->reg(), Immediate(kSmiTagMask));
1714 deferred->Branch(not_zero);
1715 __ mov(answer.reg(), operand->reg());
1716 ASSERT(kSmiTag == 0); // adjust code if not the case
1717 // We do no shifts, only the Smi conversion, if shift_value is 1.
1718 if (shift_value > 1) {
1719 __ shl(answer.reg(), shift_value - 1);
1720 }
1721 // Convert int result to Smi, checking that it is in int range.
1722 ASSERT(kSmiTagSize == 1); // adjust code if not the case
1723 __ add(answer.reg(), Operand(answer.reg()));
1724 deferred->Branch(overflow);
1725 deferred->BindExit();
1726 operand->Unuse();
1727 frame_->Push(&answer);
1728 }
1729 }
1730 break;
1731
1732 case Token::BIT_OR:
1733 case Token::BIT_XOR:
1734 case Token::BIT_AND: {
1735 operand->ToRegister();
1736 frame_->Spill(operand->reg());
1737 DeferredCode* deferred = NULL;
1738 if (reversed) {
1739 deferred = new DeferredInlineSmiOperationReversed(op,
1740 operand->reg(),
1741 smi_value,
1742 operand->reg(),
1743 overwrite_mode);
1744 } else {
1745 deferred = new DeferredInlineSmiOperation(op,
1746 operand->reg(),
1747 operand->reg(),
1748 smi_value,
1749 overwrite_mode);
1750 }
1751 __ test(operand->reg(), Immediate(kSmiTagMask));
1752 deferred->Branch(not_zero);
1753 if (op == Token::BIT_AND) {
1754 __ and_(Operand(operand->reg()), Immediate(value));
1755 } else if (op == Token::BIT_XOR) {
1756 if (int_value != 0) {
1757 __ xor_(Operand(operand->reg()), Immediate(value));
1758 }
1759 } else {
1760 ASSERT(op == Token::BIT_OR);
1761 if (int_value != 0) {
1762 __ or_(Operand(operand->reg()), Immediate(value));
1763 }
1764 }
1765 deferred->BindExit();
1766 frame_->Push(operand);
1767 break;
1768 }
1769
1770 // Generate inline code for mod of powers of 2 and negative powers of 2.
1771 case Token::MOD:
1772 if (!reversed &&
1773 int_value != 0 &&
1774 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
1775 operand->ToRegister();
1776 frame_->Spill(operand->reg());
1777 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
1778 operand->reg(),
1779 operand->reg(),
1780 smi_value,
1781 overwrite_mode);
1782 // Check for negative or non-Smi left hand side.
1783 __ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
1784 deferred->Branch(not_zero);
1785 if (int_value < 0) int_value = -int_value;
1786 if (int_value == 1) {
1787 __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
1788 } else {
1789 __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
1790 }
1791 deferred->BindExit();
1792 frame_->Push(operand);
1793 break;
1794 }
1795 // Fall through if we did not find a power of 2 on the right hand side!
1796
1797 default: {
1798 Result constant_operand(value);
1799 if (reversed) {
1800 LikelySmiBinaryOperation(op, &constant_operand, operand,
1801 overwrite_mode);
1802 } else {
1803 LikelySmiBinaryOperation(op, operand, &constant_operand,
1804 overwrite_mode);
1805 }
1806 break;
1807 }
1808 }
1809 ASSERT(!operand->is_valid());
1810}
1811
1812
1813void CodeGenerator::Comparison(Condition cc,
1814 bool strict,
1815 ControlDestination* dest) {
1816 // Strict only makes sense for equality comparisons.
1817 ASSERT(!strict || cc == equal);
1818
1819 Result left_side;
1820 Result right_side;
1821 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
1822 if (cc == greater || cc == less_equal) {
1823 cc = ReverseCondition(cc);
1824 left_side = frame_->Pop();
1825 right_side = frame_->Pop();
1826 } else {
1827 right_side = frame_->Pop();
1828 left_side = frame_->Pop();
1829 }
1830 ASSERT(cc == less || cc == equal || cc == greater_equal);
1831
1832 // If either side is a constant smi, optimize the comparison.
1833 bool left_side_constant_smi =
1834 left_side.is_constant() && left_side.handle()->IsSmi();
1835 bool right_side_constant_smi =
1836 right_side.is_constant() && right_side.handle()->IsSmi();
1837 bool left_side_constant_null =
1838 left_side.is_constant() && left_side.handle()->IsNull();
1839 bool right_side_constant_null =
1840 right_side.is_constant() && right_side.handle()->IsNull();
1841
1842 if (left_side_constant_smi || right_side_constant_smi) {
1843 if (left_side_constant_smi && right_side_constant_smi) {
1844 // Trivial case, comparing two constants.
1845 int left_value = Smi::cast(*left_side.handle())->value();
1846 int right_value = Smi::cast(*right_side.handle())->value();
1847 switch (cc) {
1848 case less:
1849 dest->Goto(left_value < right_value);
1850 break;
1851 case equal:
1852 dest->Goto(left_value == right_value);
1853 break;
1854 case greater_equal:
1855 dest->Goto(left_value >= right_value);
1856 break;
1857 default:
1858 UNREACHABLE();
1859 }
1860 } else { // Only one side is a constant Smi.
1861 // If left side is a constant Smi, reverse the operands.
1862 // Since one side is a constant Smi, conversion order does not matter.
1863 if (left_side_constant_smi) {
1864 Result temp = left_side;
1865 left_side = right_side;
1866 right_side = temp;
1867 cc = ReverseCondition(cc);
1868 // This may reintroduce greater or less_equal as the value of cc.
1869 // CompareStub and the inline code both support all values of cc.
1870 }
1871 // Implement comparison against a constant Smi, inlining the case
1872 // where both sides are Smis.
1873 left_side.ToRegister();
1874
1875 // Here we split control flow to the stub call and inlined cases
1876 // before finally splitting it to the control destination. We use
1877 // a jump target and branching to duplicate the virtual frame at
1878 // the first split. We manually handle the off-frame references
1879 // by reconstituting them on the non-fall-through path.
1880 JumpTarget is_smi;
1881 Register left_reg = left_side.reg();
1882 Handle<Object> right_val = right_side.handle();
1883 __ test(left_side.reg(), Immediate(kSmiTagMask));
1884 is_smi.Branch(zero, taken);
1885
1886 // Setup and call the compare stub.
1887 CompareStub stub(cc, strict);
1888 Result result = frame_->CallStub(&stub, &left_side, &right_side);
1889 result.ToRegister();
1890 __ cmp(result.reg(), 0);
1891 result.Unuse();
1892 dest->true_target()->Branch(cc);
1893 dest->false_target()->Jump();
1894
1895 is_smi.Bind();
1896 left_side = Result(left_reg);
1897 right_side = Result(right_val);
1898 // Test smi equality and comparison by signed int comparison.
1899 if (IsUnsafeSmi(right_side.handle())) {
1900 right_side.ToRegister();
1901 __ cmp(left_side.reg(), Operand(right_side.reg()));
1902 } else {
1903 __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
1904 }
1905 left_side.Unuse();
1906 right_side.Unuse();
1907 dest->Split(cc);
1908 }
1909 } else if (cc == equal &&
1910 (left_side_constant_null || right_side_constant_null)) {
1911 // To make null checks efficient, we check if either the left side or
1912 // the right side is the constant 'null'.
1913 // If so, we optimize the code by inlining a null check instead of
1914 // calling the (very) general runtime routine for checking equality.
1915 Result operand = left_side_constant_null ? right_side : left_side;
1916 right_side.Unuse();
1917 left_side.Unuse();
1918 operand.ToRegister();
1919 __ cmp(operand.reg(), Factory::null_value());
1920 if (strict) {
1921 operand.Unuse();
1922 dest->Split(equal);
1923 } else {
1924 // The 'null' value is only equal to 'undefined' if using non-strict
1925 // comparisons.
1926 dest->true_target()->Branch(equal);
1927 __ cmp(operand.reg(), Factory::undefined_value());
1928 dest->true_target()->Branch(equal);
1929 __ test(operand.reg(), Immediate(kSmiTagMask));
1930 dest->false_target()->Branch(equal);
1931
1932 // It can be an undetectable object.
1933 // Use a scratch register in preference to spilling operand.reg().
1934 Result temp = allocator()->Allocate();
1935 ASSERT(temp.is_valid());
1936 __ mov(temp.reg(),
1937 FieldOperand(operand.reg(), HeapObject::kMapOffset));
1938 __ movzx_b(temp.reg(),
1939 FieldOperand(temp.reg(), Map::kBitFieldOffset));
1940 __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
1941 temp.Unuse();
1942 operand.Unuse();
1943 dest->Split(not_zero);
1944 }
1945 } else { // Neither side is a constant Smi or null.
1946 // If either side is a non-smi constant, skip the smi check.
1947 bool known_non_smi =
1948 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
1949 (right_side.is_constant() && !right_side.handle()->IsSmi());
1950 left_side.ToRegister();
1951 right_side.ToRegister();
1952
1953 if (known_non_smi) {
1954 // When non-smi, call out to the compare stub.
1955 CompareStub stub(cc, strict);
1956 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
1957 if (cc == equal) {
1958 __ test(answer.reg(), Operand(answer.reg()));
1959 } else {
1960 __ cmp(answer.reg(), 0);
1961 }
1962 answer.Unuse();
1963 dest->Split(cc);
1964 } else {
1965 // Here we split control flow to the stub call and inlined cases
1966 // before finally splitting it to the control destination. We use
1967 // a jump target and branching to duplicate the virtual frame at
1968 // the first split. We manually handle the off-frame references
1969 // by reconstituting them on the non-fall-through path.
1970 JumpTarget is_smi;
1971 Register left_reg = left_side.reg();
1972 Register right_reg = right_side.reg();
1973
1974 Result temp = allocator_->Allocate();
1975 ASSERT(temp.is_valid());
1976 __ mov(temp.reg(), left_side.reg());
1977 __ or_(temp.reg(), Operand(right_side.reg()));
1978 __ test(temp.reg(), Immediate(kSmiTagMask));
1979 temp.Unuse();
1980 is_smi.Branch(zero, taken);
1981 // When non-smi, call out to the compare stub.
1982 CompareStub stub(cc, strict);
1983 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
1984 if (cc == equal) {
1985 __ test(answer.reg(), Operand(answer.reg()));
1986 } else {
1987 __ cmp(answer.reg(), 0);
1988 }
1989 answer.Unuse();
1990 dest->true_target()->Branch(cc);
1991 dest->false_target()->Jump();
1992
1993 is_smi.Bind();
1994 left_side = Result(left_reg);
1995 right_side = Result(right_reg);
1996 __ cmp(left_side.reg(), Operand(right_side.reg()));
1997 right_side.Unuse();
1998 left_side.Unuse();
1999 dest->Split(cc);
2000 }
2001 }
2002}
2003
2004
2005class CallFunctionStub: public CodeStub {
2006 public:
2007 CallFunctionStub(int argc, InLoopFlag in_loop)
2008 : argc_(argc), in_loop_(in_loop) { }
2009
2010 void Generate(MacroAssembler* masm);
2011
2012 private:
2013 int argc_;
2014 InLoopFlag in_loop_;
2015
2016#ifdef DEBUG
2017 void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
2018#endif
2019
2020 Major MajorKey() { return CallFunction; }
2021 int MinorKey() { return argc_; }
2022 InLoopFlag InLoop() { return in_loop_; }
2023};
2024
2025
2026// Call the function just below TOS on the stack with the given
2027// arguments. The receiver is the TOS.
2028void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
2029 int position) {
2030 // Push the arguments ("left-to-right") on the stack.
2031 int arg_count = args->length();
2032 for (int i = 0; i < arg_count; i++) {
2033 Load(args->at(i));
2034 }
2035
2036 // Record the position for debugging purposes.
2037 CodeForSourcePosition(position);
2038
2039 // Use the shared code stub to call the function.
2040 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
2041 CallFunctionStub call_function(arg_count, in_loop);
2042 Result answer = frame_->CallStub(&call_function, arg_count + 1);
2043 // Restore context and replace function on the stack with the
2044 // result of the stub invocation.
2045 frame_->RestoreContextRegister();
2046 frame_->SetElementAt(0, &answer);
2047}
2048
2049
2050void CodeGenerator::CallApplyLazy(Property* apply,
2051 Expression* receiver,
2052 VariableProxy* arguments,
2053 int position) {
2054 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
2055 ASSERT(arguments->IsArguments());
2056
2057 JumpTarget slow, done;
2058
2059 // Load the apply function onto the stack. This will usually
2060 // give us a megamorphic load site. Not super, but it works.
2061 Reference ref(this, apply);
2062 ref.GetValue(NOT_INSIDE_TYPEOF);
2063 ASSERT(ref.type() == Reference::NAMED);
2064
2065 // Load the receiver and the existing arguments object onto the
2066 // expression stack. Avoid allocating the arguments object here.
2067 Load(receiver);
2068 LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
2069
2070 // Emit the source position information after having loaded the
2071 // receiver and the arguments.
2072 CodeForSourcePosition(position);
2073
2074 // Check if the arguments object has been lazily allocated
2075 // already. If so, just use that instead of copying the arguments
2076 // from the stack. This also deals with cases where a local variable
2077 // named 'arguments' has been introduced.
2078 frame_->Dup();
2079 Result probe = frame_->Pop();
2080 bool try_lazy = true;
2081 if (probe.is_constant()) {
2082 try_lazy = probe.handle()->IsTheHole();
2083 } else {
2084 __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
2085 probe.Unuse();
2086 slow.Branch(not_equal);
2087 }
2088
2089 if (try_lazy) {
2090 JumpTarget build_args;
2091
2092 // Get rid of the arguments object probe.
2093 frame_->Drop();
2094
2095 // Before messing with the execution stack, we sync all
2096 // elements. This is bound to happen anyway because we're
2097 // about to call a function.
2098 frame_->SyncRange(0, frame_->element_count() - 1);
2099
2100 // Check that the receiver really is a JavaScript object.
2101 { frame_->PushElementAt(0);
2102 Result receiver = frame_->Pop();
2103 receiver.ToRegister();
2104 __ test(receiver.reg(), Immediate(kSmiTagMask));
2105 build_args.Branch(zero);
2106 Result tmp = allocator_->Allocate();
2107 // We allow all JSObjects including JSFunctions. As long as
2108 // JS_FUNCTION_TYPE is the last instance type and it is right
2109 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
2110 // bound.
2111 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
2112 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
2113 __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
2114 build_args.Branch(less);
2115 }
2116
2117 // Verify that we're invoking Function.prototype.apply.
2118 { frame_->PushElementAt(1);
2119 Result apply = frame_->Pop();
2120 apply.ToRegister();
2121 __ test(apply.reg(), Immediate(kSmiTagMask));
2122 build_args.Branch(zero);
2123 Result tmp = allocator_->Allocate();
2124 __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
2125 build_args.Branch(not_equal);
2126 __ mov(tmp.reg(),
2127 FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
2128 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
2129 __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
2130 Immediate(apply_code));
2131 build_args.Branch(not_equal);
2132 }
2133
2134 // Get the function receiver from the stack. Check that it
2135 // really is a function.
2136 __ mov(edi, Operand(esp, 2 * kPointerSize));
2137 __ test(edi, Immediate(kSmiTagMask));
2138 build_args.Branch(zero);
2139 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
2140 build_args.Branch(not_equal);
2141
2142 // Copy the arguments to this function possibly from the
2143 // adaptor frame below it.
2144 Label invoke, adapted;
2145 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2146 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
2147 __ cmp(Operand(ecx),
2148 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2149 __ j(equal, &adapted);
2150
2151 // No arguments adaptor frame. Copy fixed number of arguments.
2152 __ mov(eax, Immediate(scope_->num_parameters()));
2153 for (int i = 0; i < scope_->num_parameters(); i++) {
2154 __ push(frame_->ParameterAt(i));
2155 }
2156 __ jmp(&invoke);
2157
2158 // Arguments adaptor frame present. Copy arguments from there, but
2159 // avoid copying too many arguments to avoid stack overflows.
2160 __ bind(&adapted);
2161 static const uint32_t kArgumentsLimit = 1 * KB;
2162 __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2163 __ shr(eax, kSmiTagSize);
2164 __ mov(ecx, Operand(eax));
2165 __ cmp(eax, kArgumentsLimit);
2166 build_args.Branch(above);
2167
2168 // Loop through the arguments pushing them onto the execution
2169 // stack. We don't inform the virtual frame of the push, so we don't
2170 // have to worry about getting rid of the elements from the virtual
2171 // frame.
2172 Label loop;
2173 __ bind(&loop);
2174 __ test(ecx, Operand(ecx));
2175 __ j(zero, &invoke);
2176 __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
2177 __ dec(ecx);
2178 __ jmp(&loop);
2179
2180 // Invoke the function. The virtual frame knows about the receiver
2181 // so make sure to forget that explicitly.
2182 __ bind(&invoke);
2183 ParameterCount actual(eax);
2184 __ InvokeFunction(edi, actual, CALL_FUNCTION);
2185 frame_->Forget(1);
2186 Result result = allocator()->Allocate(eax);
2187 frame_->SetElementAt(0, &result);
2188 done.Jump();
2189
2190 // Slow-case: Allocate the arguments object since we know it isn't
2191 // there, and fall-through to the slow-case where we call
2192 // Function.prototype.apply.
2193 build_args.Bind();
2194 Result arguments_object = StoreArgumentsObject(false);
2195 frame_->Push(&arguments_object);
2196 slow.Bind();
2197 }
2198
2199 // Flip the apply function and the function to call on the stack, so
2200 // the function looks like the receiver of the apply call. This way,
2201 // the generic Function.prototype.apply implementation can deal with
2202 // the call like it usually does.
2203 Result a2 = frame_->Pop();
2204 Result a1 = frame_->Pop();
2205 Result ap = frame_->Pop();
2206 Result fn = frame_->Pop();
2207 frame_->Push(&ap);
2208 frame_->Push(&fn);
2209 frame_->Push(&a1);
2210 frame_->Push(&a2);
2211 CallFunctionStub call_function(2, NOT_IN_LOOP);
2212 Result res = frame_->CallStub(&call_function, 3);
2213 frame_->Push(&res);
2214
2215 // All done. Restore context register after call.
2216 if (try_lazy) done.Bind();
2217 frame_->RestoreContextRegister();
2218}
2219
2220
2221class DeferredStackCheck: public DeferredCode {
2222 public:
2223 DeferredStackCheck() {
2224 set_comment("[ DeferredStackCheck");
2225 }
2226
2227 virtual void Generate();
2228};
2229
2230
2231void DeferredStackCheck::Generate() {
2232 StackCheckStub stub;
2233 __ CallStub(&stub);
2234}
2235
2236
2237void CodeGenerator::CheckStack() {
2238 if (FLAG_check_stack) {
2239 DeferredStackCheck* deferred = new DeferredStackCheck;
2240 ExternalReference stack_guard_limit =
2241 ExternalReference::address_of_stack_guard_limit();
2242 __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
2243 deferred->Branch(below);
2244 deferred->BindExit();
2245 }
2246}
2247
2248
2249void CodeGenerator::VisitAndSpill(Statement* statement) {
2250 ASSERT(in_spilled_code());
2251 set_in_spilled_code(false);
2252 Visit(statement);
2253 if (frame_ != NULL) {
2254 frame_->SpillAll();
2255 }
2256 set_in_spilled_code(true);
2257}
2258
2259
2260void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
2261 ASSERT(in_spilled_code());
2262 set_in_spilled_code(false);
2263 VisitStatements(statements);
2264 if (frame_ != NULL) {
2265 frame_->SpillAll();
2266 }
2267 set_in_spilled_code(true);
2268}
2269
2270
2271void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
2272 ASSERT(!in_spilled_code());
2273 for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
2274 Visit(statements->at(i));
2275 }
2276}
2277
2278
2279void CodeGenerator::VisitBlock(Block* node) {
2280 ASSERT(!in_spilled_code());
2281 Comment cmnt(masm_, "[ Block");
2282 CodeForStatementPosition(node);
2283 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
2284 VisitStatements(node->statements());
2285 if (node->break_target()->is_linked()) {
2286 node->break_target()->Bind();
2287 }
2288 node->break_target()->Unuse();
2289}
2290
2291
2292void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
2293 // Call the runtime to declare the globals. The inevitable call
2294 // will sync frame elements to memory anyway, so we do it eagerly to
2295 // allow us to push the arguments directly into place.
2296 frame_->SyncRange(0, frame_->element_count() - 1);
2297
2298 frame_->EmitPush(Immediate(pairs));
2299 frame_->EmitPush(esi); // The context is the second argument.
2300 frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
2301 Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
2302 // Return value is ignored.
2303}
2304
2305
2306void CodeGenerator::VisitDeclaration(Declaration* node) {
2307 Comment cmnt(masm_, "[ Declaration");
2308 Variable* var = node->proxy()->var();
2309 ASSERT(var != NULL); // must have been resolved
2310 Slot* slot = var->slot();
2311
2312 // If it was not possible to allocate the variable at compile time,
2313 // we need to "declare" it at runtime to make sure it actually
2314 // exists in the local context.
2315 if (slot != NULL && slot->type() == Slot::LOOKUP) {
2316 // Variables with a "LOOKUP" slot were introduced as non-locals
2317 // during variable resolution and must have mode DYNAMIC.
2318 ASSERT(var->is_dynamic());
2319 // For now, just do a runtime call. Sync the virtual frame eagerly
2320 // so we can simply push the arguments into place.
2321 frame_->SyncRange(0, frame_->element_count() - 1);
2322 frame_->EmitPush(esi);
2323 frame_->EmitPush(Immediate(var->name()));
2324 // Declaration nodes are always introduced in one of two modes.
2325 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
2326 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
2327 frame_->EmitPush(Immediate(Smi::FromInt(attr)));
2328 // Push initial value, if any.
2329 // Note: For variables we must not push an initial value (such as
2330 // 'undefined') because we may have a (legal) redeclaration and we
2331 // must not destroy the current value.
2332 if (node->mode() == Variable::CONST) {
2333 frame_->EmitPush(Immediate(Factory::the_hole_value()));
2334 } else if (node->fun() != NULL) {
2335 Load(node->fun());
2336 } else {
2337 frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
2338 }
2339 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
2340 // Ignore the return value (declarations are statements).
2341 return;
2342 }
2343
2344 ASSERT(!var->is_global());
2345
2346 // If we have a function or a constant, we need to initialize the variable.
2347 Expression* val = NULL;
2348 if (node->mode() == Variable::CONST) {
2349 val = new Literal(Factory::the_hole_value());
2350 } else {
2351 val = node->fun(); // NULL if we don't have a function
2352 }
2353
2354 if (val != NULL) {
2355 {
2356 // Set the initial value.
2357 Reference target(this, node->proxy());
2358 Load(val);
2359 target.SetValue(NOT_CONST_INIT);
2360 // The reference is removed from the stack (preserving TOS) when
2361 // it goes out of scope.
2362 }
2363 // Get rid of the assigned value (declarations are statements).
2364 frame_->Drop();
2365 }
2366}
2367
2368
2369void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
2370 ASSERT(!in_spilled_code());
2371 Comment cmnt(masm_, "[ ExpressionStatement");
2372 CodeForStatementPosition(node);
2373 Expression* expression = node->expression();
2374 expression->MarkAsStatement();
2375 Load(expression);
2376 // Remove the lingering expression result from the top of stack.
2377 frame_->Drop();
2378}
2379
2380
2381void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
2382 ASSERT(!in_spilled_code());
2383 Comment cmnt(masm_, "// EmptyStatement");
2384 CodeForStatementPosition(node);
2385 // nothing to do
2386}
2387
2388
2389void CodeGenerator::VisitIfStatement(IfStatement* node) {
2390 ASSERT(!in_spilled_code());
2391 Comment cmnt(masm_, "[ IfStatement");
2392 // Generate different code depending on which parts of the if statement
2393 // are present or not.
2394 bool has_then_stm = node->HasThenStatement();
2395 bool has_else_stm = node->HasElseStatement();
2396
2397 CodeForStatementPosition(node);
2398 JumpTarget exit;
2399 if (has_then_stm && has_else_stm) {
2400 JumpTarget then;
2401 JumpTarget else_;
2402 ControlDestination dest(&then, &else_, true);
2403 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
2404
2405 if (dest.false_was_fall_through()) {
2406 // The else target was bound, so we compile the else part first.
2407 Visit(node->else_statement());
2408
2409 // We may have dangling jumps to the then part.
2410 if (then.is_linked()) {
2411 if (has_valid_frame()) exit.Jump();
2412 then.Bind();
2413 Visit(node->then_statement());
2414 }
2415 } else {
2416 // The then target was bound, so we compile the then part first.
2417 Visit(node->then_statement());
2418
2419 if (else_.is_linked()) {
2420 if (has_valid_frame()) exit.Jump();
2421 else_.Bind();
2422 Visit(node->else_statement());
2423 }
2424 }
2425
2426 } else if (has_then_stm) {
2427 ASSERT(!has_else_stm);
2428 JumpTarget then;
2429 ControlDestination dest(&then, &exit, true);
2430 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
2431
2432 if (dest.false_was_fall_through()) {
2433 // The exit label was bound. We may have dangling jumps to the
2434 // then part.
2435 if (then.is_linked()) {
2436 exit.Unuse();
2437 exit.Jump();
2438 then.Bind();
2439 Visit(node->then_statement());
2440 }
2441 } else {
2442 // The then label was bound.
2443 Visit(node->then_statement());
2444 }
2445
2446 } else if (has_else_stm) {
2447 ASSERT(!has_then_stm);
2448 JumpTarget else_;
2449 ControlDestination dest(&exit, &else_, false);
2450 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
2451
2452 if (dest.true_was_fall_through()) {
2453 // The exit label was bound. We may have dangling jumps to the
2454 // else part.
2455 if (else_.is_linked()) {
2456 exit.Unuse();
2457 exit.Jump();
2458 else_.Bind();
2459 Visit(node->else_statement());
2460 }
2461 } else {
2462 // The else label was bound.
2463 Visit(node->else_statement());
2464 }
2465
2466 } else {
2467 ASSERT(!has_then_stm && !has_else_stm);
2468 // We only care about the condition's side effects (not its value
2469 // or control flow effect). LoadCondition is called without
2470 // forcing control flow.
2471 ControlDestination dest(&exit, &exit, true);
2472 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
2473 if (!dest.is_used()) {
2474 // We got a value on the frame rather than (or in addition to)
2475 // control flow.
2476 frame_->Drop();
2477 }
2478 }
2479
2480 if (exit.is_linked()) {
2481 exit.Bind();
2482 }
2483}
2484
2485
2486void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
2487 ASSERT(!in_spilled_code());
2488 Comment cmnt(masm_, "[ ContinueStatement");
2489 CodeForStatementPosition(node);
2490 node->target()->continue_target()->Jump();
2491}
2492
2493
2494void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
2495 ASSERT(!in_spilled_code());
2496 Comment cmnt(masm_, "[ BreakStatement");
2497 CodeForStatementPosition(node);
2498 node->target()->break_target()->Jump();
2499}
2500
2501
2502void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
2503 ASSERT(!in_spilled_code());
2504 Comment cmnt(masm_, "[ ReturnStatement");
2505
2506 CodeForStatementPosition(node);
2507 Load(node->expression());
2508 Result return_value = frame_->Pop();
2509 if (function_return_is_shadowed_) {
2510 function_return_.Jump(&return_value);
2511 } else {
2512 frame_->PrepareForReturn();
2513 if (function_return_.is_bound()) {
2514 // If the function return label is already bound we reuse the
2515 // code by jumping to the return site.
2516 function_return_.Jump(&return_value);
2517 } else {
2518 function_return_.Bind(&return_value);
2519 GenerateReturnSequence(&return_value);
2520 }
2521 }
2522}
2523
2524
2525void CodeGenerator::GenerateReturnSequence(Result* return_value) {
2526 // The return value is a live (but not currently reference counted)
2527 // reference to eax. This is safe because the current frame does not
2528 // contain a reference to eax (it is prepared for the return by spilling
2529 // all registers).
2530 if (FLAG_trace) {
2531 frame_->Push(return_value);
2532 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
2533 }
2534 return_value->ToRegister(eax);
2535
2536 // Add a label for checking the size of the code used for returning.
2537 Label check_exit_codesize;
2538 masm_->bind(&check_exit_codesize);
2539
2540 // Leave the frame and return popping the arguments and the
2541 // receiver.
2542 frame_->Exit();
2543 masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
2544 DeleteFrame();
2545
2546#ifdef ENABLE_DEBUGGER_SUPPORT
2547 // Check that the size of the code used for returning matches what is
2548 // expected by the debugger.
2549 ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
2550 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
2551#endif
2552}
2553
2554
2555void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
2556 ASSERT(!in_spilled_code());
2557 Comment cmnt(masm_, "[ WithEnterStatement");
2558 CodeForStatementPosition(node);
2559 Load(node->expression());
2560 Result context;
2561 if (node->is_catch_block()) {
2562 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
2563 } else {
2564 context = frame_->CallRuntime(Runtime::kPushContext, 1);
2565 }
2566
2567 // Update context local.
2568 frame_->SaveContextRegister();
2569
2570 // Verify that the runtime call result and esi agree.
2571 if (FLAG_debug_code) {
2572 __ cmp(context.reg(), Operand(esi));
2573 __ Assert(equal, "Runtime::NewContext should end up in esi");
2574 }
2575}
2576
2577
2578void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
2579 ASSERT(!in_spilled_code());
2580 Comment cmnt(masm_, "[ WithExitStatement");
2581 CodeForStatementPosition(node);
2582 // Pop context.
2583 __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
2584 // Update context local.
2585 frame_->SaveContextRegister();
2586}
2587
2588
2589void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
2590 ASSERT(!in_spilled_code());
2591 Comment cmnt(masm_, "[ SwitchStatement");
2592 CodeForStatementPosition(node);
2593 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
2594
2595 // Compile the switch value.
2596 Load(node->tag());
2597
2598 ZoneList<CaseClause*>* cases = node->cases();
2599 int length = cases->length();
2600 CaseClause* default_clause = NULL;
2601
2602 JumpTarget next_test;
2603 // Compile the case label expressions and comparisons. Exit early
2604 // if a comparison is unconditionally true. The target next_test is
2605 // bound before the loop in order to indicate control flow to the
2606 // first comparison.
2607 next_test.Bind();
2608 for (int i = 0; i < length && !next_test.is_unused(); i++) {
2609 CaseClause* clause = cases->at(i);
2610 // The default is not a test, but remember it for later.
2611 if (clause->is_default()) {
2612 default_clause = clause;
2613 continue;
2614 }
2615
2616 Comment cmnt(masm_, "[ Case comparison");
2617 // We recycle the same target next_test for each test. Bind it if
2618 // the previous test has not done so and then unuse it for the
2619 // loop.
2620 if (next_test.is_linked()) {
2621 next_test.Bind();
2622 }
2623 next_test.Unuse();
2624
2625 // Duplicate the switch value.
2626 frame_->Dup();
2627
2628 // Compile the label expression.
2629 Load(clause->label());
2630
2631 // Compare and branch to the body if true or the next test if
2632 // false. Prefer the next test as a fall through.
2633 ControlDestination dest(clause->body_target(), &next_test, false);
2634 Comparison(equal, true, &dest);
2635
2636 // If the comparison fell through to the true target, jump to the
2637 // actual body.
2638 if (dest.true_was_fall_through()) {
2639 clause->body_target()->Unuse();
2640 clause->body_target()->Jump();
2641 }
2642 }
2643
2644 // If there was control flow to a next test from the last one
2645 // compiled, compile a jump to the default or break target.
2646 if (!next_test.is_unused()) {
2647 if (next_test.is_linked()) {
2648 next_test.Bind();
2649 }
2650 // Drop the switch value.
2651 frame_->Drop();
2652 if (default_clause != NULL) {
2653 default_clause->body_target()->Jump();
2654 } else {
2655 node->break_target()->Jump();
2656 }
2657 }
2658
2659
2660 // The last instruction emitted was a jump, either to the default
2661 // clause or the break target, or else to a case body from the loop
2662 // that compiles the tests.
2663 ASSERT(!has_valid_frame());
2664 // Compile case bodies as needed.
2665 for (int i = 0; i < length; i++) {
2666 CaseClause* clause = cases->at(i);
2667
2668 // There are two ways to reach the body: from the corresponding
2669 // test or as the fall through of the previous body.
2670 if (clause->body_target()->is_linked() || has_valid_frame()) {
2671 if (clause->body_target()->is_linked()) {
2672 if (has_valid_frame()) {
2673 // If we have both a jump to the test and a fall through, put
2674 // a jump on the fall through path to avoid the dropping of
2675 // the switch value on the test path. The exception is the
2676 // default which has already had the switch value dropped.
2677 if (clause->is_default()) {
2678 clause->body_target()->Bind();
2679 } else {
2680 JumpTarget body;
2681 body.Jump();
2682 clause->body_target()->Bind();
2683 frame_->Drop();
2684 body.Bind();
2685 }
2686 } else {
2687 // No fall through to worry about.
2688 clause->body_target()->Bind();
2689 if (!clause->is_default()) {
2690 frame_->Drop();
2691 }
2692 }
2693 } else {
2694 // Otherwise, we have only fall through.
2695 ASSERT(has_valid_frame());
2696 }
2697
2698 // We are now prepared to compile the body.
2699 Comment cmnt(masm_, "[ Case body");
2700 VisitStatements(clause->statements());
2701 }
2702 clause->body_target()->Unuse();
2703 }
2704
2705 // We may not have a valid frame here so bind the break target only
2706 // if needed.
2707 if (node->break_target()->is_linked()) {
2708 node->break_target()->Bind();
2709 }
2710 node->break_target()->Unuse();
2711}
2712
2713
2714void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
2715 ASSERT(!in_spilled_code());
2716 Comment cmnt(masm_, "[ LoopStatement");
2717 CodeForStatementPosition(node);
2718 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
2719
2720 // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
2721 // known result for the test expression, with no side effects.
2722 enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
2723 if (node->cond() == NULL) {
2724 ASSERT(node->type() == LoopStatement::FOR_LOOP);
2725 info = ALWAYS_TRUE;
2726 } else {
2727 Literal* lit = node->cond()->AsLiteral();
2728 if (lit != NULL) {
2729 if (lit->IsTrue()) {
2730 info = ALWAYS_TRUE;
2731 } else if (lit->IsFalse()) {
2732 info = ALWAYS_FALSE;
2733 }
2734 }
2735 }
2736
2737 switch (node->type()) {
2738 case LoopStatement::DO_LOOP: {
2739 JumpTarget body(JumpTarget::BIDIRECTIONAL);
2740 IncrementLoopNesting();
2741
2742 // Label the top of the loop for the backward jump if necessary.
2743 if (info == ALWAYS_TRUE) {
2744 // Use the continue target.
2745 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
2746 node->continue_target()->Bind();
2747 } else if (info == ALWAYS_FALSE) {
2748 // No need to label it.
2749 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
2750 } else {
2751 // Continue is the test, so use the backward body target.
2752 ASSERT(info == DONT_KNOW);
2753 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
2754 body.Bind();
2755 }
2756
2757 CheckStack(); // TODO(1222600): ignore if body contains calls.
2758 Visit(node->body());
2759
2760 // Compile the test.
2761 if (info == ALWAYS_TRUE) {
2762 // If control flow can fall off the end of the body, jump back
2763 // to the top and bind the break target at the exit.
2764 if (has_valid_frame()) {
2765 node->continue_target()->Jump();
2766 }
2767 if (node->break_target()->is_linked()) {
2768 node->break_target()->Bind();
2769 }
2770
2771 } else if (info == ALWAYS_FALSE) {
2772 // We may have had continues or breaks in the body.
2773 if (node->continue_target()->is_linked()) {
2774 node->continue_target()->Bind();
2775 }
2776 if (node->break_target()->is_linked()) {
2777 node->break_target()->Bind();
2778 }
2779
2780 } else {
2781 ASSERT(info == DONT_KNOW);
2782 // We have to compile the test expression if it can be reached by
2783 // control flow falling out of the body or via continue.
2784 if (node->continue_target()->is_linked()) {
2785 node->continue_target()->Bind();
2786 }
2787 if (has_valid_frame()) {
2788 ControlDestination dest(&body, node->break_target(), false);
2789 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
2790 }
2791 if (node->break_target()->is_linked()) {
2792 node->break_target()->Bind();
2793 }
2794 }
2795 break;
2796 }
2797
2798 case LoopStatement::WHILE_LOOP: {
2799 // Do not duplicate conditions that may have function literal
2800 // subexpressions. This can cause us to compile the function
2801 // literal twice.
2802 bool test_at_bottom = !node->may_have_function_literal();
2803
2804 IncrementLoopNesting();
2805
2806 // If the condition is always false and has no side effects, we
2807 // do not need to compile anything.
2808 if (info == ALWAYS_FALSE) break;
2809
2810 JumpTarget body;
2811 if (test_at_bottom) {
2812 body.set_direction(JumpTarget::BIDIRECTIONAL);
2813 }
2814
2815 // Based on the condition analysis, compile the test as necessary.
2816 if (info == ALWAYS_TRUE) {
2817 // We will not compile the test expression. Label the top of
2818 // the loop with the continue target.
2819 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
2820 node->continue_target()->Bind();
2821 } else {
2822 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
2823 if (test_at_bottom) {
2824 // Continue is the test at the bottom, no need to label the
2825 // test at the top. The body is a backward target.
2826 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
2827 } else {
2828 // Label the test at the top as the continue target. The
2829 // body is a forward-only target.
2830 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
2831 node->continue_target()->Bind();
2832 }
2833 // Compile the test with the body as the true target and
2834 // preferred fall-through and with the break target as the
2835 // false target.
2836 ControlDestination dest(&body, node->break_target(), true);
2837 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
2838
2839 if (dest.false_was_fall_through()) {
2840 // If we got the break target as fall-through, the test may
2841 // have been unconditionally false (if there are no jumps to
2842 // the body).
2843 if (!body.is_linked()) break;
2844
2845 // Otherwise, jump around the body on the fall through and
2846 // then bind the body target.
2847 node->break_target()->Unuse();
2848 node->break_target()->Jump();
2849 body.Bind();
2850 }
2851 }
2852
2853 CheckStack(); // TODO(1222600): ignore if body contains calls.
2854 Visit(node->body());
2855
2856 // Based on the condition analysis, compile the backward jump as
2857 // necessary.
2858 if (info == ALWAYS_TRUE) {
2859 // The loop body has been labeled with the continue target.
2860 if (has_valid_frame()) {
2861 node->continue_target()->Jump();
2862 }
2863 } else {
2864 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
2865 if (test_at_bottom) {
2866 // If we have chosen to recompile the test at the bottom,
2867 // then it is the continue target.
2868 if (node->continue_target()->is_linked()) {
2869 node->continue_target()->Bind();
2870 }
2871 if (has_valid_frame()) {
2872 // The break target is the fall-through (body is a backward
2873 // jump from here and thus an invalid fall-through).
2874 ControlDestination dest(&body, node->break_target(), false);
2875 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
2876 }
2877 } else {
2878 // If we have chosen not to recompile the test at the
2879 // bottom, jump back to the one at the top.
2880 if (has_valid_frame()) {
2881 node->continue_target()->Jump();
2882 }
2883 }
2884 }
2885
2886 // The break target may be already bound (by the condition), or
2887 // there may not be a valid frame. Bind it only if needed.
2888 if (node->break_target()->is_linked()) {
2889 node->break_target()->Bind();
2890 }
2891 break;
2892 }
2893
2894 case LoopStatement::FOR_LOOP: {
2895 // Do not duplicate conditions that may have function literal
2896 // subexpressions. This can cause us to compile the function
2897 // literal twice.
2898 bool test_at_bottom = !node->may_have_function_literal();
2899
2900 // Compile the init expression if present.
2901 if (node->init() != NULL) {
2902 Visit(node->init());
2903 }
2904
2905 IncrementLoopNesting();
2906
2907 // If the condition is always false and has no side effects, we
2908 // do not need to compile anything else.
2909 if (info == ALWAYS_FALSE) break;
2910
2911 // Target for backward edge if no test at the bottom, otherwise
2912 // unused.
2913 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2914
2915 // Target for backward edge if there is a test at the bottom,
2916 // otherwise used as target for test at the top.
2917 JumpTarget body;
2918 if (test_at_bottom) {
2919 body.set_direction(JumpTarget::BIDIRECTIONAL);
2920 }
2921
2922 // Based on the condition analysis, compile the test as necessary.
2923 if (info == ALWAYS_TRUE) {
2924 // We will not compile the test expression. Label the top of
2925 // the loop.
2926 if (node->next() == NULL) {
2927 // Use the continue target if there is no update expression.
2928 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
2929 node->continue_target()->Bind();
2930 } else {
2931 // Otherwise use the backward loop target.
2932 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
2933 loop.Bind();
2934 }
2935 } else {
2936 ASSERT(info == DONT_KNOW);
2937 if (test_at_bottom) {
2938 // Continue is either the update expression or the test at
2939 // the bottom, no need to label the test at the top.
2940 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
2941 } else if (node->next() == NULL) {
2942 // We are not recompiling the test at the bottom and there
2943 // is no update expression.
2944 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
2945 node->continue_target()->Bind();
2946 } else {
2947 // We are not recompiling the test at the bottom and there
2948 // is an update expression.
2949 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
2950 loop.Bind();
2951 }
2952
2953 // Compile the test with the body as the true target and
2954 // preferred fall-through and with the break target as the
2955 // false target.
2956 ControlDestination dest(&body, node->break_target(), true);
2957 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
2958
2959 if (dest.false_was_fall_through()) {
2960 // If we got the break target as fall-through, the test may
2961 // have been unconditionally false (if there are no jumps to
2962 // the body).
2963 if (!body.is_linked()) break;
2964
2965 // Otherwise, jump around the body on the fall through and
2966 // then bind the body target.
2967 node->break_target()->Unuse();
2968 node->break_target()->Jump();
2969 body.Bind();
2970 }
2971 }
2972
2973 CheckStack(); // TODO(1222600): ignore if body contains calls.
2974 Visit(node->body());
2975
2976 // If there is an update expression, compile it if necessary.
2977 if (node->next() != NULL) {
2978 if (node->continue_target()->is_linked()) {
2979 node->continue_target()->Bind();
2980 }
2981
2982 // Control can reach the update by falling out of the body or
2983 // by a continue.
2984 if (has_valid_frame()) {
2985 // Record the source position of the statement as this code
2986 // which is after the code for the body actually belongs to
2987 // the loop statement and not the body.
2988 CodeForStatementPosition(node);
2989 Visit(node->next());
2990 }
2991 }
2992
2993 // Based on the condition analysis, compile the backward jump as
2994 // necessary.
2995 if (info == ALWAYS_TRUE) {
2996 if (has_valid_frame()) {
2997 if (node->next() == NULL) {
2998 node->continue_target()->Jump();
2999 } else {
3000 loop.Jump();
3001 }
3002 }
3003 } else {
3004 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
3005 if (test_at_bottom) {
3006 if (node->continue_target()->is_linked()) {
3007 // We can have dangling jumps to the continue target if
3008 // there was no update expression.
3009 node->continue_target()->Bind();
3010 }
3011 // Control can reach the test at the bottom by falling out
3012 // of the body, by a continue in the body, or from the
3013 // update expression.
3014 if (has_valid_frame()) {
3015 // The break target is the fall-through (body is a
3016 // backward jump from here).
3017 ControlDestination dest(&body, node->break_target(), false);
3018 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
3019 }
3020 } else {
3021 // Otherwise, jump back to the test at the top.
3022 if (has_valid_frame()) {
3023 if (node->next() == NULL) {
3024 node->continue_target()->Jump();
3025 } else {
3026 loop.Jump();
3027 }
3028 }
3029 }
3030 }
3031
3032 // The break target may be already bound (by the condition), or
3033 // there may not be a valid frame. Bind it only if needed.
3034 if (node->break_target()->is_linked()) {
3035 node->break_target()->Bind();
3036 }
3037 break;
3038 }
3039 }
3040
3041 DecrementLoopNesting();
3042 node->continue_target()->Unuse();
3043 node->break_target()->Unuse();
3044}
3045
3046
3047void CodeGenerator::VisitForInStatement(ForInStatement* node) {
3048 ASSERT(!in_spilled_code());
3049 VirtualFrame::SpilledScope spilled_scope;
3050 Comment cmnt(masm_, "[ ForInStatement");
3051 CodeForStatementPosition(node);
3052
3053 JumpTarget primitive;
3054 JumpTarget jsobject;
3055 JumpTarget fixed_array;
3056 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
3057 JumpTarget end_del_check;
3058 JumpTarget exit;
3059
3060 // Get the object to enumerate over (converted to JSObject).
3061 LoadAndSpill(node->enumerable());
3062
3063 // Both SpiderMonkey and kjs ignore null and undefined in contrast
3064 // to the specification. 12.6.4 mandates a call to ToObject.
3065 frame_->EmitPop(eax);
3066
3067 // eax: value to be iterated over
3068 __ cmp(eax, Factory::undefined_value());
3069 exit.Branch(equal);
3070 __ cmp(eax, Factory::null_value());
3071 exit.Branch(equal);
3072
3073 // Stack layout in body:
3074 // [iteration counter (smi)] <- slot 0
3075 // [length of array] <- slot 1
3076 // [FixedArray] <- slot 2
3077 // [Map or 0] <- slot 3
3078 // [Object] <- slot 4
3079
3080 // Check if enumerable is already a JSObject
3081 // eax: value to be iterated over
3082 __ test(eax, Immediate(kSmiTagMask));
3083 primitive.Branch(zero);
3084 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
3085 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
3086 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
3087 jsobject.Branch(above_equal);
3088
3089 primitive.Bind();
3090 frame_->EmitPush(eax);
3091 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
3092 // function call returns the value in eax, which is where we want it below
3093
3094 jsobject.Bind();
3095 // Get the set of properties (as a FixedArray or Map).
3096 // eax: value to be iterated over
3097 frame_->EmitPush(eax); // push the object being iterated over (slot 4)
3098
3099 frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
3100 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
3101
3102 // If we got a Map, we can do a fast modification check.
3103 // Otherwise, we got a FixedArray, and we have to do a slow check.
3104 // eax: map or fixed array (result from call to
3105 // Runtime::kGetPropertyNamesFast)
3106 __ mov(edx, Operand(eax));
3107 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
3108 __ cmp(ecx, Factory::meta_map());
3109 fixed_array.Branch(not_equal);
3110
3111 // Get enum cache
3112 // eax: map (result from call to Runtime::kGetPropertyNamesFast)
3113 __ mov(ecx, Operand(eax));
3114 __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
3115 // Get the bridge array held in the enumeration index field.
3116 __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
3117 // Get the cache from the bridge array.
3118 __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
3119
3120 frame_->EmitPush(eax); // <- slot 3
3121 frame_->EmitPush(edx); // <- slot 2
3122 __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
3123 __ shl(eax, kSmiTagSize);
3124 frame_->EmitPush(eax); // <- slot 1
3125 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
3126 entry.Jump();
3127
3128 fixed_array.Bind();
3129 // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
3130 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
3131 frame_->EmitPush(eax); // <- slot 2
3132
3133 // Push the length of the array and the initial index onto the stack.
3134 __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
3135 __ shl(eax, kSmiTagSize);
3136 frame_->EmitPush(eax); // <- slot 1
3137 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
3138
3139 // Condition.
3140 entry.Bind();
3141 // Grab the current frame's height for the break and continue
3142 // targets only after all the state is pushed on the frame.
3143 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3144 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3145
3146 __ mov(eax, frame_->ElementAt(0)); // load the current count
3147 __ cmp(eax, frame_->ElementAt(1)); // compare to the array length
3148 node->break_target()->Branch(above_equal);
3149
3150 // Get the i'th entry of the array.
3151 __ mov(edx, frame_->ElementAt(2));
3152 __ mov(ebx, Operand(edx, eax, times_2,
3153 FixedArray::kHeaderSize - kHeapObjectTag));
3154
3155 // Get the expected map from the stack or a zero map in the
3156 // permanent slow case eax: current iteration count ebx: i'th entry
3157 // of the enum cache
3158 __ mov(edx, frame_->ElementAt(3));
3159 // Check if the expected map still matches that of the enumerable.
3160 // If not, we have to filter the key.
3161 // eax: current iteration count
3162 // ebx: i'th entry of the enum cache
3163 // edx: expected map value
3164 __ mov(ecx, frame_->ElementAt(4));
3165 __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
3166 __ cmp(ecx, Operand(edx));
3167 end_del_check.Branch(equal);
3168
3169 // Convert the entry to a string (or null if it isn't a property anymore).
3170 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
3171 frame_->EmitPush(ebx); // push entry
3172 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
3173 __ mov(ebx, Operand(eax));
3174
3175 // If the property has been removed while iterating, we just skip it.
3176 __ cmp(ebx, Factory::null_value());
3177 node->continue_target()->Branch(equal);
3178
3179 end_del_check.Bind();
3180 // Store the entry in the 'each' expression and take another spin in the
3181 // loop. edx: i'th entry of the enum cache (or string there of)
3182 frame_->EmitPush(ebx);
3183 { Reference each(this, node->each());
3184 // Loading a reference may leave the frame in an unspilled state.
3185 frame_->SpillAll();
3186 if (!each.is_illegal()) {
3187 if (each.size() > 0) {
3188 frame_->EmitPush(frame_->ElementAt(each.size()));
3189 }
3190 // If the reference was to a slot we rely on the convenient property
3191 // that it doesn't matter whether a value (eg, ebx pushed above) is
3192 // right on top of or right underneath a zero-sized reference.
3193 each.SetValue(NOT_CONST_INIT);
3194 if (each.size() > 0) {
3195 // It's safe to pop the value lying on top of the reference before
3196 // unloading the reference itself (which preserves the top of stack,
3197 // ie, now the topmost value of the non-zero sized reference), since
3198 // we will discard the top of stack after unloading the reference
3199 // anyway.
3200 frame_->Drop();
3201 }
3202 }
3203 }
3204 // Unloading a reference may leave the frame in an unspilled state.
3205 frame_->SpillAll();
3206
3207 // Discard the i'th entry pushed above or else the remainder of the
3208 // reference, whichever is currently on top of the stack.
3209 frame_->Drop();
3210
3211 // Body.
3212 CheckStack(); // TODO(1222600): ignore if body contains calls.
3213 VisitAndSpill(node->body());
3214
3215 // Next. Reestablish a spilled frame in case we are coming here via
3216 // a continue in the body.
3217 node->continue_target()->Bind();
3218 frame_->SpillAll();
3219 frame_->EmitPop(eax);
3220 __ add(Operand(eax), Immediate(Smi::FromInt(1)));
3221 frame_->EmitPush(eax);
3222 entry.Jump();
3223
3224 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
3225 // any frame.
3226 node->break_target()->Bind();
3227 frame_->Drop(5);
3228
3229 // Exit.
3230 exit.Bind();
3231
3232 node->continue_target()->Unuse();
3233 node->break_target()->Unuse();
3234}
3235
3236
3237void CodeGenerator::VisitTryCatch(TryCatch* node) {
3238 ASSERT(!in_spilled_code());
3239 VirtualFrame::SpilledScope spilled_scope;
3240 Comment cmnt(masm_, "[ TryCatch");
3241 CodeForStatementPosition(node);
3242
3243 JumpTarget try_block;
3244 JumpTarget exit;
3245
3246 try_block.Call();
3247 // --- Catch block ---
3248 frame_->EmitPush(eax);
3249
3250 // Store the caught exception in the catch variable.
3251 { Reference ref(this, node->catch_var());
3252 ASSERT(ref.is_slot());
3253 // Load the exception to the top of the stack. Here we make use of the
3254 // convenient property that it doesn't matter whether a value is
3255 // immediately on top of or underneath a zero-sized reference.
3256 ref.SetValue(NOT_CONST_INIT);
3257 }
3258
3259 // Remove the exception from the stack.
3260 frame_->Drop();
3261
3262 VisitStatementsAndSpill(node->catch_block()->statements());
3263 if (has_valid_frame()) {
3264 exit.Jump();
3265 }
3266
3267
3268 // --- Try block ---
3269 try_block.Bind();
3270
3271 frame_->PushTryHandler(TRY_CATCH_HANDLER);
3272 int handler_height = frame_->height();
3273
3274 // Shadow the jump targets for all escapes from the try block, including
3275 // returns. During shadowing, the original target is hidden as the
3276 // ShadowTarget and operations on the original actually affect the
3277 // shadowing target.
3278 //
3279 // We should probably try to unify the escaping targets and the return
3280 // target.
3281 int nof_escapes = node->escaping_targets()->length();
3282 List<ShadowTarget*> shadows(1 + nof_escapes);
3283
3284 // Add the shadow target for the function return.
3285 static const int kReturnShadowIndex = 0;
3286 shadows.Add(new ShadowTarget(&function_return_));
3287 bool function_return_was_shadowed = function_return_is_shadowed_;
3288 function_return_is_shadowed_ = true;
3289 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
3290
3291 // Add the remaining shadow targets.
3292 for (int i = 0; i < nof_escapes; i++) {
3293 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
3294 }
3295
3296 // Generate code for the statements in the try block.
3297 VisitStatementsAndSpill(node->try_block()->statements());
3298
3299 // Stop the introduced shadowing and count the number of required unlinks.
3300 // After shadowing stops, the original targets are unshadowed and the
3301 // ShadowTargets represent the formerly shadowing targets.
3302 bool has_unlinks = false;
3303 for (int i = 0; i < shadows.length(); i++) {
3304 shadows[i]->StopShadowing();
3305 has_unlinks = has_unlinks || shadows[i]->is_linked();
3306 }
3307 function_return_is_shadowed_ = function_return_was_shadowed;
3308
3309 // Get an external reference to the handler address.
3310 ExternalReference handler_address(Top::k_handler_address);
3311
3312 // Make sure that there's nothing left on the stack above the
3313 // handler structure.
3314 if (FLAG_debug_code) {
3315 __ mov(eax, Operand::StaticVariable(handler_address));
3316 __ cmp(esp, Operand(eax));
3317 __ Assert(equal, "stack pointer should point to top handler");
3318 }
3319
3320 // If we can fall off the end of the try block, unlink from try chain.
3321 if (has_valid_frame()) {
3322 // The next handler address is on top of the frame. Unlink from
3323 // the handler list and drop the rest of this handler from the
3324 // frame.
3325 ASSERT(StackHandlerConstants::kNextOffset == 0);
3326 frame_->EmitPop(Operand::StaticVariable(handler_address));
3327 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3328 if (has_unlinks) {
3329 exit.Jump();
3330 }
3331 }
3332
3333 // Generate unlink code for the (formerly) shadowing targets that
3334 // have been jumped to. Deallocate each shadow target.
3335 Result return_value;
3336 for (int i = 0; i < shadows.length(); i++) {
3337 if (shadows[i]->is_linked()) {
3338 // Unlink from try chain; be careful not to destroy the TOS if
3339 // there is one.
3340 if (i == kReturnShadowIndex) {
3341 shadows[i]->Bind(&return_value);
3342 return_value.ToRegister(eax);
3343 } else {
3344 shadows[i]->Bind();
3345 }
3346 // Because we can be jumping here (to spilled code) from
3347 // unspilled code, we need to reestablish a spilled frame at
3348 // this block.
3349 frame_->SpillAll();
3350
3351 // Reload sp from the top handler, because some statements that we
3352 // break from (eg, for...in) may have left stuff on the stack.
3353 __ mov(esp, Operand::StaticVariable(handler_address));
3354 frame_->Forget(frame_->height() - handler_height);
3355
3356 ASSERT(StackHandlerConstants::kNextOffset == 0);
3357 frame_->EmitPop(Operand::StaticVariable(handler_address));
3358 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3359
3360 if (i == kReturnShadowIndex) {
3361 if (!function_return_is_shadowed_) frame_->PrepareForReturn();
3362 shadows[i]->other_target()->Jump(&return_value);
3363 } else {
3364 shadows[i]->other_target()->Jump();
3365 }
3366 }
3367 }
3368
3369 exit.Bind();
3370}
3371
3372
3373void CodeGenerator::VisitTryFinally(TryFinally* node) {
3374 ASSERT(!in_spilled_code());
3375 VirtualFrame::SpilledScope spilled_scope;
3376 Comment cmnt(masm_, "[ TryFinally");
3377 CodeForStatementPosition(node);
3378
3379 // State: Used to keep track of reason for entering the finally
3380 // block. Should probably be extended to hold information for
3381 // break/continue from within the try block.
3382 enum { FALLING, THROWING, JUMPING };
3383
3384 JumpTarget try_block;
3385 JumpTarget finally_block;
3386
3387 try_block.Call();
3388
3389 frame_->EmitPush(eax);
3390 // In case of thrown exceptions, this is where we continue.
3391 __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
3392 finally_block.Jump();
3393
3394 // --- Try block ---
3395 try_block.Bind();
3396
3397 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
3398 int handler_height = frame_->height();
3399
3400 // Shadow the jump targets for all escapes from the try block, including
3401 // returns. During shadowing, the original target is hidden as the
3402 // ShadowTarget and operations on the original actually affect the
3403 // shadowing target.
3404 //
3405 // We should probably try to unify the escaping targets and the return
3406 // target.
3407 int nof_escapes = node->escaping_targets()->length();
3408 List<ShadowTarget*> shadows(1 + nof_escapes);
3409
3410 // Add the shadow target for the function return.
3411 static const int kReturnShadowIndex = 0;
3412 shadows.Add(new ShadowTarget(&function_return_));
3413 bool function_return_was_shadowed = function_return_is_shadowed_;
3414 function_return_is_shadowed_ = true;
3415 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
3416
3417 // Add the remaining shadow targets.
3418 for (int i = 0; i < nof_escapes; i++) {
3419 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
3420 }
3421
3422 // Generate code for the statements in the try block.
3423 VisitStatementsAndSpill(node->try_block()->statements());
3424
3425 // Stop the introduced shadowing and count the number of required unlinks.
3426 // After shadowing stops, the original targets are unshadowed and the
3427 // ShadowTargets represent the formerly shadowing targets.
3428 int nof_unlinks = 0;
3429 for (int i = 0; i < shadows.length(); i++) {
3430 shadows[i]->StopShadowing();
3431 if (shadows[i]->is_linked()) nof_unlinks++;
3432 }
3433 function_return_is_shadowed_ = function_return_was_shadowed;
3434
3435 // Get an external reference to the handler address.
3436 ExternalReference handler_address(Top::k_handler_address);
3437
3438 // If we can fall off the end of the try block, unlink from the try
3439 // chain and set the state on the frame to FALLING.
3440 if (has_valid_frame()) {
3441 // The next handler address is on top of the frame.
3442 ASSERT(StackHandlerConstants::kNextOffset == 0);
3443 frame_->EmitPop(Operand::StaticVariable(handler_address));
3444 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3445
3446 // Fake a top of stack value (unneeded when FALLING) and set the
3447 // state in ecx, then jump around the unlink blocks if any.
3448 frame_->EmitPush(Immediate(Factory::undefined_value()));
3449 __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
3450 if (nof_unlinks > 0) {
3451 finally_block.Jump();
3452 }
3453 }
3454
3455 // Generate code to unlink and set the state for the (formerly)
3456 // shadowing targets that have been jumped to.
3457 for (int i = 0; i < shadows.length(); i++) {
3458 if (shadows[i]->is_linked()) {
3459 // If we have come from the shadowed return, the return value is
3460 // on the virtual frame. We must preserve it until it is
3461 // pushed.
3462 if (i == kReturnShadowIndex) {
3463 Result return_value;
3464 shadows[i]->Bind(&return_value);
3465 return_value.ToRegister(eax);
3466 } else {
3467 shadows[i]->Bind();
3468 }
3469 // Because we can be jumping here (to spilled code) from
3470 // unspilled code, we need to reestablish a spilled frame at
3471 // this block.
3472 frame_->SpillAll();
3473
3474 // Reload sp from the top handler, because some statements that
3475 // we break from (eg, for...in) may have left stuff on the
3476 // stack.
3477 __ mov(esp, Operand::StaticVariable(handler_address));
3478 frame_->Forget(frame_->height() - handler_height);
3479
3480 // Unlink this handler and drop it from the frame.
3481 ASSERT(StackHandlerConstants::kNextOffset == 0);
3482 frame_->EmitPop(Operand::StaticVariable(handler_address));
3483 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3484
3485 if (i == kReturnShadowIndex) {
3486 // If this target shadowed the function return, materialize
3487 // the return value on the stack.
3488 frame_->EmitPush(eax);
3489 } else {
3490 // Fake TOS for targets that shadowed breaks and continues.
3491 frame_->EmitPush(Immediate(Factory::undefined_value()));
3492 }
3493 __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
3494 if (--nof_unlinks > 0) {
3495 // If this is not the last unlink block, jump around the next.
3496 finally_block.Jump();
3497 }
3498 }
3499 }
3500
3501 // --- Finally block ---
3502 finally_block.Bind();
3503
3504 // Push the state on the stack.
3505 frame_->EmitPush(ecx);
3506
3507 // We keep two elements on the stack - the (possibly faked) result
3508 // and the state - while evaluating the finally block.
3509 //
3510 // Generate code for the statements in the finally block.
3511 VisitStatementsAndSpill(node->finally_block()->statements());
3512
3513 if (has_valid_frame()) {
3514 // Restore state and return value or faked TOS.
3515 frame_->EmitPop(ecx);
3516 frame_->EmitPop(eax);
3517 }
3518
3519 // Generate code to jump to the right destination for all used
3520 // formerly shadowing targets. Deallocate each shadow target.
3521 for (int i = 0; i < shadows.length(); i++) {
3522 if (has_valid_frame() && shadows[i]->is_bound()) {
3523 BreakTarget* original = shadows[i]->other_target();
3524 __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
3525 if (i == kReturnShadowIndex) {
3526 // The return value is (already) in eax.
3527 Result return_value = allocator_->Allocate(eax);
3528 ASSERT(return_value.is_valid());
3529 if (function_return_is_shadowed_) {
3530 original->Branch(equal, &return_value);
3531 } else {
3532 // Branch around the preparation for return which may emit
3533 // code.
3534 JumpTarget skip;
3535 skip.Branch(not_equal);
3536 frame_->PrepareForReturn();
3537 original->Jump(&return_value);
3538 skip.Bind();
3539 }
3540 } else {
3541 original->Branch(equal);
3542 }
3543 }
3544 }
3545
3546 if (has_valid_frame()) {
3547 // Check if we need to rethrow the exception.
3548 JumpTarget exit;
3549 __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
3550 exit.Branch(not_equal);
3551
3552 // Rethrow exception.
3553 frame_->EmitPush(eax); // undo pop from above
3554 frame_->CallRuntime(Runtime::kReThrow, 1);
3555
3556 // Done.
3557 exit.Bind();
3558 }
3559}
3560
3561
3562void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
3563 ASSERT(!in_spilled_code());
3564 Comment cmnt(masm_, "[ DebuggerStatement");
3565 CodeForStatementPosition(node);
3566#ifdef ENABLE_DEBUGGER_SUPPORT
3567 // Spill everything, even constants, to the frame.
3568 frame_->SpillAll();
3569 frame_->CallRuntime(Runtime::kDebugBreak, 0);
3570 // Ignore the return value.
3571#endif
3572}
3573
3574
3575void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
3576 // Call the runtime to instantiate the function boilerplate object.
3577 // The inevitable call will sync frame elements to memory anyway, so
3578 // we do it eagerly to allow us to push the arguments directly into
3579 // place.
3580 ASSERT(boilerplate->IsBoilerplate());
3581 frame_->SyncRange(0, frame_->element_count() - 1);
3582
3583 // Push the boilerplate on the stack.
3584 frame_->EmitPush(Immediate(boilerplate));
3585
3586 // Create a new closure.
3587 frame_->EmitPush(esi);
3588 Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
3589 frame_->Push(&result);
3590}
3591
3592
3593void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
3594 Comment cmnt(masm_, "[ FunctionLiteral");
3595
3596 // Build the function boilerplate and instantiate it.
3597 Handle<JSFunction> boilerplate = BuildBoilerplate(node);
3598 // Check for stack-overflow exception.
3599 if (HasStackOverflow()) return;
3600 InstantiateBoilerplate(boilerplate);
3601}
3602
3603
3604void CodeGenerator::VisitFunctionBoilerplateLiteral(
3605 FunctionBoilerplateLiteral* node) {
3606 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
3607 InstantiateBoilerplate(node->boilerplate());
3608}
3609
3610
3611void CodeGenerator::VisitConditional(Conditional* node) {
3612 Comment cmnt(masm_, "[ Conditional");
3613 JumpTarget then;
3614 JumpTarget else_;
3615 JumpTarget exit;
3616 ControlDestination dest(&then, &else_, true);
3617 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
3618
3619 if (dest.false_was_fall_through()) {
3620 // The else target was bound, so we compile the else part first.
3621 Load(node->else_expression(), typeof_state());
3622
3623 if (then.is_linked()) {
3624 exit.Jump();
3625 then.Bind();
3626 Load(node->then_expression(), typeof_state());
3627 }
3628 } else {
3629 // The then target was bound, so we compile the then part first.
3630 Load(node->then_expression(), typeof_state());
3631
3632 if (else_.is_linked()) {
3633 exit.Jump();
3634 else_.Bind();
3635 Load(node->else_expression(), typeof_state());
3636 }
3637 }
3638
3639 exit.Bind();
3640}
3641
3642
3643void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
3644 if (slot->type() == Slot::LOOKUP) {
3645 ASSERT(slot->var()->is_dynamic());
3646
3647 JumpTarget slow;
3648 JumpTarget done;
3649 Result value;
3650
3651 // Generate fast-case code for variables that might be shadowed by
3652 // eval-introduced variables. Eval is used a lot without
3653 // introducing variables. In those cases, we do not want to
3654 // perform a runtime call for all variables in the scope
3655 // containing the eval.
3656 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
3657 value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
3658 // If there was no control flow to slow, we can exit early.
3659 if (!slow.is_linked()) {
3660 frame_->Push(&value);
3661 return;
3662 }
3663
3664 done.Jump(&value);
3665
3666 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
3667 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
3668 // Only generate the fast case for locals that rewrite to slots.
3669 // This rules out argument loads.
3670 if (potential_slot != NULL) {
3671 // Allocate a fresh register to use as a temp in
3672 // ContextSlotOperandCheckExtensions and to hold the result
3673 // value.
3674 value = allocator_->Allocate();
3675 ASSERT(value.is_valid());
3676 __ mov(value.reg(),
3677 ContextSlotOperandCheckExtensions(potential_slot,
3678 value,
3679 &slow));
3680 if (potential_slot->var()->mode() == Variable::CONST) {
3681 __ cmp(value.reg(), Factory::the_hole_value());
3682 done.Branch(not_equal, &value);
3683 __ mov(value.reg(), Factory::undefined_value());
3684 }
3685 // There is always control flow to slow from
3686 // ContextSlotOperandCheckExtensions so we have to jump around
3687 // it.
3688 done.Jump(&value);
3689 }
3690 }
3691
3692 slow.Bind();
3693 // A runtime call is inevitable. We eagerly sync frame elements
3694 // to memory so that we can push the arguments directly into place
3695 // on top of the frame.
3696 frame_->SyncRange(0, frame_->element_count() - 1);
3697 frame_->EmitPush(esi);
3698 frame_->EmitPush(Immediate(slot->var()->name()));
3699 if (typeof_state == INSIDE_TYPEOF) {
3700 value =
3701 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
3702 } else {
3703 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3704 }
3705
3706 done.Bind(&value);
3707 frame_->Push(&value);
3708
3709 } else if (slot->var()->mode() == Variable::CONST) {
3710 // Const slots may contain 'the hole' value (the constant hasn't been
3711 // initialized yet) which needs to be converted into the 'undefined'
3712 // value.
3713 //
3714 // We currently spill the virtual frame because constants use the
3715 // potentially unsafe direct-frame access of SlotOperand.
3716 VirtualFrame::SpilledScope spilled_scope;
3717 Comment cmnt(masm_, "[ Load const");
3718 JumpTarget exit;
3719 __ mov(ecx, SlotOperand(slot, ecx));
3720 __ cmp(ecx, Factory::the_hole_value());
3721 exit.Branch(not_equal);
3722 __ mov(ecx, Factory::undefined_value());
3723 exit.Bind();
3724 frame_->EmitPush(ecx);
3725
3726 } else if (slot->type() == Slot::PARAMETER) {
3727 frame_->PushParameterAt(slot->index());
3728
3729 } else if (slot->type() == Slot::LOCAL) {
3730 frame_->PushLocalAt(slot->index());
3731
3732 } else {
3733 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
3734 // here.
3735 //
3736 // The use of SlotOperand below is safe for an unspilled frame
3737 // because it will always be a context slot.
3738 ASSERT(slot->type() == Slot::CONTEXT);
3739 Result temp = allocator_->Allocate();
3740 ASSERT(temp.is_valid());
3741 __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
3742 frame_->Push(&temp);
3743 }
3744}
3745
3746
3747void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
3748 TypeofState state) {
3749 LoadFromSlot(slot, state);
3750
3751 // Bail out quickly if we're not using lazy arguments allocation.
3752 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
3753
3754 // ... or if the slot isn't a non-parameter arguments slot.
3755 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
3756
3757 // Pop the loaded value from the stack.
3758 Result value = frame_->Pop();
3759
3760 // If the loaded value is a constant, we know if the arguments
3761 // object has been lazily loaded yet.
3762 if (value.is_constant()) {
3763 if (value.handle()->IsTheHole()) {
3764 Result arguments = StoreArgumentsObject(false);
3765 frame_->Push(&arguments);
3766 } else {
3767 frame_->Push(&value);
3768 }
3769 return;
3770 }
3771
3772 // The loaded value is in a register. If it is the sentinel that
3773 // indicates that we haven't loaded the arguments object yet, we
3774 // need to do it now.
3775 JumpTarget exit;
3776 __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
3777 frame_->Push(&value);
3778 exit.Branch(not_equal);
3779 Result arguments = StoreArgumentsObject(false);
3780 frame_->SetElementAt(0, &arguments);
3781 exit.Bind();
3782}
3783
3784
3785Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
3786 Slot* slot,
3787 TypeofState typeof_state,
3788 JumpTarget* slow) {
3789 // Check that no extension objects have been created by calls to
3790 // eval from the current scope to the global scope.
3791 Register context = esi;
3792 Result tmp = allocator_->Allocate();
3793 ASSERT(tmp.is_valid()); // All non-reserved registers were available.
3794
3795 Scope* s = scope();
3796 while (s != NULL) {
3797 if (s->num_heap_slots() > 0) {
3798 if (s->calls_eval()) {
3799 // Check that extension is NULL.
3800 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
3801 Immediate(0));
3802 slow->Branch(not_equal, not_taken);
3803 }
3804 // Load next context in chain.
3805 __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
3806 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
3807 context = tmp.reg();
3808 }
3809 // If no outer scope calls eval, we do not need to check more
3810 // context extensions. If we have reached an eval scope, we check
3811 // all extensions from this point.
3812 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
3813 s = s->outer_scope();
3814 }
3815
3816 if (s != NULL && s->is_eval_scope()) {
3817 // Loop up the context chain. There is no frame effect so it is
3818 // safe to use raw labels here.
3819 Label next, fast;
3820 if (!context.is(tmp.reg())) {
3821 __ mov(tmp.reg(), context);
3822 }
3823 __ bind(&next);
3824 // Terminate at global context.
3825 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
3826 Immediate(Factory::global_context_map()));
3827 __ j(equal, &fast);
3828 // Check that extension is NULL.
3829 __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
3830 slow->Branch(not_equal, not_taken);
3831 // Load next context in chain.
3832 __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
3833 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
3834 __ jmp(&next);
3835 __ bind(&fast);
3836 }
3837 tmp.Unuse();
3838
3839 // All extension objects were empty and it is safe to use a global
3840 // load IC call.
3841 LoadGlobal();
3842 frame_->Push(slot->var()->name());
3843 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
3844 ? RelocInfo::CODE_TARGET
3845 : RelocInfo::CODE_TARGET_CONTEXT;
3846 Result answer = frame_->CallLoadIC(mode);
3847 // A test eax instruction following the call signals that the inobject
3848 // property case was inlined. Ensure that there is not a test eax
3849 // instruction here.
3850 __ nop();
3851 // Discard the global object. The result is in answer.
3852 frame_->Drop();
3853 return answer;
3854}
3855
3856
3857void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
3858 if (slot->type() == Slot::LOOKUP) {
3859 ASSERT(slot->var()->is_dynamic());
3860
3861 // For now, just do a runtime call. Since the call is inevitable,
3862 // we eagerly sync the virtual frame so we can directly push the
3863 // arguments into place.
3864 frame_->SyncRange(0, frame_->element_count() - 1);
3865
3866 frame_->EmitPush(esi);
3867 frame_->EmitPush(Immediate(slot->var()->name()));
3868
3869 Result value;
3870 if (init_state == CONST_INIT) {
3871 // Same as the case for a normal store, but ignores attribute
3872 // (e.g. READ_ONLY) of context slot so that we can initialize const
3873 // properties (introduced via eval("const foo = (some expr);")). Also,
3874 // uses the current function context instead of the top context.
3875 //
3876 // Note that we must declare the foo upon entry of eval(), via a
3877 // context slot declaration, but we cannot initialize it at the same
3878 // time, because the const declaration may be at the end of the eval
3879 // code (sigh...) and the const variable may have been used before
3880 // (where its value is 'undefined'). Thus, we can only do the
3881 // initialization when we actually encounter the expression and when
3882 // the expression operands are defined and valid, and thus we need the
3883 // split into 2 operations: declaration of the context slot followed
3884 // by initialization.
3885 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
3886 } else {
3887 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
3888 }
3889 // Storing a variable must keep the (new) value on the expression
3890 // stack. This is necessary for compiling chained assignment
3891 // expressions.
3892 frame_->Push(&value);
3893
3894 } else {
3895 ASSERT(!slot->var()->is_dynamic());
3896
3897 JumpTarget exit;
3898 if (init_state == CONST_INIT) {
3899 ASSERT(slot->var()->mode() == Variable::CONST);
3900 // Only the first const initialization must be executed (the slot
3901 // still contains 'the hole' value). When the assignment is executed,
3902 // the code is identical to a normal store (see below).
3903 //
3904 // We spill the frame in the code below because the direct-frame
3905 // access of SlotOperand is potentially unsafe with an unspilled
3906 // frame.
3907 VirtualFrame::SpilledScope spilled_scope;
3908 Comment cmnt(masm_, "[ Init const");
3909 __ mov(ecx, SlotOperand(slot, ecx));
3910 __ cmp(ecx, Factory::the_hole_value());
3911 exit.Branch(not_equal);
3912 }
3913
3914 // We must execute the store. Storing a variable must keep the (new)
3915 // value on the stack. This is necessary for compiling assignment
3916 // expressions.
3917 //
3918 // Note: We will reach here even with slot->var()->mode() ==
3919 // Variable::CONST because of const declarations which will initialize
3920 // consts to 'the hole' value and by doing so, end up calling this code.
3921 if (slot->type() == Slot::PARAMETER) {
3922 frame_->StoreToParameterAt(slot->index());
3923 } else if (slot->type() == Slot::LOCAL) {
3924 frame_->StoreToLocalAt(slot->index());
3925 } else {
3926 // The other slot types (LOOKUP and GLOBAL) cannot reach here.
3927 //
3928 // The use of SlotOperand below is safe for an unspilled frame
3929 // because the slot is a context slot.
3930 ASSERT(slot->type() == Slot::CONTEXT);
3931 frame_->Dup();
3932 Result value = frame_->Pop();
3933 value.ToRegister();
3934 Result start = allocator_->Allocate();
3935 ASSERT(start.is_valid());
3936 __ mov(SlotOperand(slot, start.reg()), value.reg());
3937 // RecordWrite may destroy the value registers.
3938 //
3939 // TODO(204): Avoid actually spilling when the value is not
3940 // needed (probably the common case).
3941 frame_->Spill(value.reg());
3942 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
3943 Result temp = allocator_->Allocate();
3944 ASSERT(temp.is_valid());
3945 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
3946 // The results start, value, and temp are unused by going out of
3947 // scope.
3948 }
3949
3950 exit.Bind();
3951 }
3952}
3953
3954
3955void CodeGenerator::VisitSlot(Slot* node) {
3956 Comment cmnt(masm_, "[ Slot");
3957 LoadFromSlotCheckForArguments(node, typeof_state());
3958}
3959
3960
3961void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
3962 Comment cmnt(masm_, "[ VariableProxy");
3963 Variable* var = node->var();
3964 Expression* expr = var->rewrite();
3965 if (expr != NULL) {
3966 Visit(expr);
3967 } else {
3968 ASSERT(var->is_global());
3969 Reference ref(this, node);
3970 ref.GetValue(typeof_state());
3971 }
3972}
3973
3974
3975void CodeGenerator::VisitLiteral(Literal* node) {
3976 Comment cmnt(masm_, "[ Literal");
3977 frame_->Push(node->handle());
3978}
3979
3980
3981void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
3982 ASSERT(target.is_valid());
3983 ASSERT(value->IsSmi());
3984 int bits = reinterpret_cast<int>(*value);
3985 __ Set(target, Immediate(bits & 0x0000FFFF));
3986 __ xor_(target, bits & 0xFFFF0000);
3987}
3988
3989
3990bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
3991 if (!value->IsSmi()) return false;
3992 int int_value = Smi::cast(*value)->value();
3993 return !is_intn(int_value, kMaxSmiInlinedBits);
3994}
3995
3996
3997// Materialize the regexp literal 'node' in the literals array
3998// 'literals' of the function. Leave the regexp boilerplate in
3999// 'boilerplate'.
4000class DeferredRegExpLiteral: public DeferredCode {
4001 public:
4002 DeferredRegExpLiteral(Register boilerplate,
4003 Register literals,
4004 RegExpLiteral* node)
4005 : boilerplate_(boilerplate), literals_(literals), node_(node) {
4006 set_comment("[ DeferredRegExpLiteral");
4007 }
4008
4009 void Generate();
4010
4011 private:
4012 Register boilerplate_;
4013 Register literals_;
4014 RegExpLiteral* node_;
4015};
4016
4017
4018void DeferredRegExpLiteral::Generate() {
4019 // Since the entry is undefined we call the runtime system to
4020 // compute the literal.
4021 // Literal array (0).
4022 __ push(literals_);
4023 // Literal index (1).
4024 __ push(Immediate(Smi::FromInt(node_->literal_index())));
4025 // RegExp pattern (2).
4026 __ push(Immediate(node_->pattern()));
4027 // RegExp flags (3).
4028 __ push(Immediate(node_->flags()));
4029 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
4030 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
4031}
4032
4033
4034void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
4035 Comment cmnt(masm_, "[ RegExp Literal");
4036
4037 // Retrieve the literals array and check the allocated entry. Begin
4038 // with a writable copy of the function of this activation in a
4039 // register.
4040 frame_->PushFunction();
4041 Result literals = frame_->Pop();
4042 literals.ToRegister();
4043 frame_->Spill(literals.reg());
4044
4045 // Load the literals array of the function.
4046 __ mov(literals.reg(),
4047 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4048
4049 // Load the literal at the ast saved index.
4050 Result boilerplate = allocator_->Allocate();
4051 ASSERT(boilerplate.is_valid());
4052 int literal_offset =
4053 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
4054 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
4055
4056 // Check whether we need to materialize the RegExp object. If so,
4057 // jump to the deferred code passing the literals array.
4058 DeferredRegExpLiteral* deferred =
4059 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
4060 __ cmp(boilerplate.reg(), Factory::undefined_value());
4061 deferred->Branch(equal);
4062 deferred->BindExit();
4063 literals.Unuse();
4064
4065 // Push the boilerplate object.
4066 frame_->Push(&boilerplate);
4067}
4068
4069
4070// Materialize the object literal 'node' in the literals array
4071// 'literals' of the function. Leave the object boilerplate in
4072// 'boilerplate'.
4073class DeferredObjectLiteral: public DeferredCode {
4074 public:
4075 DeferredObjectLiteral(Register boilerplate,
4076 Register literals,
4077 ObjectLiteral* node)
4078 : boilerplate_(boilerplate), literals_(literals), node_(node) {
4079 set_comment("[ DeferredObjectLiteral");
4080 }
4081
4082 void Generate();
4083
4084 private:
4085 Register boilerplate_;
4086 Register literals_;
4087 ObjectLiteral* node_;
4088};
4089
4090
4091void DeferredObjectLiteral::Generate() {
4092 // Since the entry is undefined we call the runtime system to
4093 // compute the literal.
4094 // Literal array (0).
4095 __ push(literals_);
4096 // Literal index (1).
4097 __ push(Immediate(Smi::FromInt(node_->literal_index())));
4098 // Constant properties (2).
4099 __ push(Immediate(node_->constant_properties()));
4100 __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
4101 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
4102}
4103
4104
4105void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
4106 Comment cmnt(masm_, "[ ObjectLiteral");
4107
4108 // Retrieve the literals array and check the allocated entry. Begin
4109 // with a writable copy of the function of this activation in a
4110 // register.
4111 frame_->PushFunction();
4112 Result literals = frame_->Pop();
4113 literals.ToRegister();
4114 frame_->Spill(literals.reg());
4115
4116 // Load the literals array of the function.
4117 __ mov(literals.reg(),
4118 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4119
4120 // Load the literal at the ast saved index.
4121 Result boilerplate = allocator_->Allocate();
4122 ASSERT(boilerplate.is_valid());
4123 int literal_offset =
4124 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
4125 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
4126
4127 // Check whether we need to materialize the object literal boilerplate.
4128 // If so, jump to the deferred code passing the literals array.
4129 DeferredObjectLiteral* deferred =
4130 new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
4131 __ cmp(boilerplate.reg(), Factory::undefined_value());
4132 deferred->Branch(equal);
4133 deferred->BindExit();
4134 literals.Unuse();
4135
4136 // Push the boilerplate object.
4137 frame_->Push(&boilerplate);
4138 // Clone the boilerplate object.
4139 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
4140 if (node->depth() == 1) {
4141 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
4142 }
4143 Result clone = frame_->CallRuntime(clone_function_id, 1);
4144 // Push the newly cloned literal object as the result.
4145 frame_->Push(&clone);
4146
4147 for (int i = 0; i < node->properties()->length(); i++) {
4148 ObjectLiteral::Property* property = node->properties()->at(i);
4149 switch (property->kind()) {
4150 case ObjectLiteral::Property::CONSTANT:
4151 break;
4152 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
4153 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
4154 // else fall through.
4155 case ObjectLiteral::Property::COMPUTED: {
4156 Handle<Object> key(property->key()->handle());
4157 if (key->IsSymbol()) {
4158 // Duplicate the object as the IC receiver.
4159 frame_->Dup();
4160 Load(property->value());
4161 frame_->Push(key);
4162 Result ignored = frame_->CallStoreIC();
4163 // Drop the duplicated receiver and ignore the result.
4164 frame_->Drop();
4165 break;
4166 }
4167 // Fall through
4168 }
4169 case ObjectLiteral::Property::PROTOTYPE: {
4170 // Duplicate the object as an argument to the runtime call.
4171 frame_->Dup();
4172 Load(property->key());
4173 Load(property->value());
4174 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
4175 // Ignore the result.
4176 break;
4177 }
4178 case ObjectLiteral::Property::SETTER: {
4179 // Duplicate the object as an argument to the runtime call.
4180 frame_->Dup();
4181 Load(property->key());
4182 frame_->Push(Smi::FromInt(1));
4183 Load(property->value());
4184 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
4185 // Ignore the result.
4186 break;
4187 }
4188 case ObjectLiteral::Property::GETTER: {
4189 // Duplicate the object as an argument to the runtime call.
4190 frame_->Dup();
4191 Load(property->key());
4192 frame_->Push(Smi::FromInt(0));
4193 Load(property->value());
4194 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
4195 // Ignore the result.
4196 break;
4197 }
4198 default: UNREACHABLE();
4199 }
4200 }
4201}
4202
4203
4204// Materialize the array literal 'node' in the literals array 'literals'
4205// of the function. Leave the array boilerplate in 'boilerplate'.
4206class DeferredArrayLiteral: public DeferredCode {
4207 public:
4208 DeferredArrayLiteral(Register boilerplate,
4209 Register literals,
4210 ArrayLiteral* node)
4211 : boilerplate_(boilerplate), literals_(literals), node_(node) {
4212 set_comment("[ DeferredArrayLiteral");
4213 }
4214
4215 void Generate();
4216
4217 private:
4218 Register boilerplate_;
4219 Register literals_;
4220 ArrayLiteral* node_;
4221};
4222
4223
4224void DeferredArrayLiteral::Generate() {
4225 // Since the entry is undefined we call the runtime system to
4226 // compute the literal.
4227 // Literal array (0).
4228 __ push(literals_);
4229 // Literal index (1).
4230 __ push(Immediate(Smi::FromInt(node_->literal_index())));
4231 // Constant properties (2).
4232 __ push(Immediate(node_->literals()));
4233 __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
4234 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
4235}
4236
4237
4238void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
4239 Comment cmnt(masm_, "[ ArrayLiteral");
4240
4241 // Retrieve the literals array and check the allocated entry. Begin
4242 // with a writable copy of the function of this activation in a
4243 // register.
4244 frame_->PushFunction();
4245 Result literals = frame_->Pop();
4246 literals.ToRegister();
4247 frame_->Spill(literals.reg());
4248
4249 // Load the literals array of the function.
4250 __ mov(literals.reg(),
4251 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4252
4253 // Load the literal at the ast saved index.
4254 Result boilerplate = allocator_->Allocate();
4255 ASSERT(boilerplate.is_valid());
4256 int literal_offset =
4257 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
4258 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
4259
4260 // Check whether we need to materialize the object literal boilerplate.
4261 // If so, jump to the deferred code passing the literals array.
4262 DeferredArrayLiteral* deferred =
4263 new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
4264 __ cmp(boilerplate.reg(), Factory::undefined_value());
4265 deferred->Branch(equal);
4266 deferred->BindExit();
4267 literals.Unuse();
4268
4269 // Push the resulting array literal boilerplate on the stack.
4270 frame_->Push(&boilerplate);
4271 // Clone the boilerplate object.
4272 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
4273 if (node->depth() == 1) {
4274 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
4275 }
4276 Result clone = frame_->CallRuntime(clone_function_id, 1);
4277 // Push the newly cloned literal object as the result.
4278 frame_->Push(&clone);
4279
4280 // Generate code to set the elements in the array that are not
4281 // literals.
4282 for (int i = 0; i < node->values()->length(); i++) {
4283 Expression* value = node->values()->at(i);
4284
4285 // If value is a literal the property value is already set in the
4286 // boilerplate object.
4287 if (value->AsLiteral() != NULL) continue;
4288 // If value is a materialized literal the property value is already set
4289 // in the boilerplate object if it is simple.
4290 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
4291
4292 // The property must be set by generated code.
4293 Load(value);
4294
4295 // Get the property value off the stack.
4296 Result prop_value = frame_->Pop();
4297 prop_value.ToRegister();
4298
4299 // Fetch the array literal while leaving a copy on the stack and
4300 // use it to get the elements array.
4301 frame_->Dup();
4302 Result elements = frame_->Pop();
4303 elements.ToRegister();
4304 frame_->Spill(elements.reg());
4305 // Get the elements array.
4306 __ mov(elements.reg(),
4307 FieldOperand(elements.reg(), JSObject::kElementsOffset));
4308
4309 // Write to the indexed properties array.
4310 int offset = i * kPointerSize + FixedArray::kHeaderSize;
4311 __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
4312
4313 // Update the write barrier for the array address.
4314 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
4315 Result scratch = allocator_->Allocate();
4316 ASSERT(scratch.is_valid());
4317 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
4318 }
4319}
4320
4321
4322void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
4323 ASSERT(!in_spilled_code());
4324 // Call runtime routine to allocate the catch extension object and
4325 // assign the exception value to the catch variable.
4326 Comment cmnt(masm_, "[ CatchExtensionObject");
4327 Load(node->key());
4328 Load(node->value());
4329 Result result =
4330 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
4331 frame_->Push(&result);
4332}
4333
4334
4335void CodeGenerator::VisitAssignment(Assignment* node) {
4336 Comment cmnt(masm_, "[ Assignment");
4337
4338 { Reference target(this, node->target());
4339 if (target.is_illegal()) {
4340 // Fool the virtual frame into thinking that we left the assignment's
4341 // value on the frame.
4342 frame_->Push(Smi::FromInt(0));
4343 return;
4344 }
4345 Variable* var = node->target()->AsVariableProxy()->AsVariable();
4346
4347 if (node->starts_initialization_block()) {
4348 ASSERT(target.type() == Reference::NAMED ||
4349 target.type() == Reference::KEYED);
4350 // Change to slow case in the beginning of an initialization
4351 // block to avoid the quadratic behavior of repeatedly adding
4352 // fast properties.
4353
4354 // The receiver is the argument to the runtime call. It is the
4355 // first value pushed when the reference was loaded to the
4356 // frame.
4357 frame_->PushElementAt(target.size() - 1);
4358 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
4359 }
4360 if (node->op() == Token::ASSIGN ||
4361 node->op() == Token::INIT_VAR ||
4362 node->op() == Token::INIT_CONST) {
4363 Load(node->value());
4364
4365 } else {
4366 Literal* literal = node->value()->AsLiteral();
4367 bool overwrite_value =
4368 (node->value()->AsBinaryOperation() != NULL &&
4369 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
4370 Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
4371 // There are two cases where the target is not read in the right hand
4372 // side, that are easy to test for: the right hand side is a literal,
4373 // or the right hand side is a different variable. TakeValue invalidates
4374 // the target, with an implicit promise that it will be written to again
4375 // before it is read.
4376 if (literal != NULL || (right_var != NULL && right_var != var)) {
4377 target.TakeValue(NOT_INSIDE_TYPEOF);
4378 } else {
4379 target.GetValue(NOT_INSIDE_TYPEOF);
4380 }
4381 Load(node->value());
4382 GenericBinaryOperation(node->binary_op(),
4383 node->type(),
4384 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
4385 }
4386
4387 if (var != NULL &&
4388 var->mode() == Variable::CONST &&
4389 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
4390 // Assignment ignored - leave the value on the stack.
4391 } else {
4392 CodeForSourcePosition(node->position());
4393 if (node->op() == Token::INIT_CONST) {
4394 // Dynamic constant initializations must use the function context
4395 // and initialize the actual constant declared. Dynamic variable
4396 // initializations are simply assignments and use SetValue.
4397 target.SetValue(CONST_INIT);
4398 } else {
4399 target.SetValue(NOT_CONST_INIT);
4400 }
4401 if (node->ends_initialization_block()) {
4402 ASSERT(target.type() == Reference::NAMED ||
4403 target.type() == Reference::KEYED);
4404 // End of initialization block. Revert to fast case. The
4405 // argument to the runtime call is the receiver, which is the
4406 // first value pushed as part of the reference, which is below
4407 // the lhs value.
4408 frame_->PushElementAt(target.size());
4409 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
4410 }
4411 }
4412 }
4413}
4414
4415
4416void CodeGenerator::VisitThrow(Throw* node) {
4417 Comment cmnt(masm_, "[ Throw");
4418 Load(node->exception());
4419 Result result = frame_->CallRuntime(Runtime::kThrow, 1);
4420 frame_->Push(&result);
4421}
4422
4423
4424void CodeGenerator::VisitProperty(Property* node) {
4425 Comment cmnt(masm_, "[ Property");
4426 Reference property(this, node);
4427 property.GetValue(typeof_state());
4428}
4429
4430
4431void CodeGenerator::VisitCall(Call* node) {
4432 Comment cmnt(masm_, "[ Call");
4433
4434 Expression* function = node->expression();
4435 ZoneList<Expression*>* args = node->arguments();
4436
4437 // Check if the function is a variable or a property.
4438 Variable* var = function->AsVariableProxy()->AsVariable();
4439 Property* property = function->AsProperty();
4440
4441 // ------------------------------------------------------------------------
4442 // Fast-case: Use inline caching.
4443 // ---
4444 // According to ECMA-262, section 11.2.3, page 44, the function to call
4445 // must be resolved after the arguments have been evaluated. The IC code
4446 // automatically handles this by loading the arguments before the function
4447 // is resolved in cache misses (this also holds for megamorphic calls).
4448 // ------------------------------------------------------------------------
4449
4450 if (var != NULL && var->is_possibly_eval()) {
4451 // ----------------------------------
4452 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
4453 // ----------------------------------
4454
4455 // In a call to eval, we first call %ResolvePossiblyDirectEval to
4456 // resolve the function we need to call and the receiver of the
4457 // call. Then we call the resolved function using the given
4458 // arguments.
4459
4460 // Prepare the stack for the call to the resolved function.
4461 Load(function);
4462
4463 // Allocate a frame slot for the receiver.
4464 frame_->Push(Factory::undefined_value());
4465 int arg_count = args->length();
4466 for (int i = 0; i < arg_count; i++) {
4467 Load(args->at(i));
4468 }
4469
4470 // Prepare the stack for the call to ResolvePossiblyDirectEval.
4471 frame_->PushElementAt(arg_count + 1);
4472 if (arg_count > 0) {
4473 frame_->PushElementAt(arg_count);
4474 } else {
4475 frame_->Push(Factory::undefined_value());
4476 }
4477
4478 // Resolve the call.
4479 Result result =
4480 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
4481
4482 // Touch up the stack with the right values for the function and the
4483 // receiver. Use a scratch register to avoid destroying the result.
4484 Result scratch = allocator_->Allocate();
4485 ASSERT(scratch.is_valid());
4486 __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
4487 frame_->SetElementAt(arg_count + 1, &scratch);
4488
4489 // We can reuse the result register now.
4490 frame_->Spill(result.reg());
4491 __ mov(result.reg(),
4492 FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
4493 frame_->SetElementAt(arg_count, &result);
4494
4495 // Call the function.
4496 CodeForSourcePosition(node->position());
4497 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4498 CallFunctionStub call_function(arg_count, in_loop);
4499 result = frame_->CallStub(&call_function, arg_count + 1);
4500
4501 // Restore the context and overwrite the function on the stack with
4502 // the result.
4503 frame_->RestoreContextRegister();
4504 frame_->SetElementAt(0, &result);
4505
4506 } else if (var != NULL && !var->is_this() && var->is_global()) {
4507 // ----------------------------------
4508 // JavaScript example: 'foo(1, 2, 3)' // foo is global
4509 // ----------------------------------
4510
4511 // Push the name of the function and the receiver onto the stack.
4512 frame_->Push(var->name());
4513
4514 // Pass the global object as the receiver and let the IC stub
4515 // patch the stack to use the global proxy as 'this' in the
4516 // invoked function.
4517 LoadGlobal();
4518
4519 // Load the arguments.
4520 int arg_count = args->length();
4521 for (int i = 0; i < arg_count; i++) {
4522 Load(args->at(i));
4523 }
4524
4525 // Call the IC initialization code.
4526 CodeForSourcePosition(node->position());
4527 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
4528 arg_count,
4529 loop_nesting());
4530 frame_->RestoreContextRegister();
4531 // Replace the function on the stack with the result.
4532 frame_->SetElementAt(0, &result);
4533
4534 } else if (var != NULL && var->slot() != NULL &&
4535 var->slot()->type() == Slot::LOOKUP) {
4536 // ----------------------------------
4537 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
4538 // ----------------------------------
4539
4540 // Load the function from the context. Sync the frame so we can
4541 // push the arguments directly into place.
4542 frame_->SyncRange(0, frame_->element_count() - 1);
4543 frame_->EmitPush(esi);
4544 frame_->EmitPush(Immediate(var->name()));
4545 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4546 // The runtime call returns a pair of values in eax and edx. The
4547 // looked-up function is in eax and the receiver is in edx. These
4548 // register references are not ref counted here. We spill them
4549 // eagerly since they are arguments to an inevitable call (and are
4550 // not sharable by the arguments).
4551 ASSERT(!allocator()->is_used(eax));
4552 frame_->EmitPush(eax);
4553
4554 // Load the receiver.
4555 ASSERT(!allocator()->is_used(edx));
4556 frame_->EmitPush(edx);
4557
4558 // Call the function.
4559 CallWithArguments(args, node->position());
4560
4561 } else if (property != NULL) {
4562 // Check if the key is a literal string.
4563 Literal* literal = property->key()->AsLiteral();
4564
4565 if (literal != NULL && literal->handle()->IsSymbol()) {
4566 // ------------------------------------------------------------------
4567 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
4568 // ------------------------------------------------------------------
4569
4570 Handle<String> name = Handle<String>::cast(literal->handle());
4571
4572 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
4573 name->IsEqualTo(CStrVector("apply")) &&
4574 args->length() == 2 &&
4575 args->at(1)->AsVariableProxy() != NULL &&
4576 args->at(1)->AsVariableProxy()->IsArguments()) {
4577 // Use the optimized Function.prototype.apply that avoids
4578 // allocating lazily allocated arguments objects.
4579 CallApplyLazy(property,
4580 args->at(0),
4581 args->at(1)->AsVariableProxy(),
4582 node->position());
4583
4584 } else {
4585 // Push the name of the function and the receiver onto the stack.
4586 frame_->Push(name);
4587 Load(property->obj());
4588
4589 // Load the arguments.
4590 int arg_count = args->length();
4591 for (int i = 0; i < arg_count; i++) {
4592 Load(args->at(i));
4593 }
4594
4595 // Call the IC initialization code.
4596 CodeForSourcePosition(node->position());
4597 Result result =
4598 frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
4599 loop_nesting());
4600 frame_->RestoreContextRegister();
4601 // Replace the function on the stack with the result.
4602 frame_->SetElementAt(0, &result);
4603 }
4604
4605 } else {
4606 // -------------------------------------------
4607 // JavaScript example: 'array[index](1, 2, 3)'
4608 // -------------------------------------------
4609
4610 // Load the function to call from the property through a reference.
4611 Reference ref(this, property);
4612 ref.GetValue(NOT_INSIDE_TYPEOF);
4613
4614 // Pass receiver to called function.
4615 if (property->is_synthetic()) {
4616 // Use global object as receiver.
4617 LoadGlobalReceiver();
4618 } else {
4619 // The reference's size is non-negative.
4620 frame_->PushElementAt(ref.size());
4621 }
4622
4623 // Call the function.
4624 CallWithArguments(args, node->position());
4625 }
4626
4627 } else {
4628 // ----------------------------------
4629 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
4630 // ----------------------------------
4631
4632 // Load the function.
4633 Load(function);
4634
4635 // Pass the global proxy as the receiver.
4636 LoadGlobalReceiver();
4637
4638 // Call the function.
4639 CallWithArguments(args, node->position());
4640 }
4641}
4642
4643
4644void CodeGenerator::VisitCallNew(CallNew* node) {
4645 Comment cmnt(masm_, "[ CallNew");
4646
4647 // According to ECMA-262, section 11.2.2, page 44, the function
4648 // expression in new calls must be evaluated before the
4649 // arguments. This is different from ordinary calls, where the
4650 // actual function to call is resolved after the arguments have been
4651 // evaluated.
4652
4653 // Compute function to call and use the global object as the
4654 // receiver. There is no need to use the global proxy here because
4655 // it will always be replaced with a newly allocated object.
4656 Load(node->expression());
4657 LoadGlobal();
4658
4659 // Push the arguments ("left-to-right") on the stack.
4660 ZoneList<Expression*>* args = node->arguments();
4661 int arg_count = args->length();
4662 for (int i = 0; i < arg_count; i++) {
4663 Load(args->at(i));
4664 }
4665
4666 // Call the construct call builtin that handles allocation and
4667 // constructor invocation.
4668 CodeForSourcePosition(node->position());
4669 Result result = frame_->CallConstructor(arg_count);
4670 // Replace the function on the stack with the result.
4671 frame_->SetElementAt(0, &result);
4672}
4673
4674
4675void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
4676 ASSERT(args->length() == 1);
4677 Load(args->at(0));
4678 Result value = frame_->Pop();
4679 value.ToRegister();
4680 ASSERT(value.is_valid());
4681 __ test(value.reg(), Immediate(kSmiTagMask));
4682 value.Unuse();
4683 destination()->Split(zero);
4684}
4685
4686
4687void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
4688 // Conditionally generate a log call.
4689 // Args:
4690 // 0 (literal string): The type of logging (corresponds to the flags).
4691 // This is used to determine whether or not to generate the log call.
4692 // 1 (string): Format string. Access the string at argument index 2
4693 // with '%2s' (see Logger::LogRuntime for all the formats).
4694 // 2 (array): Arguments to the format string.
4695 ASSERT_EQ(args->length(), 3);
4696#ifdef ENABLE_LOGGING_AND_PROFILING
4697 if (ShouldGenerateLog(args->at(0))) {
4698 Load(args->at(1));
4699 Load(args->at(2));
4700 frame_->CallRuntime(Runtime::kLog, 2);
4701 }
4702#endif
4703 // Finally, we're expected to leave a value on the top of the stack.
4704 frame_->Push(Factory::undefined_value());
4705}
4706
4707
4708void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
4709 ASSERT(args->length() == 1);
4710 Load(args->at(0));
4711 Result value = frame_->Pop();
4712 value.ToRegister();
4713 ASSERT(value.is_valid());
4714 __ test(value.reg(), Immediate(kSmiTagMask | 0x80000000));
4715 value.Unuse();
4716 destination()->Split(zero);
4717}
4718
4719
4720// This generates code that performs a charCodeAt() call or returns
4721// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
4722// It can handle flat and sliced strings, 8 and 16 bit characters and
4723// cons strings where the answer is found in the left hand branch of the
4724// cons. The slow case will flatten the string, which will ensure that
4725// the answer is in the left hand side the next time around.
4726void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
4727 Comment(masm_, "[ GenerateFastCharCodeAt");
4728 ASSERT(args->length() == 2);
4729
4730 Label slow_case;
4731 Label end;
4732 Label not_a_flat_string;
4733 Label a_cons_string;
4734 Label try_again_with_new_string;
4735 Label ascii_string;
4736 Label got_char_code;
4737
4738 Load(args->at(0));
4739 Load(args->at(1));
4740 Result index = frame_->Pop();
4741 Result object = frame_->Pop();
4742
4743 // Get register ecx to use as shift amount later.
4744 Result shift_amount;
4745 if (object.is_register() && object.reg().is(ecx)) {
4746 Result fresh = allocator_->Allocate();
4747 shift_amount = object;
4748 object = fresh;
4749 __ mov(object.reg(), ecx);
4750 }
4751 if (index.is_register() && index.reg().is(ecx)) {
4752 Result fresh = allocator_->Allocate();
4753 shift_amount = index;
4754 index = fresh;
4755 __ mov(index.reg(), ecx);
4756 }
4757 // There could be references to ecx in the frame. Allocating will
4758 // spill them, otherwise spill explicitly.
4759 if (shift_amount.is_valid()) {
4760 frame_->Spill(ecx);
4761 } else {
4762 shift_amount = allocator()->Allocate(ecx);
4763 }
4764 ASSERT(shift_amount.is_register());
4765 ASSERT(shift_amount.reg().is(ecx));
4766 ASSERT(allocator_->count(ecx) == 1);
4767
4768 // We will mutate the index register and possibly the object register.
4769 // The case where they are somehow the same register is handled
4770 // because we only mutate them in the case where the receiver is a
4771 // heap object and the index is not.
4772 object.ToRegister();
4773 index.ToRegister();
4774 frame_->Spill(object.reg());
4775 frame_->Spill(index.reg());
4776
4777 // We need a single extra temporary register.
4778 Result temp = allocator()->Allocate();
4779 ASSERT(temp.is_valid());
4780
4781 // There is no virtual frame effect from here up to the final result
4782 // push.
4783
4784 // If the receiver is a smi trigger the slow case.
4785 ASSERT(kSmiTag == 0);
4786 __ test(object.reg(), Immediate(kSmiTagMask));
4787 __ j(zero, &slow_case);
4788
4789 // If the index is negative or non-smi trigger the slow case.
4790 ASSERT(kSmiTag == 0);
4791 __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
4792 __ j(not_zero, &slow_case);
4793 // Untag the index.
4794 __ sar(index.reg(), kSmiTagSize);
4795
4796 __ bind(&try_again_with_new_string);
4797 // Fetch the instance type of the receiver into ecx.
4798 __ mov(ecx, FieldOperand(object.reg(), HeapObject::kMapOffset));
4799 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
4800 // If the receiver is not a string trigger the slow case.
4801 __ test(ecx, Immediate(kIsNotStringMask));
4802 __ j(not_zero, &slow_case);
4803
4804 // Here we make assumptions about the tag values and the shifts needed.
4805 // See the comment in objects.h.
4806 ASSERT(kLongStringTag == 0);
4807 ASSERT(kMediumStringTag + String::kLongLengthShift ==
4808 String::kMediumLengthShift);
4809 ASSERT(kShortStringTag + String::kLongLengthShift ==
4810 String::kShortLengthShift);
4811 __ and_(ecx, kStringSizeMask);
4812 __ add(Operand(ecx), Immediate(String::kLongLengthShift));
4813 // Fetch the length field into the temporary register.
4814 __ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
4815 __ shr(temp.reg()); // The shift amount in ecx is implicit operand.
4816 // Check for index out of range.
4817 __ cmp(index.reg(), Operand(temp.reg()));
4818 __ j(greater_equal, &slow_case);
4819 // Reload the instance type (into the temp register this time)..
4820 __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
4821 __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
4822
4823 // We need special handling for non-flat strings.
4824 ASSERT(kSeqStringTag == 0);
4825 __ test(temp.reg(), Immediate(kStringRepresentationMask));
4826 __ j(not_zero, &not_a_flat_string);
4827 // Check for 1-byte or 2-byte string.
4828 __ test(temp.reg(), Immediate(kStringEncodingMask));
4829 __ j(not_zero, &ascii_string);
4830
4831 // 2-byte string.
4832 // Load the 2-byte character code into the temp register.
4833 __ movzx_w(temp.reg(), FieldOperand(object.reg(),
4834 index.reg(),
4835 times_2,
4836 SeqTwoByteString::kHeaderSize));
4837 __ jmp(&got_char_code);
4838
4839 // ASCII string.
4840 __ bind(&ascii_string);
4841 // Load the byte into the temp register.
4842 __ movzx_b(temp.reg(), FieldOperand(object.reg(),
4843 index.reg(),
4844 times_1,
4845 SeqAsciiString::kHeaderSize));
4846 __ bind(&got_char_code);
4847 ASSERT(kSmiTag == 0);
4848 __ shl(temp.reg(), kSmiTagSize);
4849 __ jmp(&end);
4850
4851 // Handle non-flat strings.
4852 __ bind(&not_a_flat_string);
4853 __ and_(temp.reg(), kStringRepresentationMask);
4854 __ cmp(temp.reg(), kConsStringTag);
4855 __ j(equal, &a_cons_string);
4856 __ cmp(temp.reg(), kSlicedStringTag);
4857 __ j(not_equal, &slow_case);
4858
4859 // SlicedString.
4860 // Add the offset to the index and trigger the slow case on overflow.
4861 __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
4862 __ j(overflow, &slow_case);
4863 // Getting the underlying string is done by running the cons string code.
4864
4865 // ConsString.
4866 __ bind(&a_cons_string);
4867 // Get the first of the two strings. Both sliced and cons strings
4868 // store their source string at the same offset.
4869 ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
4870 __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
4871 __ jmp(&try_again_with_new_string);
4872
4873 __ bind(&slow_case);
4874 // Move the undefined value into the result register, which will
4875 // trigger the slow case.
4876 __ Set(temp.reg(), Immediate(Factory::undefined_value()));
4877
4878 __ bind(&end);
4879 frame_->Push(&temp);
4880}
4881
4882
4883void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
4884 ASSERT(args->length() == 1);
4885 Load(args->at(0));
4886 Result value = frame_->Pop();
4887 value.ToRegister();
4888 ASSERT(value.is_valid());
4889 __ test(value.reg(), Immediate(kSmiTagMask));
4890 destination()->false_target()->Branch(equal);
4891 // It is a heap object - get map.
4892 Result temp = allocator()->Allocate();
4893 ASSERT(temp.is_valid());
4894 // Check if the object is a JS array or not.
4895 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
4896 value.Unuse();
4897 temp.Unuse();
4898 destination()->Split(equal);
4899}
4900
4901
4902void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
4903 ASSERT(args->length() == 0);
4904
4905 // Get the frame pointer for the calling frame.
4906 Result fp = allocator()->Allocate();
4907 __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
4908
4909 // Skip the arguments adaptor frame if it exists.
4910 Label check_frame_marker;
4911 __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
4912 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4913 __ j(not_equal, &check_frame_marker);
4914 __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
4915
4916 // Check the marker in the calling frame.
4917 __ bind(&check_frame_marker);
4918 __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
4919 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
4920 fp.Unuse();
4921 destination()->Split(equal);
4922}
4923
4924
4925void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
4926 ASSERT(args->length() == 0);
4927 // ArgumentsAccessStub takes the parameter count as an input argument
4928 // in register eax. Create a constant result for it.
4929 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
4930 // Call the shared stub to get to the arguments.length.
4931 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
4932 Result result = frame_->CallStub(&stub, &count);
4933 frame_->Push(&result);
4934}
4935
4936
4937void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4938 ASSERT(args->length() == 1);
4939 JumpTarget leave, null, function, non_function_constructor;
4940 Load(args->at(0)); // Load the object.
4941 Result obj = frame_->Pop();
4942 obj.ToRegister();
4943 frame_->Spill(obj.reg());
4944
4945 // If the object is a smi, we return null.
4946 __ test(obj.reg(), Immediate(kSmiTagMask));
4947 null.Branch(zero);
4948
4949 // Check that the object is a JS object but take special care of JS
4950 // functions to make sure they have 'Function' as their class.
4951 { Result tmp = allocator()->Allocate();
4952 __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
4953 __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
4954 __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
4955 null.Branch(less);
4956
4957 // As long as JS_FUNCTION_TYPE is the last instance type and it is
4958 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4959 // LAST_JS_OBJECT_TYPE.
4960 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4961 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4962 __ cmp(tmp.reg(), JS_FUNCTION_TYPE);
4963 function.Branch(equal);
4964 }
4965
4966 // Check if the constructor in the map is a function.
4967 { Result tmp = allocator()->Allocate();
4968 __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
4969 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
4970 non_function_constructor.Branch(not_equal);
4971 }
4972
4973 // The map register now contains the constructor function. Grab the
4974 // instance class name from there.
4975 __ mov(obj.reg(),
4976 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
4977 __ mov(obj.reg(),
4978 FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
4979 frame_->Push(&obj);
4980 leave.Jump();
4981
4982 // Functions have class 'Function'.
4983 function.Bind();
4984 frame_->Push(Factory::function_class_symbol());
4985 leave.Jump();
4986
4987 // Objects with a non-function constructor have class 'Object'.
4988 non_function_constructor.Bind();
4989 frame_->Push(Factory::Object_symbol());
4990 leave.Jump();
4991
4992 // Non-JS objects have class null.
4993 null.Bind();
4994 frame_->Push(Factory::null_value());
4995
4996 // All done.
4997 leave.Bind();
4998}
4999
5000
5001void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
5002 ASSERT(args->length() == 1);
5003 JumpTarget leave;
5004 Load(args->at(0)); // Load the object.
5005 frame_->Dup();
5006 Result object = frame_->Pop();
5007 object.ToRegister();
5008 ASSERT(object.is_valid());
5009 // if (object->IsSmi()) return object.
5010 __ test(object.reg(), Immediate(kSmiTagMask));
5011 leave.Branch(zero, taken);
5012 // It is a heap object - get map.
5013 Result temp = allocator()->Allocate();
5014 ASSERT(temp.is_valid());
5015 // if (!object->IsJSValue()) return object.
5016 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
5017 leave.Branch(not_equal, not_taken);
5018 __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
5019 object.Unuse();
5020 frame_->SetElementAt(0, &temp);
5021 leave.Bind();
5022}
5023
5024
5025void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
5026 ASSERT(args->length() == 2);
5027 JumpTarget leave;
5028 Load(args->at(0)); // Load the object.
5029 Load(args->at(1)); // Load the value.
5030 Result value = frame_->Pop();
5031 Result object = frame_->Pop();
5032 value.ToRegister();
5033 object.ToRegister();
5034
5035 // if (object->IsSmi()) return value.
5036 __ test(object.reg(), Immediate(kSmiTagMask));
5037 leave.Branch(zero, &value, taken);
5038
5039 // It is a heap object - get its map.
5040 Result scratch = allocator_->Allocate();
5041 ASSERT(scratch.is_valid());
5042 // if (!object->IsJSValue()) return value.
5043 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
5044 leave.Branch(not_equal, &value, not_taken);
5045
5046 // Store the value.
5047 __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
5048 // Update the write barrier. Save the value as it will be
5049 // overwritten by the write barrier code and is needed afterward.
5050 Result duplicate_value = allocator_->Allocate();
5051 ASSERT(duplicate_value.is_valid());
5052 __ mov(duplicate_value.reg(), value.reg());
5053 // The object register is also overwritten by the write barrier and
5054 // possibly aliased in the frame.
5055 frame_->Spill(object.reg());
5056 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
5057 scratch.reg());
5058 object.Unuse();
5059 scratch.Unuse();
5060 duplicate_value.Unuse();
5061
5062 // Leave.
5063 leave.Bind(&value);
5064 frame_->Push(&value);
5065}
5066
5067
5068void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
5069 ASSERT(args->length() == 1);
5070
5071 // ArgumentsAccessStub expects the key in edx and the formal
5072 // parameter count in eax.
5073 Load(args->at(0));
5074 Result key = frame_->Pop();
5075 // Explicitly create a constant result.
5076 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
5077 // Call the shared stub to get to arguments[key].
5078 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
5079 Result result = frame_->CallStub(&stub, &key, &count);
5080 frame_->Push(&result);
5081}
5082
5083
5084void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
5085 ASSERT(args->length() == 2);
5086
5087 // Load the two objects into registers and perform the comparison.
5088 Load(args->at(0));
5089 Load(args->at(1));
5090 Result right = frame_->Pop();
5091 Result left = frame_->Pop();
5092 right.ToRegister();
5093 left.ToRegister();
5094 __ cmp(right.reg(), Operand(left.reg()));
5095 right.Unuse();
5096 left.Unuse();
5097 destination()->Split(equal);
5098}
5099
5100
5101void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
5102 ASSERT(args->length() == 0);
5103 ASSERT(kSmiTag == 0); // EBP value is aligned, so it should look like Smi.
5104 Result ebp_as_smi = allocator_->Allocate();
5105 ASSERT(ebp_as_smi.is_valid());
5106 __ mov(ebp_as_smi.reg(), Operand(ebp));
5107 frame_->Push(&ebp_as_smi);
5108}
5109
5110
5111void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
5112 ASSERT(args->length() == 0);
5113 frame_->SpillAll();
5114
5115 // Make sure the frame is aligned like the OS expects.
5116 static const int kFrameAlignment = OS::ActivationFrameAlignment();
5117 if (kFrameAlignment > 0) {
5118 ASSERT(IsPowerOf2(kFrameAlignment));
5119 __ mov(edi, Operand(esp)); // Save in callee-saved register.
5120 __ and_(esp, -kFrameAlignment);
5121 }
5122
5123 // Call V8::RandomPositiveSmi().
5124 __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
5125
5126 // Restore stack pointer from callee-saved register edi.
5127 if (kFrameAlignment > 0) {
5128 __ mov(esp, Operand(edi));
5129 }
5130
5131 Result result = allocator_->Allocate(eax);
5132 frame_->Push(&result);
5133}
5134
5135
5136void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
5137 JumpTarget done;
5138 JumpTarget call_runtime;
5139 ASSERT(args->length() == 1);
5140
5141 // Load number and duplicate it.
5142 Load(args->at(0));
5143 frame_->Dup();
5144
5145 // Get the number into an unaliased register and load it onto the
5146 // floating point stack still leaving one copy on the frame.
5147 Result number = frame_->Pop();
5148 number.ToRegister();
5149 frame_->Spill(number.reg());
5150 FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
5151 number.Unuse();
5152
5153 // Perform the operation on the number.
5154 switch (op) {
5155 case SIN:
5156 __ fsin();
5157 break;
5158 case COS:
5159 __ fcos();
5160 break;
5161 }
5162
5163 // Go slow case if argument to operation is out of range.
5164 Result eax_reg = allocator_->Allocate(eax);
5165 ASSERT(eax_reg.is_valid());
5166 __ fnstsw_ax();
5167 __ sahf();
5168 eax_reg.Unuse();
5169 call_runtime.Branch(parity_even, not_taken);
5170
5171 // Allocate heap number for result if possible.
5172 Result scratch1 = allocator()->Allocate();
5173 Result scratch2 = allocator()->Allocate();
5174 Result heap_number = allocator()->Allocate();
5175 FloatingPointHelper::AllocateHeapNumber(masm_,
5176 call_runtime.entry_label(),
5177 scratch1.reg(),
5178 scratch2.reg(),
5179 heap_number.reg());
5180 scratch1.Unuse();
5181 scratch2.Unuse();
5182
5183 // Store the result in the allocated heap number.
5184 __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
5185 // Replace the extra copy of the argument with the result.
5186 frame_->SetElementAt(0, &heap_number);
5187 done.Jump();
5188
5189 call_runtime.Bind();
5190 // Free ST(0) which was not popped before calling into the runtime.
5191 __ ffree(0);
5192 Result answer;
5193 switch (op) {
5194 case SIN:
5195 answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
5196 break;
5197 case COS:
5198 answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
5199 break;
5200 }
5201 frame_->Push(&answer);
5202 done.Bind();
5203}
5204
5205
5206void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
5207 if (CheckForInlineRuntimeCall(node)) {
5208 return;
5209 }
5210
5211 ZoneList<Expression*>* args = node->arguments();
5212 Comment cmnt(masm_, "[ CallRuntime");
5213 Runtime::Function* function = node->function();
5214
5215 if (function == NULL) {
5216 // Prepare stack for calling JS runtime function.
5217 frame_->Push(node->name());
5218 // Push the builtins object found in the current global object.
5219 Result temp = allocator()->Allocate();
5220 ASSERT(temp.is_valid());
5221 __ mov(temp.reg(), GlobalObject());
5222 __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
5223 frame_->Push(&temp);
5224 }
5225
5226 // Push the arguments ("left-to-right").
5227 int arg_count = args->length();
5228 for (int i = 0; i < arg_count; i++) {
5229 Load(args->at(i));
5230 }
5231
5232 if (function == NULL) {
5233 // Call the JS runtime function.
5234 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
5235 arg_count,
5236 loop_nesting_);
5237 frame_->RestoreContextRegister();
5238 frame_->SetElementAt(0, &answer);
5239 } else {
5240 // Call the C runtime function.
5241 Result answer = frame_->CallRuntime(function, arg_count);
5242 frame_->Push(&answer);
5243 }
5244}
5245
5246
5247void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
5248 // Note that because of NOT and an optimization in comparison of a typeof
5249 // expression to a literal string, this function can fail to leave a value
5250 // on top of the frame or in the cc register.
5251 Comment cmnt(masm_, "[ UnaryOperation");
5252
5253 Token::Value op = node->op();
5254
5255 if (op == Token::NOT) {
5256 // Swap the true and false targets but keep the same actual label
5257 // as the fall through.
5258 destination()->Invert();
5259 LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
5260 // Swap the labels back.
5261 destination()->Invert();
5262
5263 } else if (op == Token::DELETE) {
5264 Property* property = node->expression()->AsProperty();
5265 if (property != NULL) {
5266 Load(property->obj());
5267 Load(property->key());
5268 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
5269 frame_->Push(&answer);
5270 return;
5271 }
5272
5273 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
5274 if (variable != NULL) {
5275 Slot* slot = variable->slot();
5276 if (variable->is_global()) {
5277 LoadGlobal();
5278 frame_->Push(variable->name());
5279 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
5280 CALL_FUNCTION, 2);
5281 frame_->Push(&answer);
5282 return;
5283
5284 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
5285 // Call the runtime to look up the context holding the named
5286 // variable. Sync the virtual frame eagerly so we can push the
5287 // arguments directly into place.
5288 frame_->SyncRange(0, frame_->element_count() - 1);
5289 frame_->EmitPush(esi);
5290 frame_->EmitPush(Immediate(variable->name()));
5291 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
5292 ASSERT(context.is_register());
5293 frame_->EmitPush(context.reg());
5294 context.Unuse();
5295 frame_->EmitPush(Immediate(variable->name()));
5296 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
5297 CALL_FUNCTION, 2);
5298 frame_->Push(&answer);
5299 return;
5300 }
5301
5302 // Default: Result of deleting non-global, not dynamically
5303 // introduced variables is false.
5304 frame_->Push(Factory::false_value());
5305
5306 } else {
5307 // Default: Result of deleting expressions is true.
5308 Load(node->expression()); // may have side-effects
5309 frame_->SetElementAt(0, Factory::true_value());
5310 }
5311
5312 } else if (op == Token::TYPEOF) {
5313 // Special case for loading the typeof expression; see comment on
5314 // LoadTypeofExpression().
5315 LoadTypeofExpression(node->expression());
5316 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
5317 frame_->Push(&answer);
5318
5319 } else if (op == Token::VOID) {
5320 Expression* expression = node->expression();
5321 if (expression && expression->AsLiteral() && (
5322 expression->AsLiteral()->IsTrue() ||
5323 expression->AsLiteral()->IsFalse() ||
5324 expression->AsLiteral()->handle()->IsNumber() ||
5325 expression->AsLiteral()->handle()->IsString() ||
5326 expression->AsLiteral()->handle()->IsJSRegExp() ||
5327 expression->AsLiteral()->IsNull())) {
5328 // Omit evaluating the value of the primitive literal.
5329 // It will be discarded anyway, and can have no side effect.
5330 frame_->Push(Factory::undefined_value());
5331 } else {
5332 Load(node->expression());
5333 frame_->SetElementAt(0, Factory::undefined_value());
5334 }
5335
5336 } else {
5337 Load(node->expression());
5338 switch (op) {
5339 case Token::SUB: {
5340 bool overwrite =
5341 (node->AsBinaryOperation() != NULL &&
5342 node->AsBinaryOperation()->ResultOverwriteAllowed());
5343 UnarySubStub stub(overwrite);
5344 // TODO(1222589): remove dependency of TOS being cached inside stub
5345 Result operand = frame_->Pop();
5346 Result answer = frame_->CallStub(&stub, &operand);
5347 frame_->Push(&answer);
5348 break;
5349 }
5350
5351 case Token::BIT_NOT: {
5352 // Smi check.
5353 JumpTarget smi_label;
5354 JumpTarget continue_label;
5355 Result operand = frame_->Pop();
5356 operand.ToRegister();
5357 __ test(operand.reg(), Immediate(kSmiTagMask));
5358 smi_label.Branch(zero, &operand, taken);
5359
5360 frame_->Push(&operand); // undo popping of TOS
5361 Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
5362 CALL_FUNCTION, 1);
5363
5364 continue_label.Jump(&answer);
5365 smi_label.Bind(&answer);
5366 answer.ToRegister();
5367 frame_->Spill(answer.reg());
5368 __ not_(answer.reg());
5369 __ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
5370 continue_label.Bind(&answer);
5371 frame_->Push(&answer);
5372 break;
5373 }
5374
5375 case Token::ADD: {
5376 // Smi check.
5377 JumpTarget continue_label;
5378 Result operand = frame_->Pop();
5379 operand.ToRegister();
5380 __ test(operand.reg(), Immediate(kSmiTagMask));
5381 continue_label.Branch(zero, &operand, taken);
5382
5383 frame_->Push(&operand);
5384 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
5385 CALL_FUNCTION, 1);
5386
5387 continue_label.Bind(&answer);
5388 frame_->Push(&answer);
5389 break;
5390 }
5391
5392 default:
5393 // NOT, DELETE, TYPEOF, and VOID are handled outside the
5394 // switch.
5395 UNREACHABLE();
5396 }
5397 }
5398}
5399
5400
5401// The value in dst was optimistically incremented or decremented. The
5402// result overflowed or was not smi tagged. Undo the operation, call
5403// into the runtime to convert the argument to a number, and call the
5404// specialized add or subtract stub. The result is left in dst.
5405class DeferredPrefixCountOperation: public DeferredCode {
5406 public:
5407 DeferredPrefixCountOperation(Register dst, bool is_increment)
5408 : dst_(dst), is_increment_(is_increment) {
5409 set_comment("[ DeferredCountOperation");
5410 }
5411
5412 virtual void Generate();
5413
5414 private:
5415 Register dst_;
5416 bool is_increment_;
5417};
5418
5419
5420void DeferredPrefixCountOperation::Generate() {
5421 // Undo the optimistic smi operation.
5422 if (is_increment_) {
5423 __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
5424 } else {
5425 __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
5426 }
5427 __ push(dst_);
5428 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
5429 __ push(eax);
5430 __ push(Immediate(Smi::FromInt(1)));
5431 if (is_increment_) {
5432 __ CallRuntime(Runtime::kNumberAdd, 2);
5433 } else {
5434 __ CallRuntime(Runtime::kNumberSub, 2);
5435 }
5436 if (!dst_.is(eax)) __ mov(dst_, eax);
5437}
5438
5439
5440// The value in dst was optimistically incremented or decremented. The
5441// result overflowed or was not smi tagged. Undo the operation and call
5442// into the runtime to convert the argument to a number. Update the
5443// original value in old. Call the specialized add or subtract stub.
5444// The result is left in dst.
5445class DeferredPostfixCountOperation: public DeferredCode {
5446 public:
5447 DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
5448 : dst_(dst), old_(old), is_increment_(is_increment) {
5449 set_comment("[ DeferredCountOperation");
5450 }
5451
5452 virtual void Generate();
5453
5454 private:
5455 Register dst_;
5456 Register old_;
5457 bool is_increment_;
5458};
5459
5460
5461void DeferredPostfixCountOperation::Generate() {
5462 // Undo the optimistic smi operation.
5463 if (is_increment_) {
5464 __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
5465 } else {
5466 __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
5467 }
5468 __ push(dst_);
5469 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
5470
5471 // Save the result of ToNumber to use as the old value.
5472 __ push(eax);
5473
5474 // Call the runtime for the addition or subtraction.
5475 __ push(eax);
5476 __ push(Immediate(Smi::FromInt(1)));
5477 if (is_increment_) {
5478 __ CallRuntime(Runtime::kNumberAdd, 2);
5479 } else {
5480 __ CallRuntime(Runtime::kNumberSub, 2);
5481 }
5482 if (!dst_.is(eax)) __ mov(dst_, eax);
5483 __ pop(old_);
5484}
5485
5486
5487void CodeGenerator::VisitCountOperation(CountOperation* node) {
5488 Comment cmnt(masm_, "[ CountOperation");
5489
5490 bool is_postfix = node->is_postfix();
5491 bool is_increment = node->op() == Token::INC;
5492
5493 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
5494 bool is_const = (var != NULL && var->mode() == Variable::CONST);
5495
5496 // Postfix operations need a stack slot under the reference to hold
5497 // the old value while the new value is being stored. This is so that
5498 // in the case that storing the new value requires a call, the old
5499 // value will be in the frame to be spilled.
5500 if (is_postfix) frame_->Push(Smi::FromInt(0));
5501
5502 { Reference target(this, node->expression());
5503 if (target.is_illegal()) {
5504 // Spoof the virtual frame to have the expected height (one higher
5505 // than on entry).
5506 if (!is_postfix) frame_->Push(Smi::FromInt(0));
5507 return;
5508 }
5509 target.TakeValue(NOT_INSIDE_TYPEOF);
5510
5511 Result new_value = frame_->Pop();
5512 new_value.ToRegister();
5513
5514 Result old_value; // Only allocated in the postfix case.
5515 if (is_postfix) {
5516 // Allocate a temporary to preserve the old value.
5517 old_value = allocator_->Allocate();
5518 ASSERT(old_value.is_valid());
5519 __ mov(old_value.reg(), new_value.reg());
5520 }
5521 // Ensure the new value is writable.
5522 frame_->Spill(new_value.reg());
5523
5524 // In order to combine the overflow and the smi tag check, we need
5525 // to be able to allocate a byte register. We attempt to do so
5526 // without spilling. If we fail, we will generate separate overflow
5527 // and smi tag checks.
5528 //
5529 // We allocate and clear the temporary byte register before
5530 // performing the count operation since clearing the register using
5531 // xor will clear the overflow flag.
5532 Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
5533 if (tmp.is_valid()) {
5534 __ Set(tmp.reg(), Immediate(0));
5535 }
5536
5537 DeferredCode* deferred = NULL;
5538 if (is_postfix) {
5539 deferred = new DeferredPostfixCountOperation(new_value.reg(),
5540 old_value.reg(),
5541 is_increment);
5542 } else {
5543 deferred = new DeferredPrefixCountOperation(new_value.reg(),
5544 is_increment);
5545 }
5546
5547 if (is_increment) {
5548 __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
5549 } else {
5550 __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
5551 }
5552
5553 // If the count operation didn't overflow and the result is a valid
5554 // smi, we're done. Otherwise, we jump to the deferred slow-case
5555 // code.
5556 if (tmp.is_valid()) {
5557 // We combine the overflow and the smi tag check if we could
5558 // successfully allocate a temporary byte register.
5559 __ setcc(overflow, tmp.reg());
5560 __ or_(Operand(tmp.reg()), new_value.reg());
5561 __ test(tmp.reg(), Immediate(kSmiTagMask));
5562 tmp.Unuse();
5563 deferred->Branch(not_zero);
5564 } else {
5565 // Otherwise we test separately for overflow and smi tag.
5566 deferred->Branch(overflow);
5567 __ test(new_value.reg(), Immediate(kSmiTagMask));
5568 deferred->Branch(not_zero);
5569 }
5570 deferred->BindExit();
5571
5572 // Postfix: store the old value in the allocated slot under the
5573 // reference.
5574 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
5575
5576 frame_->Push(&new_value);
5577 // Non-constant: update the reference.
5578 if (!is_const) target.SetValue(NOT_CONST_INIT);
5579 }
5580
5581 // Postfix: drop the new value and use the old.
5582 if (is_postfix) frame_->Drop();
5583}
5584
5585
5586void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
5587 // Note that due to an optimization in comparison operations (typeof
5588 // compared to a string literal), we can evaluate a binary expression such
5589 // as AND or OR and not leave a value on the frame or in the cc register.
5590 Comment cmnt(masm_, "[ BinaryOperation");
5591 Token::Value op = node->op();
5592
5593 // According to ECMA-262 section 11.11, page 58, the binary logical
5594 // operators must yield the result of one of the two expressions
5595 // before any ToBoolean() conversions. This means that the value
5596 // produced by a && or || operator is not necessarily a boolean.
5597
5598 // NOTE: If the left hand side produces a materialized value (not
5599 // control flow), we force the right hand side to do the same. This
5600 // is necessary because we assume that if we get control flow on the
5601 // last path out of an expression we got it on all paths.
5602 if (op == Token::AND) {
5603 JumpTarget is_true;
5604 ControlDestination dest(&is_true, destination()->false_target(), true);
5605 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
5606
5607 if (dest.false_was_fall_through()) {
5608 // The current false target was used as the fall-through. If
5609 // there are no dangling jumps to is_true then the left
5610 // subexpression was unconditionally false. Otherwise we have
5611 // paths where we do have to evaluate the right subexpression.
5612 if (is_true.is_linked()) {
5613 // We need to compile the right subexpression. If the jump to
5614 // the current false target was a forward jump then we have a
5615 // valid frame, we have just bound the false target, and we
5616 // have to jump around the code for the right subexpression.
5617 if (has_valid_frame()) {
5618 destination()->false_target()->Unuse();
5619 destination()->false_target()->Jump();
5620 }
5621 is_true.Bind();
5622 // The left subexpression compiled to control flow, so the
5623 // right one is free to do so as well.
5624 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
5625 } else {
5626 // We have actually just jumped to or bound the current false
5627 // target but the current control destination is not marked as
5628 // used.
5629 destination()->Use(false);
5630 }
5631
5632 } else if (dest.is_used()) {
5633 // The left subexpression compiled to control flow (and is_true
5634 // was just bound), so the right is free to do so as well.
5635 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
5636
5637 } else {
5638 // We have a materialized value on the frame, so we exit with
5639 // one on all paths. There are possibly also jumps to is_true
5640 // from nested subexpressions.
5641 JumpTarget pop_and_continue;
5642 JumpTarget exit;
5643
5644 // Avoid popping the result if it converts to 'false' using the
5645 // standard ToBoolean() conversion as described in ECMA-262,
5646 // section 9.2, page 30.
5647 //
5648 // Duplicate the TOS value. The duplicate will be popped by
5649 // ToBoolean.
5650 frame_->Dup();
5651 ControlDestination dest(&pop_and_continue, &exit, true);
5652 ToBoolean(&dest);
5653
5654 // Pop the result of evaluating the first part.
5655 frame_->Drop();
5656
5657 // Compile right side expression.
5658 is_true.Bind();
5659 Load(node->right());
5660
5661 // Exit (always with a materialized value).
5662 exit.Bind();
5663 }
5664
5665 } else if (op == Token::OR) {
5666 JumpTarget is_false;
5667 ControlDestination dest(destination()->true_target(), &is_false, false);
5668 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
5669
5670 if (dest.true_was_fall_through()) {
5671 // The current true target was used as the fall-through. If
5672 // there are no dangling jumps to is_false then the left
5673 // subexpression was unconditionally true. Otherwise we have
5674 // paths where we do have to evaluate the right subexpression.
5675 if (is_false.is_linked()) {
5676 // We need to compile the right subexpression. If the jump to
5677 // the current true target was a forward jump then we have a
5678 // valid frame, we have just bound the true target, and we
5679 // have to jump around the code for the right subexpression.
5680 if (has_valid_frame()) {
5681 destination()->true_target()->Unuse();
5682 destination()->true_target()->Jump();
5683 }
5684 is_false.Bind();
5685 // The left subexpression compiled to control flow, so the
5686 // right one is free to do so as well.
5687 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
5688 } else {
5689 // We have just jumped to or bound the current true target but
5690 // the current control destination is not marked as used.
5691 destination()->Use(true);
5692 }
5693
5694 } else if (dest.is_used()) {
5695 // The left subexpression compiled to control flow (and is_false
5696 // was just bound), so the right is free to do so as well.
5697 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
5698
5699 } else {
5700 // We have a materialized value on the frame, so we exit with
5701 // one on all paths. There are possibly also jumps to is_false
5702 // from nested subexpressions.
5703 JumpTarget pop_and_continue;
5704 JumpTarget exit;
5705
5706 // Avoid popping the result if it converts to 'true' using the
5707 // standard ToBoolean() conversion as described in ECMA-262,
5708 // section 9.2, page 30.
5709 //
5710 // Duplicate the TOS value. The duplicate will be popped by
5711 // ToBoolean.
5712 frame_->Dup();
5713 ControlDestination dest(&exit, &pop_and_continue, false);
5714 ToBoolean(&dest);
5715
5716 // Pop the result of evaluating the first part.
5717 frame_->Drop();
5718
5719 // Compile right side expression.
5720 is_false.Bind();
5721 Load(node->right());
5722
5723 // Exit (always with a materialized value).
5724 exit.Bind();
5725 }
5726
5727 } else {
5728 // NOTE: The code below assumes that the slow cases (calls to runtime)
5729 // never return a constant/immutable object.
5730 OverwriteMode overwrite_mode = NO_OVERWRITE;
5731 if (node->left()->AsBinaryOperation() != NULL &&
5732 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
5733 overwrite_mode = OVERWRITE_LEFT;
5734 } else if (node->right()->AsBinaryOperation() != NULL &&
5735 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
5736 overwrite_mode = OVERWRITE_RIGHT;
5737 }
5738
5739 Load(node->left());
5740 Load(node->right());
5741 GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
5742 }
5743}
5744
5745
5746void CodeGenerator::VisitThisFunction(ThisFunction* node) {
5747 frame_->PushFunction();
5748}
5749
5750
5751void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
5752 Comment cmnt(masm_, "[ CompareOperation");
5753
5754 // Get the expressions from the node.
5755 Expression* left = node->left();
5756 Expression* right = node->right();
5757 Token::Value op = node->op();
5758 // To make typeof testing for natives implemented in JavaScript really
5759 // efficient, we generate special code for expressions of the form:
5760 // 'typeof <expression> == <string>'.
5761 UnaryOperation* operation = left->AsUnaryOperation();
5762 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
5763 (operation != NULL && operation->op() == Token::TYPEOF) &&
5764 (right->AsLiteral() != NULL &&
5765 right->AsLiteral()->handle()->IsString())) {
5766 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
5767
5768 // Load the operand and move it to a register.
5769 LoadTypeofExpression(operation->expression());
5770 Result answer = frame_->Pop();
5771 answer.ToRegister();
5772
5773 if (check->Equals(Heap::number_symbol())) {
5774 __ test(answer.reg(), Immediate(kSmiTagMask));
5775 destination()->true_target()->Branch(zero);
5776 frame_->Spill(answer.reg());
5777 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
5778 __ cmp(answer.reg(), Factory::heap_number_map());
5779 answer.Unuse();
5780 destination()->Split(equal);
5781
5782 } else if (check->Equals(Heap::string_symbol())) {
5783 __ test(answer.reg(), Immediate(kSmiTagMask));
5784 destination()->false_target()->Branch(zero);
5785
5786 // It can be an undetectable string object.
5787 Result temp = allocator()->Allocate();
5788 ASSERT(temp.is_valid());
5789 __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
5790 __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
5791 __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
5792 destination()->false_target()->Branch(not_zero);
5793 __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
5794 __ movzx_b(temp.reg(),
5795 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
5796 __ cmp(temp.reg(), FIRST_NONSTRING_TYPE);
5797 temp.Unuse();
5798 answer.Unuse();
5799 destination()->Split(less);
5800
5801 } else if (check->Equals(Heap::boolean_symbol())) {
5802 __ cmp(answer.reg(), Factory::true_value());
5803 destination()->true_target()->Branch(equal);
5804 __ cmp(answer.reg(), Factory::false_value());
5805 answer.Unuse();
5806 destination()->Split(equal);
5807
5808 } else if (check->Equals(Heap::undefined_symbol())) {
5809 __ cmp(answer.reg(), Factory::undefined_value());
5810 destination()->true_target()->Branch(equal);
5811
5812 __ test(answer.reg(), Immediate(kSmiTagMask));
5813 destination()->false_target()->Branch(zero);
5814
5815 // It can be an undetectable object.
5816 frame_->Spill(answer.reg());
5817 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
5818 __ movzx_b(answer.reg(),
5819 FieldOperand(answer.reg(), Map::kBitFieldOffset));
5820 __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
5821 answer.Unuse();
5822 destination()->Split(not_zero);
5823
5824 } else if (check->Equals(Heap::function_symbol())) {
5825 __ test(answer.reg(), Immediate(kSmiTagMask));
5826 destination()->false_target()->Branch(zero);
5827 frame_->Spill(answer.reg());
5828 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
5829 answer.Unuse();
5830 destination()->Split(equal);
5831
5832 } else if (check->Equals(Heap::object_symbol())) {
5833 __ test(answer.reg(), Immediate(kSmiTagMask));
5834 destination()->false_target()->Branch(zero);
5835 __ cmp(answer.reg(), Factory::null_value());
5836 destination()->true_target()->Branch(equal);
5837
5838 // It can be an undetectable object.
5839 Result map = allocator()->Allocate();
5840 ASSERT(map.is_valid());
5841 __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
5842 __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
5843 __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
5844 destination()->false_target()->Branch(not_zero);
5845 __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
5846 __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
5847 __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
5848 destination()->false_target()->Branch(less);
5849 __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
5850 answer.Unuse();
5851 map.Unuse();
5852 destination()->Split(less_equal);
5853 } else {
5854 // Uncommon case: typeof testing against a string literal that is
5855 // never returned from the typeof operator.
5856 answer.Unuse();
5857 destination()->Goto(false);
5858 }
5859 return;
5860 }
5861
5862 Condition cc = no_condition;
5863 bool strict = false;
5864 switch (op) {
5865 case Token::EQ_STRICT:
5866 strict = true;
5867 // Fall through
5868 case Token::EQ:
5869 cc = equal;
5870 break;
5871 case Token::LT:
5872 cc = less;
5873 break;
5874 case Token::GT:
5875 cc = greater;
5876 break;
5877 case Token::LTE:
5878 cc = less_equal;
5879 break;
5880 case Token::GTE:
5881 cc = greater_equal;
5882 break;
5883 case Token::IN: {
5884 Load(left);
5885 Load(right);
5886 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
5887 frame_->Push(&answer); // push the result
5888 return;
5889 }
5890 case Token::INSTANCEOF: {
5891 Load(left);
5892 Load(right);
5893 InstanceofStub stub;
5894 Result answer = frame_->CallStub(&stub, 2);
5895 answer.ToRegister();
5896 __ test(answer.reg(), Operand(answer.reg()));
5897 answer.Unuse();
5898 destination()->Split(zero);
5899 return;
5900 }
5901 default:
5902 UNREACHABLE();
5903 }
5904 Load(left);
5905 Load(right);
5906 Comparison(cc, strict, destination());
5907}
5908
5909
5910#ifdef DEBUG
5911bool CodeGenerator::HasValidEntryRegisters() {
5912 return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
5913 && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
5914 && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
5915 && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
5916 && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
5917}
5918#endif
5919
5920
5921// Emit a LoadIC call to get the value from receiver and leave it in
5922// dst. The receiver register is restored after the call.
5923class DeferredReferenceGetNamedValue: public DeferredCode {
5924 public:
5925 DeferredReferenceGetNamedValue(Register dst,
5926 Register receiver,
5927 Handle<String> name)
5928 : dst_(dst), receiver_(receiver), name_(name) {
5929 set_comment("[ DeferredReferenceGetNamedValue");
5930 }
5931
5932 virtual void Generate();
5933
5934 Label* patch_site() { return &patch_site_; }
5935
5936 private:
5937 Label patch_site_;
5938 Register dst_;
5939 Register receiver_;
5940 Handle<String> name_;
5941};
5942
5943
5944void DeferredReferenceGetNamedValue::Generate() {
5945 __ push(receiver_);
5946 __ Set(ecx, Immediate(name_));
5947 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5948 __ call(ic, RelocInfo::CODE_TARGET);
5949 // The call must be followed by a test eax instruction to indicate
5950 // that the inobject property case was inlined.
5951 //
5952 // Store the delta to the map check instruction here in the test
5953 // instruction. Use masm_-> instead of the __ macro since the
5954 // latter can't return a value.
5955 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5956 // Here we use masm_-> instead of the __ macro because this is the
5957 // instruction that gets patched and coverage code gets in the way.
5958 masm_->test(eax, Immediate(-delta_to_patch_site));
5959 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5960
5961 if (!dst_.is(eax)) __ mov(dst_, eax);
5962 __ pop(receiver_);
5963}
5964
5965
5966class DeferredReferenceGetKeyedValue: public DeferredCode {
5967 public:
5968 explicit DeferredReferenceGetKeyedValue(Register dst,
5969 Register receiver,
5970 Register key,
5971 bool is_global)
5972 : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
5973 set_comment("[ DeferredReferenceGetKeyedValue");
5974 }
5975
5976 virtual void Generate();
5977
5978 Label* patch_site() { return &patch_site_; }
5979
5980 private:
5981 Label patch_site_;
5982 Register dst_;
5983 Register receiver_;
5984 Register key_;
5985 bool is_global_;
5986};
5987
5988
5989void DeferredReferenceGetKeyedValue::Generate() {
5990 __ push(receiver_); // First IC argument.
5991 __ push(key_); // Second IC argument.
5992
5993 // Calculate the delta from the IC call instruction to the map check
5994 // cmp instruction in the inlined version. This delta is stored in
5995 // a test(eax, delta) instruction after the call so that we can find
5996 // it in the IC initialization code and patch the cmp instruction.
5997 // This means that we cannot allow test instructions after calls to
5998 // KeyedLoadIC stubs in other places.
5999 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
6000 RelocInfo::Mode mode = is_global_
6001 ? RelocInfo::CODE_TARGET_CONTEXT
6002 : RelocInfo::CODE_TARGET;
6003 __ call(ic, mode);
6004 // The delta from the start of the map-compare instruction to the
6005 // test instruction. We use masm_-> directly here instead of the __
6006 // macro because the macro sometimes uses macro expansion to turn
6007 // into something that can't return a value. This is encountered
6008 // when doing generated code coverage tests.
6009 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
6010 // Here we use masm_-> instead of the __ macro because this is the
6011 // instruction that gets patched and coverage code gets in the way.
6012 masm_->test(eax, Immediate(-delta_to_patch_site));
6013 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
6014
6015 if (!dst_.is(eax)) __ mov(dst_, eax);
6016 __ pop(key_);
6017 __ pop(receiver_);
6018}
6019
6020
6021class DeferredReferenceSetKeyedValue: public DeferredCode {
6022 public:
6023 DeferredReferenceSetKeyedValue(Register value,
6024 Register key,
6025 Register receiver)
6026 : value_(value), key_(key), receiver_(receiver) {
6027 set_comment("[ DeferredReferenceSetKeyedValue");
6028 }
6029
6030 virtual void Generate();
6031
6032 Label* patch_site() { return &patch_site_; }
6033
6034 private:
6035 Register value_;
6036 Register key_;
6037 Register receiver_;
6038 Label patch_site_;
6039};
6040
6041
6042void DeferredReferenceSetKeyedValue::Generate() {
6043 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
6044 // Push receiver and key arguments on the stack.
6045 __ push(receiver_);
6046 __ push(key_);
6047 // Move value argument to eax as expected by the IC stub.
6048 if (!value_.is(eax)) __ mov(eax, value_);
6049 // Call the IC stub.
6050 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
6051 __ call(ic, RelocInfo::CODE_TARGET);
6052 // The delta from the start of the map-compare instruction to the
6053 // test instruction. We use masm_-> directly here instead of the
6054 // __ macro because the macro sometimes uses macro expansion to turn
6055 // into something that can't return a value. This is encountered
6056 // when doing generated code coverage tests.
6057 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
6058 // Here we use masm_-> instead of the __ macro because this is the
6059 // instruction that gets patched and coverage code gets in the way.
6060 masm_->test(eax, Immediate(-delta_to_patch_site));
6061 // Restore value (returned from store IC), key and receiver
6062 // registers.
6063 if (!value_.is(eax)) __ mov(value_, eax);
6064 __ pop(key_);
6065 __ pop(receiver_);
6066}
6067
6068
6069#undef __
6070#define __ ACCESS_MASM(masm)
6071
6072
6073Handle<String> Reference::GetName() {
6074 ASSERT(type_ == NAMED);
6075 Property* property = expression_->AsProperty();
6076 if (property == NULL) {
6077 // Global variable reference treated as a named property reference.
6078 VariableProxy* proxy = expression_->AsVariableProxy();
6079 ASSERT(proxy->AsVariable() != NULL);
6080 ASSERT(proxy->AsVariable()->is_global());
6081 return proxy->name();
6082 } else {
6083 Literal* raw_name = property->key()->AsLiteral();
6084 ASSERT(raw_name != NULL);
6085 return Handle<String>(String::cast(*raw_name->handle()));
6086 }
6087}
6088
6089
6090void Reference::GetValue(TypeofState typeof_state) {
6091 ASSERT(!cgen_->in_spilled_code());
6092 ASSERT(cgen_->HasValidEntryRegisters());
6093 ASSERT(!is_illegal());
6094 MacroAssembler* masm = cgen_->masm();
6095
6096 // Record the source position for the property load.
6097 Property* property = expression_->AsProperty();
6098 if (property != NULL) {
6099 cgen_->CodeForSourcePosition(property->position());
6100 }
6101
6102 switch (type_) {
6103 case SLOT: {
6104 Comment cmnt(masm, "[ Load from Slot");
6105 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6106 ASSERT(slot != NULL);
6107 cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
6108 break;
6109 }
6110
6111 case NAMED: {
6112 // TODO(1241834): Make sure that it is safe to ignore the
6113 // distinction between expressions in a typeof and not in a
6114 // typeof. If there is a chance that reference errors can be
6115 // thrown below, we must distinguish between the two kinds of
6116 // loads (typeof expression loads must not throw a reference
6117 // error).
6118 Variable* var = expression_->AsVariableProxy()->AsVariable();
6119 bool is_global = var != NULL;
6120 ASSERT(!is_global || var->is_global());
6121
6122 // Do not inline the inobject property case for loads from the global
6123 // object. Also do not inline for unoptimized code. This saves time
6124 // in the code generator. Unoptimized code is toplevel code or code
6125 // that is not in a loop.
6126 if (is_global ||
6127 cgen_->scope()->is_global_scope() ||
6128 cgen_->loop_nesting() == 0) {
6129 Comment cmnt(masm, "[ Load from named Property");
6130 cgen_->frame()->Push(GetName());
6131
6132 RelocInfo::Mode mode = is_global
6133 ? RelocInfo::CODE_TARGET_CONTEXT
6134 : RelocInfo::CODE_TARGET;
6135 Result answer = cgen_->frame()->CallLoadIC(mode);
6136 // A test eax instruction following the call signals that the
6137 // inobject property case was inlined. Ensure that there is not
6138 // a test eax instruction here.
6139 __ nop();
6140 cgen_->frame()->Push(&answer);
6141 } else {
6142 // Inline the inobject property case.
6143 Comment cmnt(masm, "[ Inlined named property load");
6144 Result receiver = cgen_->frame()->Pop();
6145 receiver.ToRegister();
6146
6147 Result value = cgen_->allocator()->Allocate();
6148 ASSERT(value.is_valid());
6149 DeferredReferenceGetNamedValue* deferred =
6150 new DeferredReferenceGetNamedValue(value.reg(),
6151 receiver.reg(),
6152 GetName());
6153
6154 // Check that the receiver is a heap object.
6155 __ test(receiver.reg(), Immediate(kSmiTagMask));
6156 deferred->Branch(zero);
6157
6158 __ bind(deferred->patch_site());
6159 // This is the map check instruction that will be patched (so we can't
6160 // use the double underscore macro that may insert instructions).
6161 // Initially use an invalid map to force a failure.
6162 masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
6163 Immediate(Factory::null_value()));
6164 // This branch is always a forwards branch so it's always a fixed
6165 // size which allows the assert below to succeed and patching to work.
6166 deferred->Branch(not_equal);
6167
6168 // The delta from the patch label to the load offset must be
6169 // statically known.
6170 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
6171 LoadIC::kOffsetToLoadInstruction);
6172 // The initial (invalid) offset has to be large enough to force
6173 // a 32-bit instruction encoding to allow patching with an
6174 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
6175 int offset = kMaxInt;
6176 masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
6177
6178 __ IncrementCounter(&Counters::named_load_inline, 1);
6179 deferred->BindExit();
6180 cgen_->frame()->Push(&receiver);
6181 cgen_->frame()->Push(&value);
6182 }
6183 break;
6184 }
6185
6186 case KEYED: {
6187 // TODO(1241834): Make sure that this it is safe to ignore the
6188 // distinction between expressions in a typeof and not in a typeof.
6189 Comment cmnt(masm, "[ Load from keyed Property");
6190 Variable* var = expression_->AsVariableProxy()->AsVariable();
6191 bool is_global = var != NULL;
6192 ASSERT(!is_global || var->is_global());
6193
6194 // Inline array load code if inside of a loop. We do not know
6195 // the receiver map yet, so we initially generate the code with
6196 // a check against an invalid map. In the inline cache code, we
6197 // patch the map check if appropriate.
6198 if (cgen_->loop_nesting() > 0) {
6199 Comment cmnt(masm, "[ Inlined load from keyed Property");
6200
6201 Result key = cgen_->frame()->Pop();
6202 Result receiver = cgen_->frame()->Pop();
6203 key.ToRegister();
6204 receiver.ToRegister();
6205
6206 // Use a fresh temporary to load the elements without destroying
6207 // the receiver which is needed for the deferred slow case.
6208 Result elements = cgen_->allocator()->Allocate();
6209 ASSERT(elements.is_valid());
6210
6211 // Use a fresh temporary for the index and later the loaded
6212 // value.
6213 Result index = cgen_->allocator()->Allocate();
6214 ASSERT(index.is_valid());
6215
6216 DeferredReferenceGetKeyedValue* deferred =
6217 new DeferredReferenceGetKeyedValue(index.reg(),
6218 receiver.reg(),
6219 key.reg(),
6220 is_global);
6221
6222 // Check that the receiver is not a smi (only needed if this
6223 // is not a load from the global context) and that it has the
6224 // expected map.
6225 if (!is_global) {
6226 __ test(receiver.reg(), Immediate(kSmiTagMask));
6227 deferred->Branch(zero);
6228 }
6229
6230 // Initially, use an invalid map. The map is patched in the IC
6231 // initialization code.
6232 __ bind(deferred->patch_site());
6233 // Use masm-> here instead of the double underscore macro since extra
6234 // coverage code can interfere with the patching.
6235 masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
6236 Immediate(Factory::null_value()));
6237 deferred->Branch(not_equal);
6238
6239 // Check that the key is a smi.
6240 __ test(key.reg(), Immediate(kSmiTagMask));
6241 deferred->Branch(not_zero);
6242
6243 // Get the elements array from the receiver and check that it
6244 // is not a dictionary.
6245 __ mov(elements.reg(),
6246 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6247 __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
6248 Immediate(Factory::fixed_array_map()));
6249 deferred->Branch(not_equal);
6250
6251 // Shift the key to get the actual index value and check that
6252 // it is within bounds.
6253 __ mov(index.reg(), key.reg());
6254 __ sar(index.reg(), kSmiTagSize);
6255 __ cmp(index.reg(),
6256 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
6257 deferred->Branch(above_equal);
6258
6259 // Load and check that the result is not the hole. We could
6260 // reuse the index or elements register for the value.
6261 //
6262 // TODO(206): Consider whether it makes sense to try some
6263 // heuristic about which register to reuse. For example, if
6264 // one is eax, the we can reuse that one because the value
6265 // coming from the deferred code will be in eax.
6266 Result value = index;
6267 __ mov(value.reg(), Operand(elements.reg(),
6268 index.reg(),
6269 times_4,
6270 FixedArray::kHeaderSize - kHeapObjectTag));
6271 elements.Unuse();
6272 index.Unuse();
6273 __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
6274 deferred->Branch(equal);
6275 __ IncrementCounter(&Counters::keyed_load_inline, 1);
6276
6277 deferred->BindExit();
6278 // Restore the receiver and key to the frame and push the
6279 // result on top of it.
6280 cgen_->frame()->Push(&receiver);
6281 cgen_->frame()->Push(&key);
6282 cgen_->frame()->Push(&value);
6283
6284 } else {
6285 Comment cmnt(masm, "[ Load from keyed Property");
6286 RelocInfo::Mode mode = is_global
6287 ? RelocInfo::CODE_TARGET_CONTEXT
6288 : RelocInfo::CODE_TARGET;
6289 Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
6290 // Make sure that we do not have a test instruction after the
6291 // call. A test instruction after the call is used to
6292 // indicate that we have generated an inline version of the
6293 // keyed load. The explicit nop instruction is here because
6294 // the push that follows might be peep-hole optimized away.
6295 __ nop();
6296 cgen_->frame()->Push(&answer);
6297 }
6298 break;
6299 }
6300
6301 default:
6302 UNREACHABLE();
6303 }
6304}
6305
6306
6307void Reference::TakeValue(TypeofState typeof_state) {
6308 // For non-constant frame-allocated slots, we invalidate the value in the
6309 // slot. For all others, we fall back on GetValue.
6310 ASSERT(!cgen_->in_spilled_code());
6311 ASSERT(!is_illegal());
6312 if (type_ != SLOT) {
6313 GetValue(typeof_state);
6314 return;
6315 }
6316
6317 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6318 ASSERT(slot != NULL);
6319 if (slot->type() == Slot::LOOKUP ||
6320 slot->type() == Slot::CONTEXT ||
6321 slot->var()->mode() == Variable::CONST ||
6322 slot->is_arguments()) {
6323 GetValue(typeof_state);
6324 return;
6325 }
6326
6327 // Only non-constant, frame-allocated parameters and locals can
6328 // reach here. Be careful not to use the optimizations for arguments
6329 // object access since it may not have been initialized yet.
6330 ASSERT(!slot->is_arguments());
6331 if (slot->type() == Slot::PARAMETER) {
6332 cgen_->frame()->TakeParameterAt(slot->index());
6333 } else {
6334 ASSERT(slot->type() == Slot::LOCAL);
6335 cgen_->frame()->TakeLocalAt(slot->index());
6336 }
6337}
6338
6339
6340void Reference::SetValue(InitState init_state) {
6341 ASSERT(cgen_->HasValidEntryRegisters());
6342 ASSERT(!is_illegal());
6343 MacroAssembler* masm = cgen_->masm();
6344 switch (type_) {
6345 case SLOT: {
6346 Comment cmnt(masm, "[ Store to Slot");
6347 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6348 ASSERT(slot != NULL);
6349 cgen_->StoreToSlot(slot, init_state);
6350 break;
6351 }
6352
6353 case NAMED: {
6354 Comment cmnt(masm, "[ Store to named Property");
6355 cgen_->frame()->Push(GetName());
6356 Result answer = cgen_->frame()->CallStoreIC();
6357 cgen_->frame()->Push(&answer);
6358 break;
6359 }
6360
6361 case KEYED: {
6362 Comment cmnt(masm, "[ Store to keyed Property");
6363
6364 // Generate inlined version of the keyed store if the code is in
6365 // a loop and the key is likely to be a smi.
6366 Property* property = expression()->AsProperty();
6367 ASSERT(property != NULL);
6368 SmiAnalysis* key_smi_analysis = property->key()->type();
6369
6370 if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
6371 Comment cmnt(masm, "[ Inlined store to keyed Property");
6372
6373 // Get the receiver, key and value into registers.
6374 Result value = cgen_->frame()->Pop();
6375 Result key = cgen_->frame()->Pop();
6376 Result receiver = cgen_->frame()->Pop();
6377
6378 Result tmp = cgen_->allocator_->Allocate();
6379 ASSERT(tmp.is_valid());
6380
6381 // Determine whether the value is a constant before putting it
6382 // in a register.
6383 bool value_is_constant = value.is_constant();
6384
6385 // Make sure that value, key and receiver are in registers.
6386 value.ToRegister();
6387 key.ToRegister();
6388 receiver.ToRegister();
6389
6390 DeferredReferenceSetKeyedValue* deferred =
6391 new DeferredReferenceSetKeyedValue(value.reg(),
6392 key.reg(),
6393 receiver.reg());
6394
6395 // Check that the value is a smi if it is not a constant. We
6396 // can skip the write barrier for smis and constants.
6397 if (!value_is_constant) {
6398 __ test(value.reg(), Immediate(kSmiTagMask));
6399 deferred->Branch(not_zero);
6400 }
6401
6402 // Check that the key is a non-negative smi.
6403 __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
6404 deferred->Branch(not_zero);
6405
6406 // Check that the receiver is not a smi.
6407 __ test(receiver.reg(), Immediate(kSmiTagMask));
6408 deferred->Branch(zero);
6409
6410 // Check that the receiver is a JSArray.
6411 __ mov(tmp.reg(),
6412 FieldOperand(receiver.reg(), HeapObject::kMapOffset));
6413 __ movzx_b(tmp.reg(),
6414 FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
6415 __ cmp(tmp.reg(), JS_ARRAY_TYPE);
6416 deferred->Branch(not_equal);
6417
6418 // Check that the key is within bounds. Both the key and the
6419 // length of the JSArray are smis.
6420 __ cmp(key.reg(),
6421 FieldOperand(receiver.reg(), JSArray::kLengthOffset));
6422 deferred->Branch(greater_equal);
6423
6424 // Get the elements array from the receiver and check that it
6425 // is not a dictionary.
6426 __ mov(tmp.reg(),
6427 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6428 // Bind the deferred code patch site to be able to locate the
6429 // fixed array map comparison. When debugging, we patch this
6430 // comparison to always fail so that we will hit the IC call
6431 // in the deferred code which will allow the debugger to
6432 // break for fast case stores.
6433 __ bind(deferred->patch_site());
6434 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6435 Immediate(Factory::fixed_array_map()));
6436 deferred->Branch(not_equal);
6437
6438 // Store the value.
6439 __ mov(Operand(tmp.reg(),
6440 key.reg(),
6441 times_2,
6442 FixedArray::kHeaderSize - kHeapObjectTag),
6443 value.reg());
6444 __ IncrementCounter(&Counters::keyed_store_inline, 1);
6445
6446 deferred->BindExit();
6447
6448 cgen_->frame()->Push(&receiver);
6449 cgen_->frame()->Push(&key);
6450 cgen_->frame()->Push(&value);
6451 } else {
6452 Result answer = cgen_->frame()->CallKeyedStoreIC();
6453 // Make sure that we do not have a test instruction after the
6454 // call. A test instruction after the call is used to
6455 // indicate that we have generated an inline version of the
6456 // keyed store.
6457 __ nop();
6458 cgen_->frame()->Push(&answer);
6459 }
6460 break;
6461 }
6462
6463 default:
6464 UNREACHABLE();
6465 }
6466}
6467
6468
6469// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
6470void ToBooleanStub::Generate(MacroAssembler* masm) {
6471 Label false_result, true_result, not_string;
6472 __ mov(eax, Operand(esp, 1 * kPointerSize));
6473
6474 // 'null' => false.
6475 __ cmp(eax, Factory::null_value());
6476 __ j(equal, &false_result);
6477
6478 // Get the map and type of the heap object.
6479 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
6480 __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
6481
6482 // Undetectable => false.
6483 __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
6484 __ and_(ebx, 1 << Map::kIsUndetectable);
6485 __ j(not_zero, &false_result);
6486
6487 // JavaScript object => true.
6488 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
6489 __ j(above_equal, &true_result);
6490
6491 // String value => false iff empty.
6492 __ cmp(ecx, FIRST_NONSTRING_TYPE);
6493 __ j(above_equal, &not_string);
6494 __ and_(ecx, kStringSizeMask);
6495 __ cmp(ecx, kShortStringTag);
6496 __ j(not_equal, &true_result); // Empty string is always short.
6497 __ mov(edx, FieldOperand(eax, String::kLengthOffset));
6498 __ shr(edx, String::kShortLengthShift);
6499 __ j(zero, &false_result);
6500 __ jmp(&true_result);
6501
6502 __ bind(&not_string);
6503 // HeapNumber => false iff +0, -0, or NaN.
6504 __ cmp(edx, Factory::heap_number_map());
6505 __ j(not_equal, &true_result);
6506 __ fldz();
6507 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
6508 __ fucompp();
6509 __ push(eax);
6510 __ fnstsw_ax();
6511 __ sahf();
6512 __ pop(eax);
6513 __ j(zero, &false_result);
6514 // Fall through to |true_result|.
6515
6516 // Return 1/0 for true/false in eax.
6517 __ bind(&true_result);
6518 __ mov(eax, 1);
6519 __ ret(1 * kPointerSize);
6520 __ bind(&false_result);
6521 __ mov(eax, 0);
6522 __ ret(1 * kPointerSize);
6523}
6524
6525
6526void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
6527 // Perform fast-case smi code for the operation (eax <op> ebx) and
6528 // leave result in register eax.
6529
6530 // Prepare the smi check of both operands by or'ing them together
6531 // before checking against the smi mask.
6532 __ mov(ecx, Operand(ebx));
6533 __ or_(ecx, Operand(eax));
6534
6535 switch (op_) {
6536 case Token::ADD:
6537 __ add(eax, Operand(ebx)); // add optimistically
6538 __ j(overflow, slow, not_taken);
6539 break;
6540
6541 case Token::SUB:
6542 __ sub(eax, Operand(ebx)); // subtract optimistically
6543 __ j(overflow, slow, not_taken);
6544 break;
6545
6546 case Token::DIV:
6547 case Token::MOD:
6548 // Sign extend eax into edx:eax.
6549 __ cdq();
6550 // Check for 0 divisor.
6551 __ test(ebx, Operand(ebx));
6552 __ j(zero, slow, not_taken);
6553 break;
6554
6555 default:
6556 // Fall-through to smi check.
6557 break;
6558 }
6559
6560 // Perform the actual smi check.
6561 ASSERT(kSmiTag == 0); // adjust zero check if not the case
6562 __ test(ecx, Immediate(kSmiTagMask));
6563 __ j(not_zero, slow, not_taken);
6564
6565 switch (op_) {
6566 case Token::ADD:
6567 case Token::SUB:
6568 // Do nothing here.
6569 break;
6570
6571 case Token::MUL:
6572 // If the smi tag is 0 we can just leave the tag on one operand.
6573 ASSERT(kSmiTag == 0); // adjust code below if not the case
6574 // Remove tag from one of the operands (but keep sign).
6575 __ sar(eax, kSmiTagSize);
6576 // Do multiplication.
6577 __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
6578 // Go slow on overflows.
6579 __ j(overflow, slow, not_taken);
6580 // Check for negative zero result.
6581 __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
6582 break;
6583
6584 case Token::DIV:
6585 // Divide edx:eax by ebx.
6586 __ idiv(ebx);
6587 // Check for the corner case of dividing the most negative smi
6588 // by -1. We cannot use the overflow flag, since it is not set
6589 // by idiv instruction.
6590 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
6591 __ cmp(eax, 0x40000000);
6592 __ j(equal, slow);
6593 // Check for negative zero result.
6594 __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
6595 // Check that the remainder is zero.
6596 __ test(edx, Operand(edx));
6597 __ j(not_zero, slow);
6598 // Tag the result and store it in register eax.
6599 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
6600 __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
6601 break;
6602
6603 case Token::MOD:
6604 // Divide edx:eax by ebx.
6605 __ idiv(ebx);
6606 // Check for negative zero result.
6607 __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y
6608 // Move remainder to register eax.
6609 __ mov(eax, Operand(edx));
6610 break;
6611
6612 case Token::BIT_OR:
6613 __ or_(eax, Operand(ebx));
6614 break;
6615
6616 case Token::BIT_AND:
6617 __ and_(eax, Operand(ebx));
6618 break;
6619
6620 case Token::BIT_XOR:
6621 __ xor_(eax, Operand(ebx));
6622 break;
6623
6624 case Token::SHL:
6625 case Token::SHR:
6626 case Token::SAR:
6627 // Move the second operand into register ecx.
6628 __ mov(ecx, Operand(ebx));
6629 // Remove tags from operands (but keep sign).
6630 __ sar(eax, kSmiTagSize);
6631 __ sar(ecx, kSmiTagSize);
6632 // Perform the operation.
6633 switch (op_) {
6634 case Token::SAR:
6635 __ sar(eax);
6636 // No checks of result necessary
6637 break;
6638 case Token::SHR:
6639 __ shr(eax);
6640 // Check that the *unsigned* result fits in a smi.
6641 // Neither of the two high-order bits can be set:
6642 // - 0x80000000: high bit would be lost when smi tagging.
6643 // - 0x40000000: this number would convert to negative when
6644 // Smi tagging these two cases can only happen with shifts
6645 // by 0 or 1 when handed a valid smi.
6646 __ test(eax, Immediate(0xc0000000));
6647 __ j(not_zero, slow, not_taken);
6648 break;
6649 case Token::SHL:
6650 __ shl(eax);
6651 // Check that the *signed* result fits in a smi.
6652 __ cmp(eax, 0xc0000000);
6653 __ j(sign, slow, not_taken);
6654 break;
6655 default:
6656 UNREACHABLE();
6657 }
6658 // Tag the result and store it in register eax.
6659 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
6660 __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
6661 break;
6662
6663 default:
6664 UNREACHABLE();
6665 break;
6666 }
6667}
6668
6669
6670void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
6671 Label call_runtime;
6672
6673 if (flags_ == SMI_CODE_IN_STUB) {
6674 // The fast case smi code wasn't inlined in the stub caller
6675 // code. Generate it here to speed up common operations.
6676 Label slow;
6677 __ mov(ebx, Operand(esp, 1 * kPointerSize)); // get y
6678 __ mov(eax, Operand(esp, 2 * kPointerSize)); // get x
6679 GenerateSmiCode(masm, &slow);
6680 __ ret(2 * kPointerSize); // remove both operands
6681
6682 // Too bad. The fast case smi code didn't succeed.
6683 __ bind(&slow);
6684 }
6685
6686 // Setup registers.
6687 __ mov(eax, Operand(esp, 1 * kPointerSize)); // get y
6688 __ mov(edx, Operand(esp, 2 * kPointerSize)); // get x
6689
6690 // Floating point case.
6691 switch (op_) {
6692 case Token::ADD:
6693 case Token::SUB:
6694 case Token::MUL:
6695 case Token::DIV: {
6696 // eax: y
6697 // edx: x
6698
6699 if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
6700 CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
6701 FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
6702
6703 switch (op_) {
6704 case Token::ADD: __ addsd(xmm0, xmm1); break;
6705 case Token::SUB: __ subsd(xmm0, xmm1); break;
6706 case Token::MUL: __ mulsd(xmm0, xmm1); break;
6707 case Token::DIV: __ divsd(xmm0, xmm1); break;
6708 default: UNREACHABLE();
6709 }
6710 // Allocate a heap number, if needed.
6711 Label skip_allocation;
6712 switch (mode_) {
6713 case OVERWRITE_LEFT:
6714 __ mov(eax, Operand(edx));
6715 // Fall through!
6716 case OVERWRITE_RIGHT:
6717 // If the argument in eax is already an object, we skip the
6718 // allocation of a heap number.
6719 __ test(eax, Immediate(kSmiTagMask));
6720 __ j(not_zero, &skip_allocation, not_taken);
6721 // Fall through!
6722 case NO_OVERWRITE:
6723 FloatingPointHelper::AllocateHeapNumber(masm,
6724 &call_runtime,
6725 ecx,
6726 edx,
6727 eax);
6728 __ bind(&skip_allocation);
6729 break;
6730 default: UNREACHABLE();
6731 }
6732 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
6733 __ ret(2 * kPointerSize);
6734
6735 } else { // SSE2 not available, use FPU.
6736 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
6737 // Allocate a heap number, if needed.
6738 Label skip_allocation;
6739 switch (mode_) {
6740 case OVERWRITE_LEFT:
6741 __ mov(eax, Operand(edx));
6742 // Fall through!
6743 case OVERWRITE_RIGHT:
6744 // If the argument in eax is already an object, we skip the
6745 // allocation of a heap number.
6746 __ test(eax, Immediate(kSmiTagMask));
6747 __ j(not_zero, &skip_allocation, not_taken);
6748 // Fall through!
6749 case NO_OVERWRITE:
6750 FloatingPointHelper::AllocateHeapNumber(masm,
6751 &call_runtime,
6752 ecx,
6753 edx,
6754 eax);
6755 __ bind(&skip_allocation);
6756 break;
6757 default: UNREACHABLE();
6758 }
6759 FloatingPointHelper::LoadFloatOperands(masm, ecx);
6760
6761 switch (op_) {
6762 case Token::ADD: __ faddp(1); break;
6763 case Token::SUB: __ fsubp(1); break;
6764 case Token::MUL: __ fmulp(1); break;
6765 case Token::DIV: __ fdivp(1); break;
6766 default: UNREACHABLE();
6767 }
6768 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
6769 __ ret(2 * kPointerSize);
6770 }
6771 }
6772 case Token::MOD: {
6773 // For MOD we go directly to runtime in the non-smi case.
6774 break;
6775 }
6776 case Token::BIT_OR:
6777 case Token::BIT_AND:
6778 case Token::BIT_XOR:
6779 case Token::SAR:
6780 case Token::SHL:
6781 case Token::SHR: {
6782 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
6783 FloatingPointHelper::LoadFloatOperands(masm, ecx);
6784
6785 Label skip_allocation, non_smi_result, operand_conversion_failure;
6786
6787 // Reserve space for converted numbers.
6788 __ sub(Operand(esp), Immediate(2 * kPointerSize));
6789
6790 if (use_sse3_) {
6791 // Truncate the operands to 32-bit integers and check for
6792 // exceptions in doing so.
6793 CpuFeatures::Scope scope(CpuFeatures::SSE3);
6794 __ fisttp_s(Operand(esp, 0 * kPointerSize));
6795 __ fisttp_s(Operand(esp, 1 * kPointerSize));
6796 __ fnstsw_ax();
6797 __ test(eax, Immediate(1));
6798 __ j(not_zero, &operand_conversion_failure);
6799 } else {
6800 // Check if right operand is int32.
6801 __ fist_s(Operand(esp, 0 * kPointerSize));
6802 __ fild_s(Operand(esp, 0 * kPointerSize));
6803 __ fucompp();
6804 __ fnstsw_ax();
6805 __ sahf();
6806 __ j(not_zero, &operand_conversion_failure);
6807 __ j(parity_even, &operand_conversion_failure);
6808
6809 // Check if left operand is int32.
6810 __ fist_s(Operand(esp, 1 * kPointerSize));
6811 __ fild_s(Operand(esp, 1 * kPointerSize));
6812 __ fucompp();
6813 __ fnstsw_ax();
6814 __ sahf();
6815 __ j(not_zero, &operand_conversion_failure);
6816 __ j(parity_even, &operand_conversion_failure);
6817 }
6818
6819 // Get int32 operands and perform bitop.
6820 __ pop(ecx);
6821 __ pop(eax);
6822 switch (op_) {
6823 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
6824 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
6825 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
6826 case Token::SAR: __ sar(eax); break;
6827 case Token::SHL: __ shl(eax); break;
6828 case Token::SHR: __ shr(eax); break;
6829 default: UNREACHABLE();
6830 }
6831 if (op_ == Token::SHR) {
6832 // Check if result is non-negative and fits in a smi.
6833 __ test(eax, Immediate(0xc0000000));
6834 __ j(not_zero, &non_smi_result);
6835 } else {
6836 // Check if result fits in a smi.
6837 __ cmp(eax, 0xc0000000);
6838 __ j(negative, &non_smi_result);
6839 }
6840 // Tag smi result and return.
6841 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
6842 __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
6843 __ ret(2 * kPointerSize);
6844
6845 // All ops except SHR return a signed int32 that we load in a HeapNumber.
6846 if (op_ != Token::SHR) {
6847 __ bind(&non_smi_result);
6848 // Allocate a heap number if needed.
6849 __ mov(ebx, Operand(eax)); // ebx: result
6850 switch (mode_) {
6851 case OVERWRITE_LEFT:
6852 case OVERWRITE_RIGHT:
6853 // If the operand was an object, we skip the
6854 // allocation of a heap number.
6855 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
6856 1 * kPointerSize : 2 * kPointerSize));
6857 __ test(eax, Immediate(kSmiTagMask));
6858 __ j(not_zero, &skip_allocation, not_taken);
6859 // Fall through!
6860 case NO_OVERWRITE:
6861 FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
6862 ecx, edx, eax);
6863 __ bind(&skip_allocation);
6864 break;
6865 default: UNREACHABLE();
6866 }
6867 // Store the result in the HeapNumber and return.
6868 __ mov(Operand(esp, 1 * kPointerSize), ebx);
6869 __ fild_s(Operand(esp, 1 * kPointerSize));
6870 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
6871 __ ret(2 * kPointerSize);
6872 }
6873
6874 // Clear the FPU exception flag and reset the stack before calling
6875 // the runtime system.
6876 __ bind(&operand_conversion_failure);
6877 __ add(Operand(esp), Immediate(2 * kPointerSize));
6878 if (use_sse3_) {
6879 // If we've used the SSE3 instructions for truncating the
6880 // floating point values to integers and it failed, we have a
6881 // pending #IA exception. Clear it.
6882 __ fnclex();
6883 } else {
6884 // The non-SSE3 variant does early bailout if the right
6885 // operand isn't a 32-bit integer, so we may have a single
6886 // value on the FPU stack we need to get rid of.
6887 __ ffree(0);
6888 }
6889
6890 // SHR should return uint32 - go to runtime for non-smi/negative result.
6891 if (op_ == Token::SHR) {
6892 __ bind(&non_smi_result);
6893 }
6894 __ mov(eax, Operand(esp, 1 * kPointerSize));
6895 __ mov(edx, Operand(esp, 2 * kPointerSize));
6896 break;
6897 }
6898 default: UNREACHABLE(); break;
6899 }
6900
6901 // If all else fails, use the runtime system to get the correct
6902 // result.
6903 __ bind(&call_runtime);
6904 switch (op_) {
6905 case Token::ADD: {
6906 // Test for string arguments before calling runtime.
6907 Label not_strings, both_strings, not_string1, string1;
6908 Result answer;
6909 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
6910 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
6911 __ test(eax, Immediate(kSmiTagMask));
6912 __ j(zero, &not_string1);
6913 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
6914 __ j(above_equal, &not_string1);
6915
6916 // First argument is a a string, test second.
6917 __ test(edx, Immediate(kSmiTagMask));
6918 __ j(zero, &string1);
6919 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
6920 __ j(above_equal, &string1);
6921
6922 // First and second argument are strings.
6923 __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
6924
6925 // Only first argument is a string.
6926 __ bind(&string1);
6927 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
6928
6929 // First argument was not a string, test second.
6930 __ bind(&not_string1);
6931 __ test(edx, Immediate(kSmiTagMask));
6932 __ j(zero, &not_strings);
6933 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
6934 __ j(above_equal, &not_strings);
6935
6936 // Only second argument is a string.
6937 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
6938
6939 __ bind(&not_strings);
6940 // Neither argument is a string.
6941 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
6942 break;
6943 }
6944 case Token::SUB:
6945 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
6946 break;
6947 case Token::MUL:
6948 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
6949 break;
6950 case Token::DIV:
6951 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
6952 break;
6953 case Token::MOD:
6954 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
6955 break;
6956 case Token::BIT_OR:
6957 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
6958 break;
6959 case Token::BIT_AND:
6960 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
6961 break;
6962 case Token::BIT_XOR:
6963 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
6964 break;
6965 case Token::SAR:
6966 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
6967 break;
6968 case Token::SHL:
6969 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
6970 break;
6971 case Token::SHR:
6972 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
6973 break;
6974 default:
6975 UNREACHABLE();
6976 }
6977}
6978
6979
6980void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
6981 Label* need_gc,
6982 Register scratch1,
6983 Register scratch2,
6984 Register result) {
6985 // Allocate heap number in new space.
6986 __ AllocateInNewSpace(HeapNumber::kSize,
6987 result,
6988 scratch1,
6989 scratch2,
6990 need_gc,
6991 TAG_OBJECT);
6992
6993 // Set the map.
6994 __ mov(FieldOperand(result, HeapObject::kMapOffset),
6995 Immediate(Factory::heap_number_map()));
6996}
6997
6998
6999void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7000 Register number) {
7001 Label load_smi, done;
7002
7003 __ test(number, Immediate(kSmiTagMask));
7004 __ j(zero, &load_smi, not_taken);
7005 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
7006 __ jmp(&done);
7007
7008 __ bind(&load_smi);
7009 __ sar(number, kSmiTagSize);
7010 __ push(number);
7011 __ fild_s(Operand(esp, 0));
7012 __ pop(number);
7013
7014 __ bind(&done);
7015}
7016
7017
7018void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
7019 Label* not_numbers) {
7020 Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
7021 // Load operand in edx into xmm0, or branch to not_numbers.
7022 __ test(edx, Immediate(kSmiTagMask));
7023 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
7024 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
7025 __ j(not_equal, not_numbers); // Argument in edx is not a number.
7026 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
7027 __ bind(&load_eax);
7028 // Load operand in eax into xmm1, or branch to not_numbers.
7029 __ test(eax, Immediate(kSmiTagMask));
7030 __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
7031 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
7032 __ j(equal, &load_float_eax);
7033 __ jmp(not_numbers); // Argument in eax is not a number.
7034 __ bind(&load_smi_edx);
7035 __ sar(edx, 1); // Untag smi before converting to float.
7036 __ cvtsi2sd(xmm0, Operand(edx));
7037 __ shl(edx, 1); // Retag smi for heap number overwriting test.
7038 __ jmp(&load_eax);
7039 __ bind(&load_smi_eax);
7040 __ sar(eax, 1); // Untag smi before converting to float.
7041 __ cvtsi2sd(xmm1, Operand(eax));
7042 __ shl(eax, 1); // Retag smi for heap number overwriting test.
7043 __ jmp(&done);
7044 __ bind(&load_float_eax);
7045 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
7046 __ bind(&done);
7047}
7048
7049
7050void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7051 Register scratch) {
7052 Label load_smi_1, load_smi_2, done_load_1, done;
7053 __ mov(scratch, Operand(esp, 2 * kPointerSize));
7054 __ test(scratch, Immediate(kSmiTagMask));
7055 __ j(zero, &load_smi_1, not_taken);
7056 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
7057 __ bind(&done_load_1);
7058
7059 __ mov(scratch, Operand(esp, 1 * kPointerSize));
7060 __ test(scratch, Immediate(kSmiTagMask));
7061 __ j(zero, &load_smi_2, not_taken);
7062 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
7063 __ jmp(&done);
7064
7065 __ bind(&load_smi_1);
7066 __ sar(scratch, kSmiTagSize);
7067 __ push(scratch);
7068 __ fild_s(Operand(esp, 0));
7069 __ pop(scratch);
7070 __ jmp(&done_load_1);
7071
7072 __ bind(&load_smi_2);
7073 __ sar(scratch, kSmiTagSize);
7074 __ push(scratch);
7075 __ fild_s(Operand(esp, 0));
7076 __ pop(scratch);
7077
7078 __ bind(&done);
7079}
7080
7081
7082void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
7083 Label* non_float,
7084 Register scratch) {
7085 Label test_other, done;
7086 // Test if both operands are floats or smi -> scratch=k_is_float;
7087 // Otherwise scratch = k_not_float.
7088 __ test(edx, Immediate(kSmiTagMask));
7089 __ j(zero, &test_other, not_taken); // argument in edx is OK
7090 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
7091 __ cmp(scratch, Factory::heap_number_map());
7092 __ j(not_equal, non_float); // argument in edx is not a number -> NaN
7093
7094 __ bind(&test_other);
7095 __ test(eax, Immediate(kSmiTagMask));
7096 __ j(zero, &done); // argument in eax is OK
7097 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
7098 __ cmp(scratch, Factory::heap_number_map());
7099 __ j(not_equal, non_float); // argument in eax is not a number -> NaN
7100
7101 // Fall-through: Both operands are numbers.
7102 __ bind(&done);
7103}
7104
7105
7106void UnarySubStub::Generate(MacroAssembler* masm) {
7107 Label undo;
7108 Label slow;
7109 Label done;
7110 Label try_float;
7111
7112 // Check whether the value is a smi.
7113 __ test(eax, Immediate(kSmiTagMask));
7114 __ j(not_zero, &try_float, not_taken);
7115
7116 // Enter runtime system if the value of the expression is zero
7117 // to make sure that we switch between 0 and -0.
7118 __ test(eax, Operand(eax));
7119 __ j(zero, &slow, not_taken);
7120
7121 // The value of the expression is a smi that is not zero. Try
7122 // optimistic subtraction '0 - value'.
7123 __ mov(edx, Operand(eax));
7124 __ Set(eax, Immediate(0));
7125 __ sub(eax, Operand(edx));
7126 __ j(overflow, &undo, not_taken);
7127
7128 // If result is a smi we are done.
7129 __ test(eax, Immediate(kSmiTagMask));
7130 __ j(zero, &done, taken);
7131
7132 // Restore eax and enter runtime system.
7133 __ bind(&undo);
7134 __ mov(eax, Operand(edx));
7135
7136 // Enter runtime system.
7137 __ bind(&slow);
7138 __ pop(ecx); // pop return address
7139 __ push(eax);
7140 __ push(ecx); // push return address
7141 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
7142
7143 // Try floating point case.
7144 __ bind(&try_float);
7145 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
7146 __ cmp(edx, Factory::heap_number_map());
7147 __ j(not_equal, &slow);
7148 if (overwrite_) {
7149 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
7150 __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
7151 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
7152 } else {
7153 __ mov(edx, Operand(eax));
7154 // edx: operand
7155 FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
7156 // eax: allocated 'empty' number
7157 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
7158 __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
7159 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
7160 __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
7161 __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
7162 }
7163
7164 __ bind(&done);
7165
7166 __ StubReturn(1);
7167}
7168
7169
7170void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
7171 // Check if the calling frame is an arguments adaptor frame.
7172 Label adaptor;
7173 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
7174 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
7175 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
7176 __ j(equal, &adaptor);
7177
7178 // Nothing to do: The formal number of parameters has already been
7179 // passed in register eax by calling function. Just return it.
7180 __ ret(0);
7181
7182 // Arguments adaptor case: Read the arguments length from the
7183 // adaptor frame and return it.
7184 __ bind(&adaptor);
7185 __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
7186 __ ret(0);
7187}
7188
7189
7190void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
7191 // The key is in edx and the parameter count is in eax.
7192
7193 // The displacement is used for skipping the frame pointer on the
7194 // stack. It is the offset of the last parameter (if any) relative
7195 // to the frame pointer.
7196 static const int kDisplacement = 1 * kPointerSize;
7197
7198 // Check that the key is a smi.
7199 Label slow;
7200 __ test(edx, Immediate(kSmiTagMask));
7201 __ j(not_zero, &slow, not_taken);
7202
7203 // Check if the calling frame is an arguments adaptor frame.
7204 Label adaptor;
7205 __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
7206 __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
7207 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
7208 __ j(equal, &adaptor);
7209
7210 // Check index against formal parameters count limit passed in
7211 // through register eax. Use unsigned comparison to get negative
7212 // check for free.
7213 __ cmp(edx, Operand(eax));
7214 __ j(above_equal, &slow, not_taken);
7215
7216 // Read the argument from the stack and return it.
7217 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
7218 __ lea(ebx, Operand(ebp, eax, times_2, 0));
7219 __ neg(edx);
7220 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
7221 __ ret(0);
7222
7223 // Arguments adaptor case: Check index against actual arguments
7224 // limit found in the arguments adaptor frame. Use unsigned
7225 // comparison to get negative check for free.
7226 __ bind(&adaptor);
7227 __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
7228 __ cmp(edx, Operand(ecx));
7229 __ j(above_equal, &slow, not_taken);
7230
7231 // Read the argument from the stack and return it.
7232 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
7233 __ lea(ebx, Operand(ebx, ecx, times_2, 0));
7234 __ neg(edx);
7235 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
7236 __ ret(0);
7237
7238 // Slow-case: Handle non-smi or out-of-bounds access to arguments
7239 // by calling the runtime system.
7240 __ bind(&slow);
7241 __ pop(ebx); // Return address.
7242 __ push(edx);
7243 __ push(ebx);
7244 __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
7245}
7246
7247
7248void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
7249 // The displacement is used for skipping the return address and the
7250 // frame pointer on the stack. It is the offset of the last
7251 // parameter (if any) relative to the frame pointer.
7252 static const int kDisplacement = 2 * kPointerSize;
7253
7254 // Check if the calling frame is an arguments adaptor frame.
7255 Label runtime;
7256 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
7257 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
7258 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
7259 __ j(not_equal, &runtime);
7260
7261 // Patch the arguments.length and the parameters pointer.
7262 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
7263 __ mov(Operand(esp, 1 * kPointerSize), ecx);
7264 __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
7265 __ mov(Operand(esp, 2 * kPointerSize), edx);
7266
7267 // Do the runtime call to allocate the arguments object.
7268 __ bind(&runtime);
7269 __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
7270}
7271
7272
7273void CompareStub::Generate(MacroAssembler* masm) {
7274 Label call_builtin, done;
7275
7276 // NOTICE! This code is only reached after a smi-fast-case check, so
7277 // it is certain that at least one operand isn't a smi.
7278
7279 if (cc_ == equal) { // Both strict and non-strict.
7280 Label slow; // Fallthrough label.
7281 // Equality is almost reflexive (everything but NaN), so start by testing
7282 // for "identity and not NaN".
7283 {
7284 Label not_identical;
7285 __ cmp(eax, Operand(edx));
7286 __ j(not_equal, &not_identical);
7287 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
7288 // so we do the second best thing - test it ourselves.
7289
7290 Label return_equal;
7291 Label heap_number;
7292 // If it's not a heap number, then return equal.
7293 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
7294 Immediate(Factory::heap_number_map()));
7295 __ j(equal, &heap_number);
7296 __ bind(&return_equal);
7297 __ Set(eax, Immediate(0));
7298 __ ret(0);
7299
7300 __ bind(&heap_number);
7301 // It is a heap number, so return non-equal if it's NaN and equal if it's
7302 // not NaN.
7303 // The representation of NaN values has all exponent bits (52..62) set,
7304 // and not all mantissa bits (0..51) clear.
7305 // Read top bits of double representation (second word of value).
7306 __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
7307 // Test that exponent bits are all set.
7308 __ not_(eax);
7309 __ test(eax, Immediate(0x7ff00000));
7310 __ j(not_zero, &return_equal);
7311 __ not_(eax);
7312
7313 // Shift out flag and all exponent bits, retaining only mantissa.
7314 __ shl(eax, 12);
7315 // Or with all low-bits of mantissa.
7316 __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
7317 // Return zero equal if all bits in mantissa is zero (it's an Infinity)
7318 // and non-zero if not (it's a NaN).
7319 __ ret(0);
7320
7321 __ bind(&not_identical);
7322 }
7323
7324 // If we're doing a strict equality comparison, we don't have to do
7325 // type conversion, so we generate code to do fast comparison for objects
7326 // and oddballs. Non-smi numbers and strings still go through the usual
7327 // slow-case code.
7328 if (strict_) {
7329 // If either is a Smi (we know that not both are), then they can only
7330 // be equal if the other is a HeapNumber. If so, use the slow case.
7331 {
7332 Label not_smis;
7333 ASSERT_EQ(0, kSmiTag);
7334 ASSERT_EQ(0, Smi::FromInt(0));
7335 __ mov(ecx, Immediate(kSmiTagMask));
7336 __ and_(ecx, Operand(eax));
7337 __ test(ecx, Operand(edx));
7338 __ j(not_zero, &not_smis);
7339 // One operand is a smi.
7340
7341 // Check whether the non-smi is a heap number.
7342 ASSERT_EQ(1, kSmiTagMask);
7343 // ecx still holds eax & kSmiTag, which is either zero or one.
7344 __ sub(Operand(ecx), Immediate(0x01));
7345 __ mov(ebx, edx);
7346 __ xor_(ebx, Operand(eax));
7347 __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
7348 __ xor_(ebx, Operand(eax));
7349 // if eax was smi, ebx is now edx, else eax.
7350
7351 // Check if the non-smi operand is a heap number.
7352 __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
7353 Immediate(Factory::heap_number_map()));
7354 // If heap number, handle it in the slow case.
7355 __ j(equal, &slow);
7356 // Return non-equal (ebx is not zero)
7357 __ mov(eax, ebx);
7358 __ ret(0);
7359
7360 __ bind(&not_smis);
7361 }
7362
7363 // If either operand is a JSObject or an oddball value, then they are not
7364 // equal since their pointers are different
7365 // There is no test for undetectability in strict equality.
7366
7367 // Get the type of the first operand.
7368 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
7369 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
7370
7371 // If the first object is a JS object, we have done pointer comparison.
7372 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
7373 Label first_non_object;
7374 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
7375 __ j(less, &first_non_object);
7376
7377 // Return non-zero (eax is not zero)
7378 Label return_not_equal;
7379 ASSERT(kHeapObjectTag != 0);
7380 __ bind(&return_not_equal);
7381 __ ret(0);
7382
7383 __ bind(&first_non_object);
7384 // Check for oddballs: true, false, null, undefined.
7385 __ cmp(ecx, ODDBALL_TYPE);
7386 __ j(equal, &return_not_equal);
7387
7388 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
7389 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
7390
7391 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
7392 __ j(greater_equal, &return_not_equal);
7393
7394 // Check for oddballs: true, false, null, undefined.
7395 __ cmp(ecx, ODDBALL_TYPE);
7396 __ j(equal, &return_not_equal);
7397
7398 // Fall through to the general case.
7399 }
7400 __ bind(&slow);
7401 }
7402
7403 // Push arguments below the return address.
7404 __ pop(ecx);
7405 __ push(eax);
7406 __ push(edx);
7407 __ push(ecx);
7408
7409 // Inlined floating point compare.
7410 // Call builtin if operands are not floating point or smi.
7411 Label check_for_symbols;
7412 Label unordered;
7413 if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
7414 CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
7415 CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
7416
7417 FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
7418 __ comisd(xmm0, xmm1);
7419
7420 // Jump to builtin for NaN.
7421 __ j(parity_even, &unordered, not_taken);
7422 __ mov(eax, 0); // equal
7423 __ mov(ecx, Immediate(Smi::FromInt(1)));
7424 __ cmov(above, eax, Operand(ecx));
7425 __ mov(ecx, Immediate(Smi::FromInt(-1)));
7426 __ cmov(below, eax, Operand(ecx));
7427 __ ret(2 * kPointerSize);
7428 } else {
7429 FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
7430 FloatingPointHelper::LoadFloatOperands(masm, ecx);
7431 __ FCmp();
7432
7433 // Jump to builtin for NaN.
7434 __ j(parity_even, &unordered, not_taken);
7435
7436 Label below_lbl, above_lbl;
7437 // Return a result of -1, 0, or 1, to indicate result of comparison.
7438 __ j(below, &below_lbl, not_taken);
7439 __ j(above, &above_lbl, not_taken);
7440
7441 __ xor_(eax, Operand(eax)); // equal
7442 // Both arguments were pushed in case a runtime call was needed.
7443 __ ret(2 * kPointerSize);
7444
7445 __ bind(&below_lbl);
7446 __ mov(eax, Immediate(Smi::FromInt(-1)));
7447 __ ret(2 * kPointerSize);
7448
7449 __ bind(&above_lbl);
7450 __ mov(eax, Immediate(Smi::FromInt(1)));
7451 __ ret(2 * kPointerSize); // eax, edx were pushed
7452 }
7453 // If one of the numbers was NaN, then the result is always false.
7454 // The cc is never not-equal.
7455 __ bind(&unordered);
7456 ASSERT(cc_ != not_equal);
7457 if (cc_ == less || cc_ == less_equal) {
7458 __ mov(eax, Immediate(Smi::FromInt(1)));
7459 } else {
7460 __ mov(eax, Immediate(Smi::FromInt(-1)));
7461 }
7462 __ ret(2 * kPointerSize); // eax, edx were pushed
7463
7464 // Fast negative check for symbol-to-symbol equality.
7465 __ bind(&check_for_symbols);
7466 if (cc_ == equal) {
7467 BranchIfNonSymbol(masm, &call_builtin, eax, ecx);
7468 BranchIfNonSymbol(masm, &call_builtin, edx, ecx);
7469
7470 // We've already checked for object identity, so if both operands
7471 // are symbols they aren't equal. Register eax already holds a
7472 // non-zero value, which indicates not equal, so just return.
7473 __ ret(2 * kPointerSize);
7474 }
7475
7476 __ bind(&call_builtin);
7477 // must swap argument order
7478 __ pop(ecx);
7479 __ pop(edx);
7480 __ pop(eax);
7481 __ push(edx);
7482 __ push(eax);
7483
7484 // Figure out which native to call and setup the arguments.
7485 Builtins::JavaScript builtin;
7486 if (cc_ == equal) {
7487 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
7488 } else {
7489 builtin = Builtins::COMPARE;
7490 int ncr; // NaN compare result
7491 if (cc_ == less || cc_ == less_equal) {
7492 ncr = GREATER;
7493 } else {
7494 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
7495 ncr = LESS;
7496 }
7497 __ push(Immediate(Smi::FromInt(ncr)));
7498 }
7499
7500 // Restore return address on the stack.
7501 __ push(ecx);
7502
7503 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
7504 // tagged as a small integer.
7505 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
7506}
7507
7508
7509void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
7510 Label* label,
7511 Register object,
7512 Register scratch) {
7513 __ test(object, Immediate(kSmiTagMask));
7514 __ j(zero, label);
7515 __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
7516 __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
7517 __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
7518 __ cmp(scratch, kSymbolTag | kStringTag);
7519 __ j(not_equal, label);
7520}
7521
7522
7523void StackCheckStub::Generate(MacroAssembler* masm) {
7524 // Because builtins always remove the receiver from the stack, we
7525 // have to fake one to avoid underflowing the stack. The receiver
7526 // must be inserted below the return address on the stack so we
7527 // temporarily store that in a register.
7528 __ pop(eax);
7529 __ push(Immediate(Smi::FromInt(0)));
7530 __ push(eax);
7531
7532 // Do tail-call to runtime routine.
7533 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
7534}
7535
7536
7537void CallFunctionStub::Generate(MacroAssembler* masm) {
7538 Label slow;
7539
7540 // Get the function to call from the stack.
7541 // +2 ~ receiver, return address
7542 __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
7543
7544 // Check that the function really is a JavaScript function.
7545 __ test(edi, Immediate(kSmiTagMask));
7546 __ j(zero, &slow, not_taken);
7547 // Goto slow case if we do not have a function.
7548 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
7549 __ j(not_equal, &slow, not_taken);
7550
7551 // Fast-case: Just invoke the function.
7552 ParameterCount actual(argc_);
7553 __ InvokeFunction(edi, actual, JUMP_FUNCTION);
7554
7555 // Slow-case: Non-function called.
7556 __ bind(&slow);
7557 __ Set(eax, Immediate(argc_));
7558 __ Set(ebx, Immediate(0));
7559 __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
7560 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
7561 __ jmp(adaptor, RelocInfo::CODE_TARGET);
7562}
7563
7564
7565int CEntryStub::MinorKey() {
7566 ASSERT(result_size_ <= 2);
7567 // Result returned in eax, or eax+edx if result_size_ is 2.
7568 return 0;
7569}
7570
7571
7572void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
7573 // eax holds the exception.
7574
7575 // Adjust this code if not the case.
7576 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
7577
7578 // Drop the sp to the top of the handler.
7579 ExternalReference handler_address(Top::k_handler_address);
7580 __ mov(esp, Operand::StaticVariable(handler_address));
7581
7582 // Restore next handler and frame pointer, discard handler state.
7583 ASSERT(StackHandlerConstants::kNextOffset == 0);
7584 __ pop(Operand::StaticVariable(handler_address));
7585 ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
7586 __ pop(ebp);
7587 __ pop(edx); // Remove state.
7588
7589 // Before returning we restore the context from the frame pointer if
7590 // not NULL. The frame pointer is NULL in the exception handler of
7591 // a JS entry frame.
7592 __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
7593 Label skip;
7594 __ cmp(ebp, 0);
7595 __ j(equal, &skip, not_taken);
7596 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
7597 __ bind(&skip);
7598
7599 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
7600 __ ret(0);
7601}
7602
7603
7604void CEntryStub::GenerateCore(MacroAssembler* masm,
7605 Label* throw_normal_exception,
7606 Label* throw_termination_exception,
7607 Label* throw_out_of_memory_exception,
7608 StackFrame::Type frame_type,
7609 bool do_gc,
7610 bool always_allocate_scope) {
7611 // eax: result parameter for PerformGC, if any
7612 // ebx: pointer to C function (C callee-saved)
7613 // ebp: frame pointer (restored after C call)
7614 // esp: stack pointer (restored after C call)
7615 // edi: number of arguments including receiver (C callee-saved)
7616 // esi: pointer to the first argument (C callee-saved)
7617
7618 if (do_gc) {
7619 __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
7620 __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
7621 }
7622
7623 ExternalReference scope_depth =
7624 ExternalReference::heap_always_allocate_scope_depth();
7625 if (always_allocate_scope) {
7626 __ inc(Operand::StaticVariable(scope_depth));
7627 }
7628
7629 // Call C function.
7630 __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
7631 __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
7632 __ call(Operand(ebx));
7633 // Result is in eax or edx:eax - do not destroy these registers!
7634
7635 if (always_allocate_scope) {
7636 __ dec(Operand::StaticVariable(scope_depth));
7637 }
7638
7639 // Make sure we're not trying to return 'the hole' from the runtime
7640 // call as this may lead to crashes in the IC code later.
7641 if (FLAG_debug_code) {
7642 Label okay;
7643 __ cmp(eax, Factory::the_hole_value());
7644 __ j(not_equal, &okay);
7645 __ int3();
7646 __ bind(&okay);
7647 }
7648
7649 // Check for failure result.
7650 Label failure_returned;
7651 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
7652 __ lea(ecx, Operand(eax, 1));
7653 // Lower 2 bits of ecx are 0 iff eax has failure tag.
7654 __ test(ecx, Immediate(kFailureTagMask));
7655 __ j(zero, &failure_returned, not_taken);
7656
7657 // Exit the JavaScript to C++ exit frame.
7658 __ LeaveExitFrame(frame_type);
7659 __ ret(0);
7660
7661 // Handling of failure.
7662 __ bind(&failure_returned);
7663
7664 Label retry;
7665 // If the returned exception is RETRY_AFTER_GC continue at retry label
7666 ASSERT(Failure::RETRY_AFTER_GC == 0);
7667 __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
7668 __ j(zero, &retry, taken);
7669
7670 // Special handling of out of memory exceptions.
7671 __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
7672 __ j(equal, throw_out_of_memory_exception);
7673
7674 // Retrieve the pending exception and clear the variable.
7675 ExternalReference pending_exception_address(Top::k_pending_exception_address);
7676 __ mov(eax, Operand::StaticVariable(pending_exception_address));
7677 __ mov(edx,
7678 Operand::StaticVariable(ExternalReference::the_hole_value_location()));
7679 __ mov(Operand::StaticVariable(pending_exception_address), edx);
7680
7681 // Special handling of termination exceptions which are uncatchable
7682 // by javascript code.
7683 __ cmp(eax, Factory::termination_exception());
7684 __ j(equal, throw_termination_exception);
7685
7686 // Handle normal exception.
7687 __ jmp(throw_normal_exception);
7688
7689 // Retry.
7690 __ bind(&retry);
7691}
7692
7693
7694void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
7695 UncatchableExceptionType type) {
7696 // Adjust this code if not the case.
7697 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
7698
7699 // Drop sp to the top stack handler.
7700 ExternalReference handler_address(Top::k_handler_address);
7701 __ mov(esp, Operand::StaticVariable(handler_address));
7702
7703 // Unwind the handlers until the ENTRY handler is found.
7704 Label loop, done;
7705 __ bind(&loop);
7706 // Load the type of the current stack handler.
7707 const int kStateOffset = StackHandlerConstants::kStateOffset;
7708 __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
7709 __ j(equal, &done);
7710 // Fetch the next handler in the list.
7711 const int kNextOffset = StackHandlerConstants::kNextOffset;
7712 __ mov(esp, Operand(esp, kNextOffset));
7713 __ jmp(&loop);
7714 __ bind(&done);
7715
7716 // Set the top handler address to next handler past the current ENTRY handler.
7717 ASSERT(StackHandlerConstants::kNextOffset == 0);
7718 __ pop(Operand::StaticVariable(handler_address));
7719
7720 if (type == OUT_OF_MEMORY) {
7721 // Set external caught exception to false.
7722 ExternalReference external_caught(Top::k_external_caught_exception_address);
7723 __ mov(eax, false);
7724 __ mov(Operand::StaticVariable(external_caught), eax);
7725
7726 // Set pending exception and eax to out of memory exception.
7727 ExternalReference pending_exception(Top::k_pending_exception_address);
7728 __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
7729 __ mov(Operand::StaticVariable(pending_exception), eax);
7730 }
7731
7732 // Clear the context pointer.
7733 __ xor_(esi, Operand(esi));
7734
7735 // Restore fp from handler and discard handler state.
7736 ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
7737 __ pop(ebp);
7738 __ pop(edx); // State.
7739
7740 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
7741 __ ret(0);
7742}
7743
7744
7745void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
7746 // eax: number of arguments including receiver
7747 // ebx: pointer to C function (C callee-saved)
7748 // ebp: frame pointer (restored after C call)
7749 // esp: stack pointer (restored after C call)
7750 // esi: current context (C callee-saved)
7751 // edi: JS function of the caller (C callee-saved)
7752
7753 // NOTE: Invocations of builtins may return failure objects instead
7754 // of a proper result. The builtin entry handles this by performing
7755 // a garbage collection and retrying the builtin (twice).
7756
7757 StackFrame::Type frame_type = is_debug_break ?
7758 StackFrame::EXIT_DEBUG :
7759 StackFrame::EXIT;
7760
7761 // Enter the exit frame that transitions from JavaScript to C++.
7762 __ EnterExitFrame(frame_type);
7763
7764 // eax: result parameter for PerformGC, if any (setup below)
7765 // ebx: pointer to builtin function (C callee-saved)
7766 // ebp: frame pointer (restored after C call)
7767 // esp: stack pointer (restored after C call)
7768 // edi: number of arguments including receiver (C callee-saved)
7769 // esi: argv pointer (C callee-saved)
7770
7771 Label throw_normal_exception;
7772 Label throw_termination_exception;
7773 Label throw_out_of_memory_exception;
7774
7775 // Call into the runtime system.
7776 GenerateCore(masm,
7777 &throw_normal_exception,
7778 &throw_termination_exception,
7779 &throw_out_of_memory_exception,
7780 frame_type,
7781 false,
7782 false);
7783
7784 // Do space-specific GC and retry runtime call.
7785 GenerateCore(masm,
7786 &throw_normal_exception,
7787 &throw_termination_exception,
7788 &throw_out_of_memory_exception,
7789 frame_type,
7790 true,
7791 false);
7792
7793 // Do full GC and retry runtime call one final time.
7794 Failure* failure = Failure::InternalError();
7795 __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
7796 GenerateCore(masm,
7797 &throw_normal_exception,
7798 &throw_termination_exception,
7799 &throw_out_of_memory_exception,
7800 frame_type,
7801 true,
7802 true);
7803
7804 __ bind(&throw_out_of_memory_exception);
7805 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
7806
7807 __ bind(&throw_termination_exception);
7808 GenerateThrowUncatchable(masm, TERMINATION);
7809
7810 __ bind(&throw_normal_exception);
7811 GenerateThrowTOS(masm);
7812}
7813
7814
7815void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
7816 Label invoke, exit;
7817#ifdef ENABLE_LOGGING_AND_PROFILING
7818 Label not_outermost_js, not_outermost_js_2;
7819#endif
7820
7821 // Setup frame.
7822 __ push(ebp);
7823 __ mov(ebp, Operand(esp));
7824
7825 // Push marker in two places.
7826 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
7827 __ push(Immediate(Smi::FromInt(marker))); // context slot
7828 __ push(Immediate(Smi::FromInt(marker))); // function slot
7829 // Save callee-saved registers (C calling conventions).
7830 __ push(edi);
7831 __ push(esi);
7832 __ push(ebx);
7833
7834 // Save copies of the top frame descriptor on the stack.
7835 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
7836 __ push(Operand::StaticVariable(c_entry_fp));
7837
7838#ifdef ENABLE_LOGGING_AND_PROFILING
7839 // If this is the outermost JS call, set js_entry_sp value.
7840 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
7841 __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
7842 __ j(not_equal, &not_outermost_js);
7843 __ mov(Operand::StaticVariable(js_entry_sp), ebp);
7844 __ bind(&not_outermost_js);
7845#endif
7846
7847 // Call a faked try-block that does the invoke.
7848 __ call(&invoke);
7849
7850 // Caught exception: Store result (exception) in the pending
7851 // exception field in the JSEnv and return a failure sentinel.
7852 ExternalReference pending_exception(Top::k_pending_exception_address);
7853 __ mov(Operand::StaticVariable(pending_exception), eax);
7854 __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
7855 __ jmp(&exit);
7856
7857 // Invoke: Link this frame into the handler chain.
7858 __ bind(&invoke);
7859 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
7860
7861 // Clear any pending exceptions.
7862 __ mov(edx,
7863 Operand::StaticVariable(ExternalReference::the_hole_value_location()));
7864 __ mov(Operand::StaticVariable(pending_exception), edx);
7865
7866 // Fake a receiver (NULL).
7867 __ push(Immediate(0)); // receiver
7868
7869 // Invoke the function by calling through JS entry trampoline
7870 // builtin and pop the faked function when we return. Notice that we
7871 // cannot store a reference to the trampoline code directly in this
7872 // stub, because the builtin stubs may not have been generated yet.
7873 if (is_construct) {
7874 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
7875 __ mov(edx, Immediate(construct_entry));
7876 } else {
7877 ExternalReference entry(Builtins::JSEntryTrampoline);
7878 __ mov(edx, Immediate(entry));
7879 }
7880 __ mov(edx, Operand(edx, 0)); // deref address
7881 __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
7882 __ call(Operand(edx));
7883
7884 // Unlink this frame from the handler chain.
7885 __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
7886 // Pop next_sp.
7887 __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
7888
7889#ifdef ENABLE_LOGGING_AND_PROFILING
7890 // If current EBP value is the same as js_entry_sp value, it means that
7891 // the current function is the outermost.
7892 __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
7893 __ j(not_equal, &not_outermost_js_2);
7894 __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
7895 __ bind(&not_outermost_js_2);
7896#endif
7897
7898 // Restore the top frame descriptor from the stack.
7899 __ bind(&exit);
7900 __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
7901
7902 // Restore callee-saved registers (C calling conventions).
7903 __ pop(ebx);
7904 __ pop(esi);
7905 __ pop(edi);
7906 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
7907
7908 // Restore frame pointer and return.
7909 __ pop(ebp);
7910 __ ret(0);
7911}
7912
7913
7914void InstanceofStub::Generate(MacroAssembler* masm) {
7915 // Get the object - go slow case if it's a smi.
7916 Label slow;
7917 __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
7918 __ test(eax, Immediate(kSmiTagMask));
7919 __ j(zero, &slow, not_taken);
7920
7921 // Check that the left hand is a JS object.
7922 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
7923 __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
7924 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
7925 __ j(less, &slow, not_taken);
7926 __ cmp(ecx, LAST_JS_OBJECT_TYPE);
7927 __ j(greater, &slow, not_taken);
7928
7929 // Get the prototype of the function.
7930 __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
7931 __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
7932
7933 // Check that the function prototype is a JS object.
7934 __ test(ebx, Immediate(kSmiTagMask));
7935 __ j(zero, &slow, not_taken);
7936 __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
7937 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
7938 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
7939 __ j(less, &slow, not_taken);
7940 __ cmp(ecx, LAST_JS_OBJECT_TYPE);
7941 __ j(greater, &slow, not_taken);
7942
7943 // Register mapping: eax is object map and ebx is function prototype.
7944 __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
7945
7946 // Loop through the prototype chain looking for the function prototype.
7947 Label loop, is_instance, is_not_instance;
7948 __ bind(&loop);
7949 __ cmp(ecx, Operand(ebx));
7950 __ j(equal, &is_instance);
7951 __ cmp(Operand(ecx), Immediate(Factory::null_value()));
7952 __ j(equal, &is_not_instance);
7953 __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
7954 __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
7955 __ jmp(&loop);
7956
7957 __ bind(&is_instance);
7958 __ Set(eax, Immediate(0));
7959 __ ret(2 * kPointerSize);
7960
7961 __ bind(&is_not_instance);
7962 __ Set(eax, Immediate(Smi::FromInt(1)));
7963 __ ret(2 * kPointerSize);
7964
7965 // Slow-case: Go through the JavaScript implementation.
7966 __ bind(&slow);
7967 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
7968}
7969
7970
7971int CompareStub::MinorKey() {
7972 // Encode the two parameters in a unique 16 bit value.
7973 ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
7974 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
7975}
7976
7977#undef __
7978
7979} } // namespace v8::internal