blob: 47f0e9636e9c3a82f54662de65679b2d85857791 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright 2006-2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "bootstrapper.h"
31#include "codegen-inl.h"
32#include "debug.h"
33#include "parser.h"
34#include "register-allocator-inl.h"
35#include "runtime.h"
36#include "scopes.h"
37
38
39namespace v8 {
40namespace internal {
41
42#define __ ACCESS_MASM(masm_)
43
44static void EmitIdenticalObjectComparison(MacroAssembler* masm,
45 Label* slow,
46 Condition cc);
47static void EmitSmiNonsmiComparison(MacroAssembler* masm,
48 Label* rhs_not_nan,
49 Label* slow,
50 bool strict);
51static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
52static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
53static void MultiplyByKnownInt(MacroAssembler* masm,
54 Register source,
55 Register destination,
56 int known_int);
57static bool IsEasyToMultiplyBy(int x);
58
59
60
61// -------------------------------------------------------------------------
62// Platform-specific DeferredCode functions.
63
64void DeferredCode::SaveRegisters() {
65 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
66 int action = registers_[i];
67 if (action == kPush) {
68 __ push(RegisterAllocator::ToRegister(i));
69 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
70 __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
71 }
72 }
73}
74
75
76void DeferredCode::RestoreRegisters() {
77 // Restore registers in reverse order due to the stack.
78 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
79 int action = registers_[i];
80 if (action == kPush) {
81 __ pop(RegisterAllocator::ToRegister(i));
82 } else if (action != kIgnore) {
83 action &= ~kSyncedFlag;
84 __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
85 }
86 }
87}
88
89
90// -------------------------------------------------------------------------
91// CodeGenState implementation.
92
93CodeGenState::CodeGenState(CodeGenerator* owner)
94 : owner_(owner),
95 typeof_state_(NOT_INSIDE_TYPEOF),
96 true_target_(NULL),
97 false_target_(NULL),
98 previous_(NULL) {
99 owner_->set_state(this);
100}
101
102
103CodeGenState::CodeGenState(CodeGenerator* owner,
104 TypeofState typeof_state,
105 JumpTarget* true_target,
106 JumpTarget* false_target)
107 : owner_(owner),
108 typeof_state_(typeof_state),
109 true_target_(true_target),
110 false_target_(false_target),
111 previous_(owner->state()) {
112 owner_->set_state(this);
113}
114
115
116CodeGenState::~CodeGenState() {
117 ASSERT(owner_->state() == this);
118 owner_->set_state(previous_);
119}
120
121
122// -------------------------------------------------------------------------
123// CodeGenerator implementation
124
125CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
126 bool is_eval)
127 : is_eval_(is_eval),
128 script_(script),
129 deferred_(8),
130 masm_(new MacroAssembler(NULL, buffer_size)),
131 scope_(NULL),
132 frame_(NULL),
133 allocator_(NULL),
134 cc_reg_(al),
135 state_(NULL),
136 function_return_is_shadowed_(false) {
137}
138
139
140// Calling conventions:
141// fp: caller's frame pointer
142// sp: stack pointer
143// r1: called JS function
144// cp: callee's context
145
146void CodeGenerator::GenCode(FunctionLiteral* fun) {
147 ZoneList<Statement*>* body = fun->body();
148
149 // Initialize state.
150 ASSERT(scope_ == NULL);
151 scope_ = fun->scope();
152 ASSERT(allocator_ == NULL);
153 RegisterAllocator register_allocator(this);
154 allocator_ = &register_allocator;
155 ASSERT(frame_ == NULL);
156 frame_ = new VirtualFrame();
157 cc_reg_ = al;
158 {
159 CodeGenState state(this);
160
161 // Entry:
162 // Stack: receiver, arguments
163 // lr: return address
164 // fp: caller's frame pointer
165 // sp: stack pointer
166 // r1: called JS function
167 // cp: callee's context
168 allocator_->Initialize();
169 frame_->Enter();
170 // tos: code slot
171#ifdef DEBUG
172 if (strlen(FLAG_stop_at) > 0 &&
173 fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
174 frame_->SpillAll();
175 __ stop("stop-at");
176 }
177#endif
178
179 // Allocate space for locals and initialize them. This also checks
180 // for stack overflow.
181 frame_->AllocateStackSlots();
182 // Initialize the function return target after the locals are set
183 // up, because it needs the expected frame height from the frame.
184 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
185 function_return_is_shadowed_ = false;
186
187 VirtualFrame::SpilledScope spilled_scope;
188 if (scope_->num_heap_slots() > 0) {
189 // Allocate local context.
190 // Get outer context and create a new context based on it.
191 __ ldr(r0, frame_->Function());
192 frame_->EmitPush(r0);
193 frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
194
195#ifdef DEBUG
196 JumpTarget verified_true;
197 __ cmp(r0, Operand(cp));
198 verified_true.Branch(eq);
199 __ stop("NewContext: r0 is expected to be the same as cp");
200 verified_true.Bind();
201#endif
202 // Update context local.
203 __ str(cp, frame_->Context());
204 }
205
206 // TODO(1241774): Improve this code:
207 // 1) only needed if we have a context
208 // 2) no need to recompute context ptr every single time
209 // 3) don't copy parameter operand code from SlotOperand!
210 {
211 Comment cmnt2(masm_, "[ copy context parameters into .context");
212
213 // Note that iteration order is relevant here! If we have the same
214 // parameter twice (e.g., function (x, y, x)), and that parameter
215 // needs to be copied into the context, it must be the last argument
216 // passed to the parameter that needs to be copied. This is a rare
217 // case so we don't check for it, instead we rely on the copying
218 // order: such a parameter is copied repeatedly into the same
219 // context location and thus the last value is what is seen inside
220 // the function.
221 for (int i = 0; i < scope_->num_parameters(); i++) {
222 Variable* par = scope_->parameter(i);
223 Slot* slot = par->slot();
224 if (slot != NULL && slot->type() == Slot::CONTEXT) {
225 ASSERT(!scope_->is_global_scope()); // no parameters in global scope
226 __ ldr(r1, frame_->ParameterAt(i));
227 // Loads r2 with context; used below in RecordWrite.
228 __ str(r1, SlotOperand(slot, r2));
229 // Load the offset into r3.
230 int slot_offset =
231 FixedArray::kHeaderSize + slot->index() * kPointerSize;
232 __ mov(r3, Operand(slot_offset));
233 __ RecordWrite(r2, r3, r1);
234 }
235 }
236 }
237
238 // Store the arguments object. This must happen after context
239 // initialization because the arguments object may be stored in the
240 // context.
241 if (scope_->arguments() != NULL) {
242 ASSERT(scope_->arguments_shadow() != NULL);
243 Comment cmnt(masm_, "[ allocate arguments object");
244 { Reference shadow_ref(this, scope_->arguments_shadow());
245 { Reference arguments_ref(this, scope_->arguments());
246 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
247 __ ldr(r2, frame_->Function());
248 // The receiver is below the arguments, the return address,
249 // and the frame pointer on the stack.
250 const int kReceiverDisplacement = 2 + scope_->num_parameters();
251 __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
252 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
253 frame_->Adjust(3);
254 __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
255 frame_->CallStub(&stub, 3);
256 frame_->EmitPush(r0);
257 arguments_ref.SetValue(NOT_CONST_INIT);
258 }
259 shadow_ref.SetValue(NOT_CONST_INIT);
260 }
261 frame_->Drop(); // Value is no longer needed.
262 }
263
264 // Generate code to 'execute' declarations and initialize functions
265 // (source elements). In case of an illegal redeclaration we need to
266 // handle that instead of processing the declarations.
267 if (scope_->HasIllegalRedeclaration()) {
268 Comment cmnt(masm_, "[ illegal redeclarations");
269 scope_->VisitIllegalRedeclaration(this);
270 } else {
271 Comment cmnt(masm_, "[ declarations");
272 ProcessDeclarations(scope_->declarations());
273 // Bail out if a stack-overflow exception occurred when processing
274 // declarations.
275 if (HasStackOverflow()) return;
276 }
277
278 if (FLAG_trace) {
279 frame_->CallRuntime(Runtime::kTraceEnter, 0);
280 // Ignore the return value.
281 }
282
283 // Compile the body of the function in a vanilla state. Don't
284 // bother compiling all the code if the scope has an illegal
285 // redeclaration.
286 if (!scope_->HasIllegalRedeclaration()) {
287 Comment cmnt(masm_, "[ function body");
288#ifdef DEBUG
289 bool is_builtin = Bootstrapper::IsActive();
290 bool should_trace =
291 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
292 if (should_trace) {
293 frame_->CallRuntime(Runtime::kDebugTrace, 0);
294 // Ignore the return value.
295 }
296#endif
297 VisitStatementsAndSpill(body);
298 }
299 }
300
301 // Generate the return sequence if necessary.
302 if (has_valid_frame() || function_return_.is_linked()) {
303 if (!function_return_.is_linked()) {
304 CodeForReturnPosition(fun);
305 }
306 // exit
307 // r0: result
308 // sp: stack pointer
309 // fp: frame pointer
310 // cp: callee's context
311 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
312
313 function_return_.Bind();
314 if (FLAG_trace) {
315 // Push the return value on the stack as the parameter.
316 // Runtime::TraceExit returns the parameter as it is.
317 frame_->EmitPush(r0);
318 frame_->CallRuntime(Runtime::kTraceExit, 1);
319 }
320
321 // Add a label for checking the size of the code used for returning.
322 Label check_exit_codesize;
323 masm_->bind(&check_exit_codesize);
324
325 // Tear down the frame which will restore the caller's frame pointer and
326 // the link register.
327 frame_->Exit();
328
329 // Here we use masm_-> instead of the __ macro to avoid the code coverage
330 // tool from instrumenting as we rely on the code size here.
331 masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
332 masm_->Jump(lr);
333
334 // Check that the size of the code used for returning matches what is
335 // expected by the debugger.
336 ASSERT_EQ(kJSReturnSequenceLength,
337 masm_->InstructionsGeneratedSince(&check_exit_codesize));
338 }
339
340 // Code generation state must be reset.
341 ASSERT(!has_cc());
342 ASSERT(state_ == NULL);
343 ASSERT(!function_return_is_shadowed_);
344 function_return_.Unuse();
345 DeleteFrame();
346
347 // Process any deferred code using the register allocator.
348 if (!HasStackOverflow()) {
349 ProcessDeferred();
350 }
351
352 allocator_ = NULL;
353 scope_ = NULL;
354}
355
356
357MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
358 // Currently, this assertion will fail if we try to assign to
359 // a constant variable that is constant because it is read-only
360 // (such as the variable referring to a named function expression).
361 // We need to implement assignments to read-only variables.
362 // Ideally, we should do this during AST generation (by converting
363 // such assignments into expression statements); however, in general
364 // we may not be able to make the decision until past AST generation,
365 // that is when the entire program is known.
366 ASSERT(slot != NULL);
367 int index = slot->index();
368 switch (slot->type()) {
369 case Slot::PARAMETER:
370 return frame_->ParameterAt(index);
371
372 case Slot::LOCAL:
373 return frame_->LocalAt(index);
374
375 case Slot::CONTEXT: {
376 // Follow the context chain if necessary.
377 ASSERT(!tmp.is(cp)); // do not overwrite context register
378 Register context = cp;
379 int chain_length = scope()->ContextChainLength(slot->var()->scope());
380 for (int i = 0; i < chain_length; i++) {
381 // Load the closure.
382 // (All contexts, even 'with' contexts, have a closure,
383 // and it is the same for all contexts inside a function.
384 // There is no need to go to the function context first.)
385 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
386 // Load the function context (which is the incoming, outer context).
387 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
388 context = tmp;
389 }
390 // We may have a 'with' context now. Get the function context.
391 // (In fact this mov may never be the needed, since the scope analysis
392 // may not permit a direct context access in this case and thus we are
393 // always at a function context. However it is safe to dereference be-
394 // cause the function context of a function context is itself. Before
395 // deleting this mov we should try to create a counter-example first,
396 // though...)
397 __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
398 return ContextOperand(tmp, index);
399 }
400
401 default:
402 UNREACHABLE();
403 return MemOperand(r0, 0);
404 }
405}
406
407
408MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
409 Slot* slot,
410 Register tmp,
411 Register tmp2,
412 JumpTarget* slow) {
413 ASSERT(slot->type() == Slot::CONTEXT);
414 Register context = cp;
415
416 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
417 if (s->num_heap_slots() > 0) {
418 if (s->calls_eval()) {
419 // Check that extension is NULL.
420 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
421 __ tst(tmp2, tmp2);
422 slow->Branch(ne);
423 }
424 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
425 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
426 context = tmp;
427 }
428 }
429 // Check that last extension is NULL.
430 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
431 __ tst(tmp2, tmp2);
432 slow->Branch(ne);
433 __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
434 return ContextOperand(tmp, slot->index());
435}
436
437
438// Loads a value on TOS. If it is a boolean value, the result may have been
439// (partially) translated into branches, or it may have set the condition
440// code register. If force_cc is set, the value is forced to set the
441// condition code register and no value is pushed. If the condition code
442// register was set, has_cc() is true and cc_reg_ contains the condition to
443// test for 'true'.
444void CodeGenerator::LoadCondition(Expression* x,
445 TypeofState typeof_state,
446 JumpTarget* true_target,
447 JumpTarget* false_target,
448 bool force_cc) {
449 ASSERT(!has_cc());
450 int original_height = frame_->height();
451
452 { CodeGenState new_state(this, typeof_state, true_target, false_target);
453 Visit(x);
454
455 // If we hit a stack overflow, we may not have actually visited
456 // the expression. In that case, we ensure that we have a
457 // valid-looking frame state because we will continue to generate
458 // code as we unwind the C++ stack.
459 //
460 // It's possible to have both a stack overflow and a valid frame
461 // state (eg, a subexpression overflowed, visiting it returned
462 // with a dummied frame state, and visiting this expression
463 // returned with a normal-looking state).
464 if (HasStackOverflow() &&
465 has_valid_frame() &&
466 !has_cc() &&
467 frame_->height() == original_height) {
468 true_target->Jump();
469 }
470 }
471 if (force_cc && frame_ != NULL && !has_cc()) {
472 // Convert the TOS value to a boolean in the condition code register.
473 ToBoolean(true_target, false_target);
474 }
475 ASSERT(!force_cc || !has_valid_frame() || has_cc());
476 ASSERT(!has_valid_frame() ||
477 (has_cc() && frame_->height() == original_height) ||
478 (!has_cc() && frame_->height() == original_height + 1));
479}
480
481
482void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
483#ifdef DEBUG
484 int original_height = frame_->height();
485#endif
486 JumpTarget true_target;
487 JumpTarget false_target;
488 LoadCondition(x, typeof_state, &true_target, &false_target, false);
489
490 if (has_cc()) {
491 // Convert cc_reg_ into a boolean value.
492 JumpTarget loaded;
493 JumpTarget materialize_true;
494 materialize_true.Branch(cc_reg_);
495 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
496 frame_->EmitPush(r0);
497 loaded.Jump();
498 materialize_true.Bind();
499 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
500 frame_->EmitPush(r0);
501 loaded.Bind();
502 cc_reg_ = al;
503 }
504
505 if (true_target.is_linked() || false_target.is_linked()) {
506 // We have at least one condition value that has been "translated"
507 // into a branch, thus it needs to be loaded explicitly.
508 JumpTarget loaded;
509 if (frame_ != NULL) {
510 loaded.Jump(); // Don't lose the current TOS.
511 }
512 bool both = true_target.is_linked() && false_target.is_linked();
513 // Load "true" if necessary.
514 if (true_target.is_linked()) {
515 true_target.Bind();
516 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
517 frame_->EmitPush(r0);
518 }
519 // If both "true" and "false" need to be loaded jump across the code for
520 // "false".
521 if (both) {
522 loaded.Jump();
523 }
524 // Load "false" if necessary.
525 if (false_target.is_linked()) {
526 false_target.Bind();
527 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
528 frame_->EmitPush(r0);
529 }
530 // A value is loaded on all paths reaching this point.
531 loaded.Bind();
532 }
533 ASSERT(has_valid_frame());
534 ASSERT(!has_cc());
535 ASSERT(frame_->height() == original_height + 1);
536}
537
538
539void CodeGenerator::LoadGlobal() {
540 VirtualFrame::SpilledScope spilled_scope;
541 __ ldr(r0, GlobalObject());
542 frame_->EmitPush(r0);
543}
544
545
546void CodeGenerator::LoadGlobalReceiver(Register scratch) {
547 VirtualFrame::SpilledScope spilled_scope;
548 __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
549 __ ldr(scratch,
550 FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
551 frame_->EmitPush(scratch);
552}
553
554
555// TODO(1241834): Get rid of this function in favor of just using Load, now
556// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
557// variables w/o reference errors elsewhere.
558void CodeGenerator::LoadTypeofExpression(Expression* x) {
559 VirtualFrame::SpilledScope spilled_scope;
560 Variable* variable = x->AsVariableProxy()->AsVariable();
561 if (variable != NULL && !variable->is_this() && variable->is_global()) {
562 // NOTE: This is somewhat nasty. We force the compiler to load
563 // the variable as if through '<global>.<variable>' to make sure we
564 // do not get reference errors.
565 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
566 Literal key(variable->name());
567 // TODO(1241834): Fetch the position from the variable instead of using
568 // no position.
569 Property property(&global, &key, RelocInfo::kNoPosition);
570 LoadAndSpill(&property);
571 } else {
572 LoadAndSpill(x, INSIDE_TYPEOF);
573 }
574}
575
576
577Reference::Reference(CodeGenerator* cgen, Expression* expression)
578 : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
579 cgen->LoadReference(this);
580}
581
582
583Reference::~Reference() {
584 cgen_->UnloadReference(this);
585}
586
587
588void CodeGenerator::LoadReference(Reference* ref) {
589 VirtualFrame::SpilledScope spilled_scope;
590 Comment cmnt(masm_, "[ LoadReference");
591 Expression* e = ref->expression();
592 Property* property = e->AsProperty();
593 Variable* var = e->AsVariableProxy()->AsVariable();
594
595 if (property != NULL) {
596 // The expression is either a property or a variable proxy that rewrites
597 // to a property.
598 LoadAndSpill(property->obj());
599 // We use a named reference if the key is a literal symbol, unless it is
600 // a string that can be legally parsed as an integer. This is because
601 // otherwise we will not get into the slow case code that handles [] on
602 // String objects.
603 Literal* literal = property->key()->AsLiteral();
604 uint32_t dummy;
605 if (literal != NULL &&
606 literal->handle()->IsSymbol() &&
607 !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
608 ref->set_type(Reference::NAMED);
609 } else {
610 LoadAndSpill(property->key());
611 ref->set_type(Reference::KEYED);
612 }
613 } else if (var != NULL) {
614 // The expression is a variable proxy that does not rewrite to a
615 // property. Global variables are treated as named property references.
616 if (var->is_global()) {
617 LoadGlobal();
618 ref->set_type(Reference::NAMED);
619 } else {
620 ASSERT(var->slot() != NULL);
621 ref->set_type(Reference::SLOT);
622 }
623 } else {
624 // Anything else is a runtime error.
625 LoadAndSpill(e);
626 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
627 }
628}
629
630
631void CodeGenerator::UnloadReference(Reference* ref) {
632 VirtualFrame::SpilledScope spilled_scope;
633 // Pop a reference from the stack while preserving TOS.
634 Comment cmnt(masm_, "[ UnloadReference");
635 int size = ref->size();
636 if (size > 0) {
637 frame_->EmitPop(r0);
638 frame_->Drop(size);
639 frame_->EmitPush(r0);
640 }
641}
642
643
644// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
645// register to a boolean in the condition code register. The code
646// may jump to 'false_target' in case the register converts to 'false'.
647void CodeGenerator::ToBoolean(JumpTarget* true_target,
648 JumpTarget* false_target) {
649 VirtualFrame::SpilledScope spilled_scope;
650 // Note: The generated code snippet does not change stack variables.
651 // Only the condition code should be set.
652 frame_->EmitPop(r0);
653
654 // Fast case checks
655
656 // Check if the value is 'false'.
657 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
658 __ cmp(r0, ip);
659 false_target->Branch(eq);
660
661 // Check if the value is 'true'.
662 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
663 __ cmp(r0, ip);
664 true_target->Branch(eq);
665
666 // Check if the value is 'undefined'.
667 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
668 __ cmp(r0, ip);
669 false_target->Branch(eq);
670
671 // Check if the value is a smi.
672 __ cmp(r0, Operand(Smi::FromInt(0)));
673 false_target->Branch(eq);
674 __ tst(r0, Operand(kSmiTagMask));
675 true_target->Branch(eq);
676
677 // Slow case: call the runtime.
678 frame_->EmitPush(r0);
679 frame_->CallRuntime(Runtime::kToBool, 1);
680 // Convert the result (r0) to a condition code.
681 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
682 __ cmp(r0, ip);
683
684 cc_reg_ = ne;
685}
686
687
688void CodeGenerator::GenericBinaryOperation(Token::Value op,
689 OverwriteMode overwrite_mode,
690 int constant_rhs) {
691 VirtualFrame::SpilledScope spilled_scope;
692 // sp[0] : y
693 // sp[1] : x
694 // result : r0
695
696 // Stub is entered with a call: 'return address' is in lr.
697 switch (op) {
698 case Token::ADD: // fall through.
699 case Token::SUB: // fall through.
700 case Token::MUL:
701 case Token::DIV:
702 case Token::MOD:
703 case Token::BIT_OR:
704 case Token::BIT_AND:
705 case Token::BIT_XOR:
706 case Token::SHL:
707 case Token::SHR:
708 case Token::SAR: {
709 frame_->EmitPop(r0); // r0 : y
710 frame_->EmitPop(r1); // r1 : x
711 GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
712 frame_->CallStub(&stub, 0);
713 break;
714 }
715
716 case Token::COMMA:
717 frame_->EmitPop(r0);
718 // simply discard left value
719 frame_->Drop();
720 break;
721
722 default:
723 // Other cases should have been handled before this point.
724 UNREACHABLE();
725 break;
726 }
727}
728
729
730class DeferredInlineSmiOperation: public DeferredCode {
731 public:
732 DeferredInlineSmiOperation(Token::Value op,
733 int value,
734 bool reversed,
735 OverwriteMode overwrite_mode)
736 : op_(op),
737 value_(value),
738 reversed_(reversed),
739 overwrite_mode_(overwrite_mode) {
740 set_comment("[ DeferredInlinedSmiOperation");
741 }
742
743 virtual void Generate();
744
745 private:
746 Token::Value op_;
747 int value_;
748 bool reversed_;
749 OverwriteMode overwrite_mode_;
750};
751
752
753void DeferredInlineSmiOperation::Generate() {
754 switch (op_) {
755 case Token::ADD: {
756 // Revert optimistic add.
757 if (reversed_) {
758 __ sub(r0, r0, Operand(Smi::FromInt(value_)));
759 __ mov(r1, Operand(Smi::FromInt(value_)));
760 } else {
761 __ sub(r1, r0, Operand(Smi::FromInt(value_)));
762 __ mov(r0, Operand(Smi::FromInt(value_)));
763 }
764 break;
765 }
766
767 case Token::SUB: {
768 // Revert optimistic sub.
769 if (reversed_) {
770 __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
771 __ mov(r1, Operand(Smi::FromInt(value_)));
772 } else {
773 __ add(r1, r0, Operand(Smi::FromInt(value_)));
774 __ mov(r0, Operand(Smi::FromInt(value_)));
775 }
776 break;
777 }
778
779 // For these operations there is no optimistic operation that needs to be
780 // reverted.
781 case Token::MUL:
782 case Token::MOD:
783 case Token::BIT_OR:
784 case Token::BIT_XOR:
785 case Token::BIT_AND: {
786 if (reversed_) {
787 __ mov(r1, Operand(Smi::FromInt(value_)));
788 } else {
789 __ mov(r1, Operand(r0));
790 __ mov(r0, Operand(Smi::FromInt(value_)));
791 }
792 break;
793 }
794
795 case Token::SHL:
796 case Token::SHR:
797 case Token::SAR: {
798 if (!reversed_) {
799 __ mov(r1, Operand(r0));
800 __ mov(r0, Operand(Smi::FromInt(value_)));
801 } else {
802 UNREACHABLE(); // Should have been handled in SmiOperation.
803 }
804 break;
805 }
806
807 default:
808 // Other cases should have been handled before this point.
809 UNREACHABLE();
810 break;
811 }
812
813 GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
814 __ CallStub(&stub);
815}
816
817
818static bool PopCountLessThanEqual2(unsigned int x) {
819 x &= x - 1;
820 return (x & (x - 1)) == 0;
821}
822
823
824// Returns the index of the lowest bit set.
825static int BitPosition(unsigned x) {
826 int bit_posn = 0;
827 while ((x & 0xf) == 0) {
828 bit_posn += 4;
829 x >>= 4;
830 }
831 while ((x & 1) == 0) {
832 bit_posn++;
833 x >>= 1;
834 }
835 return bit_posn;
836}
837
838
839void CodeGenerator::SmiOperation(Token::Value op,
840 Handle<Object> value,
841 bool reversed,
842 OverwriteMode mode) {
843 VirtualFrame::SpilledScope spilled_scope;
844 // NOTE: This is an attempt to inline (a bit) more of the code for
845 // some possible smi operations (like + and -) when (at least) one
846 // of the operands is a literal smi. With this optimization, the
847 // performance of the system is increased by ~15%, and the generated
848 // code size is increased by ~1% (measured on a combination of
849 // different benchmarks).
850
851 // sp[0] : operand
852
853 int int_value = Smi::cast(*value)->value();
854
855 JumpTarget exit;
856 frame_->EmitPop(r0);
857
858 bool something_to_inline = true;
859 switch (op) {
860 case Token::ADD: {
861 DeferredCode* deferred =
862 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
863
864 __ add(r0, r0, Operand(value), SetCC);
865 deferred->Branch(vs);
866 __ tst(r0, Operand(kSmiTagMask));
867 deferred->Branch(ne);
868 deferred->BindExit();
869 break;
870 }
871
872 case Token::SUB: {
873 DeferredCode* deferred =
874 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
875
876 if (reversed) {
877 __ rsb(r0, r0, Operand(value), SetCC);
878 } else {
879 __ sub(r0, r0, Operand(value), SetCC);
880 }
881 deferred->Branch(vs);
882 __ tst(r0, Operand(kSmiTagMask));
883 deferred->Branch(ne);
884 deferred->BindExit();
885 break;
886 }
887
888
889 case Token::BIT_OR:
890 case Token::BIT_XOR:
891 case Token::BIT_AND: {
892 DeferredCode* deferred =
893 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
894 __ tst(r0, Operand(kSmiTagMask));
895 deferred->Branch(ne);
896 switch (op) {
897 case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
898 case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
899 case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
900 default: UNREACHABLE();
901 }
902 deferred->BindExit();
903 break;
904 }
905
906 case Token::SHL:
907 case Token::SHR:
908 case Token::SAR: {
909 if (reversed) {
910 something_to_inline = false;
911 break;
912 }
913 int shift_value = int_value & 0x1f; // least significant 5 bits
914 DeferredCode* deferred =
915 new DeferredInlineSmiOperation(op, shift_value, false, mode);
916 __ tst(r0, Operand(kSmiTagMask));
917 deferred->Branch(ne);
918 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
919 switch (op) {
920 case Token::SHL: {
921 if (shift_value != 0) {
922 __ mov(r2, Operand(r2, LSL, shift_value));
923 }
924 // check that the *unsigned* result fits in a smi
925 __ add(r3, r2, Operand(0x40000000), SetCC);
926 deferred->Branch(mi);
927 break;
928 }
929 case Token::SHR: {
930 // LSR by immediate 0 means shifting 32 bits.
931 if (shift_value != 0) {
932 __ mov(r2, Operand(r2, LSR, shift_value));
933 }
934 // check that the *unsigned* result fits in a smi
935 // neither of the two high-order bits can be set:
936 // - 0x80000000: high bit would be lost when smi tagging
937 // - 0x40000000: this number would convert to negative when
938 // smi tagging these two cases can only happen with shifts
939 // by 0 or 1 when handed a valid smi
940 __ and_(r3, r2, Operand(0xc0000000), SetCC);
941 deferred->Branch(ne);
942 break;
943 }
944 case Token::SAR: {
945 if (shift_value != 0) {
946 // ASR by immediate 0 means shifting 32 bits.
947 __ mov(r2, Operand(r2, ASR, shift_value));
948 }
949 break;
950 }
951 default: UNREACHABLE();
952 }
953 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
954 deferred->BindExit();
955 break;
956 }
957
958 case Token::MOD: {
959 if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
960 something_to_inline = false;
961 break;
962 }
963 DeferredCode* deferred =
964 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
965 unsigned mask = (0x80000000u | kSmiTagMask);
966 __ tst(r0, Operand(mask));
967 deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
968 mask = (int_value << kSmiTagSize) - 1;
969 __ and_(r0, r0, Operand(mask));
970 deferred->BindExit();
971 break;
972 }
973
974 case Token::MUL: {
975 if (!IsEasyToMultiplyBy(int_value)) {
976 something_to_inline = false;
977 break;
978 }
979 DeferredCode* deferred =
980 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
981 unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
982 max_smi_that_wont_overflow <<= kSmiTagSize;
983 unsigned mask = 0x80000000u;
984 while ((mask & max_smi_that_wont_overflow) == 0) {
985 mask |= mask >> 1;
986 }
987 mask |= kSmiTagMask;
988 // This does a single mask that checks for a too high value in a
989 // conservative way and for a non-Smi. It also filters out negative
990 // numbers, unfortunately, but since this code is inline we prefer
991 // brevity to comprehensiveness.
992 __ tst(r0, Operand(mask));
993 deferred->Branch(ne);
994 MultiplyByKnownInt(masm_, r0, r0, int_value);
995 deferred->BindExit();
996 break;
997 }
998
999 default:
1000 something_to_inline = false;
1001 break;
1002 }
1003
1004 if (!something_to_inline) {
1005 if (!reversed) {
1006 frame_->EmitPush(r0);
1007 __ mov(r0, Operand(value));
1008 frame_->EmitPush(r0);
1009 GenericBinaryOperation(op, mode, int_value);
1010 } else {
1011 __ mov(ip, Operand(value));
1012 frame_->EmitPush(ip);
1013 frame_->EmitPush(r0);
1014 GenericBinaryOperation(op, mode, kUnknownIntValue);
1015 }
1016 }
1017
1018 exit.Bind();
1019}
1020
1021
1022void CodeGenerator::Comparison(Condition cc,
1023 Expression* left,
1024 Expression* right,
1025 bool strict) {
1026 if (left != NULL) LoadAndSpill(left);
1027 if (right != NULL) LoadAndSpill(right);
1028
1029 VirtualFrame::SpilledScope spilled_scope;
1030 // sp[0] : y
1031 // sp[1] : x
1032 // result : cc register
1033
1034 // Strict only makes sense for equality comparisons.
1035 ASSERT(!strict || cc == eq);
1036
1037 JumpTarget exit;
1038 JumpTarget smi;
1039 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
1040 if (cc == gt || cc == le) {
1041 cc = ReverseCondition(cc);
1042 frame_->EmitPop(r1);
1043 frame_->EmitPop(r0);
1044 } else {
1045 frame_->EmitPop(r0);
1046 frame_->EmitPop(r1);
1047 }
1048 __ orr(r2, r0, Operand(r1));
1049 __ tst(r2, Operand(kSmiTagMask));
1050 smi.Branch(eq);
1051
1052 // Perform non-smi comparison by stub.
1053 // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
1054 // We call with 0 args because there are 0 on the stack.
1055 CompareStub stub(cc, strict);
1056 frame_->CallStub(&stub, 0);
1057 __ cmp(r0, Operand(0));
1058 exit.Jump();
1059
1060 // Do smi comparisons by pointer comparison.
1061 smi.Bind();
1062 __ cmp(r1, Operand(r0));
1063
1064 exit.Bind();
1065 cc_reg_ = cc;
1066}
1067
1068
1069class CallFunctionStub: public CodeStub {
1070 public:
1071 CallFunctionStub(int argc, InLoopFlag in_loop)
1072 : argc_(argc), in_loop_(in_loop) {}
1073
1074 void Generate(MacroAssembler* masm);
1075
1076 private:
1077 int argc_;
1078 InLoopFlag in_loop_;
1079
1080#if defined(DEBUG)
1081 void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
1082#endif // defined(DEBUG)
1083
1084 Major MajorKey() { return CallFunction; }
1085 int MinorKey() { return argc_; }
1086 InLoopFlag InLoop() { return in_loop_; }
1087};
1088
1089
1090// Call the function on the stack with the given arguments.
1091void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
1092 int position) {
1093 VirtualFrame::SpilledScope spilled_scope;
1094 // Push the arguments ("left-to-right") on the stack.
1095 int arg_count = args->length();
1096 for (int i = 0; i < arg_count; i++) {
1097 LoadAndSpill(args->at(i));
1098 }
1099
1100 // Record the position for debugging purposes.
1101 CodeForSourcePosition(position);
1102
1103 // Use the shared code stub to call the function.
1104 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
1105 CallFunctionStub call_function(arg_count, in_loop);
1106 frame_->CallStub(&call_function, arg_count + 1);
1107
1108 // Restore context and pop function from the stack.
1109 __ ldr(cp, frame_->Context());
1110 frame_->Drop(); // discard the TOS
1111}
1112
1113
1114void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
1115 VirtualFrame::SpilledScope spilled_scope;
1116 ASSERT(has_cc());
1117 Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
1118 target->Branch(cc);
1119 cc_reg_ = al;
1120}
1121
1122
1123void CodeGenerator::CheckStack() {
1124 VirtualFrame::SpilledScope spilled_scope;
1125 if (FLAG_check_stack) {
1126 Comment cmnt(masm_, "[ check stack");
1127 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
1128 // Put the lr setup instruction in the delay slot. kInstrSize is added to
1129 // the implicit 8 byte offset that always applies to operations with pc and
1130 // gives a return address 12 bytes down.
1131 masm_->add(lr, pc, Operand(Assembler::kInstrSize));
1132 masm_->cmp(sp, Operand(ip));
1133 StackCheckStub stub;
1134 // Call the stub if lower.
1135 masm_->mov(pc,
1136 Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
1137 RelocInfo::CODE_TARGET),
1138 LeaveCC,
1139 lo);
1140 }
1141}
1142
1143
1144void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
1145#ifdef DEBUG
1146 int original_height = frame_->height();
1147#endif
1148 VirtualFrame::SpilledScope spilled_scope;
1149 for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
1150 VisitAndSpill(statements->at(i));
1151 }
1152 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1153}
1154
1155
1156void CodeGenerator::VisitBlock(Block* node) {
1157#ifdef DEBUG
1158 int original_height = frame_->height();
1159#endif
1160 VirtualFrame::SpilledScope spilled_scope;
1161 Comment cmnt(masm_, "[ Block");
1162 CodeForStatementPosition(node);
1163 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1164 VisitStatementsAndSpill(node->statements());
1165 if (node->break_target()->is_linked()) {
1166 node->break_target()->Bind();
1167 }
1168 node->break_target()->Unuse();
1169 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1170}
1171
1172
1173void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
1174 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001175 frame_->EmitPush(cp);
Steve Blocka7e24c12009-10-30 11:49:00 +00001176 __ mov(r0, Operand(pairs));
1177 frame_->EmitPush(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001178 __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
1179 frame_->EmitPush(r0);
1180 frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
1181 // The result is discarded.
1182}
1183
1184
1185void CodeGenerator::VisitDeclaration(Declaration* node) {
1186#ifdef DEBUG
1187 int original_height = frame_->height();
1188#endif
1189 VirtualFrame::SpilledScope spilled_scope;
1190 Comment cmnt(masm_, "[ Declaration");
1191 Variable* var = node->proxy()->var();
1192 ASSERT(var != NULL); // must have been resolved
1193 Slot* slot = var->slot();
1194
1195 // If it was not possible to allocate the variable at compile time,
1196 // we need to "declare" it at runtime to make sure it actually
1197 // exists in the local context.
1198 if (slot != NULL && slot->type() == Slot::LOOKUP) {
1199 // Variables with a "LOOKUP" slot were introduced as non-locals
1200 // during variable resolution and must have mode DYNAMIC.
1201 ASSERT(var->is_dynamic());
1202 // For now, just do a runtime call.
1203 frame_->EmitPush(cp);
1204 __ mov(r0, Operand(var->name()));
1205 frame_->EmitPush(r0);
1206 // Declaration nodes are always declared in only two modes.
1207 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1208 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
1209 __ mov(r0, Operand(Smi::FromInt(attr)));
1210 frame_->EmitPush(r0);
1211 // Push initial value, if any.
1212 // Note: For variables we must not push an initial value (such as
1213 // 'undefined') because we may have a (legal) redeclaration and we
1214 // must not destroy the current value.
1215 if (node->mode() == Variable::CONST) {
1216 __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
1217 frame_->EmitPush(r0);
1218 } else if (node->fun() != NULL) {
1219 LoadAndSpill(node->fun());
1220 } else {
1221 __ mov(r0, Operand(0)); // no initial value!
1222 frame_->EmitPush(r0);
1223 }
1224 frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1225 // Ignore the return value (declarations are statements).
1226 ASSERT(frame_->height() == original_height);
1227 return;
1228 }
1229
1230 ASSERT(!var->is_global());
1231
1232 // If we have a function or a constant, we need to initialize the variable.
1233 Expression* val = NULL;
1234 if (node->mode() == Variable::CONST) {
1235 val = new Literal(Factory::the_hole_value());
1236 } else {
1237 val = node->fun(); // NULL if we don't have a function
1238 }
1239
1240 if (val != NULL) {
1241 {
1242 // Set initial value.
1243 Reference target(this, node->proxy());
1244 LoadAndSpill(val);
1245 target.SetValue(NOT_CONST_INIT);
1246 // The reference is removed from the stack (preserving TOS) when
1247 // it goes out of scope.
1248 }
1249 // Get rid of the assigned value (declarations are statements).
1250 frame_->Drop();
1251 }
1252 ASSERT(frame_->height() == original_height);
1253}
1254
1255
1256void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
1257#ifdef DEBUG
1258 int original_height = frame_->height();
1259#endif
1260 VirtualFrame::SpilledScope spilled_scope;
1261 Comment cmnt(masm_, "[ ExpressionStatement");
1262 CodeForStatementPosition(node);
1263 Expression* expression = node->expression();
1264 expression->MarkAsStatement();
1265 LoadAndSpill(expression);
1266 frame_->Drop();
1267 ASSERT(frame_->height() == original_height);
1268}
1269
1270
1271void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
1272#ifdef DEBUG
1273 int original_height = frame_->height();
1274#endif
1275 VirtualFrame::SpilledScope spilled_scope;
1276 Comment cmnt(masm_, "// EmptyStatement");
1277 CodeForStatementPosition(node);
1278 // nothing to do
1279 ASSERT(frame_->height() == original_height);
1280}
1281
1282
1283void CodeGenerator::VisitIfStatement(IfStatement* node) {
1284#ifdef DEBUG
1285 int original_height = frame_->height();
1286#endif
1287 VirtualFrame::SpilledScope spilled_scope;
1288 Comment cmnt(masm_, "[ IfStatement");
1289 // Generate different code depending on which parts of the if statement
1290 // are present or not.
1291 bool has_then_stm = node->HasThenStatement();
1292 bool has_else_stm = node->HasElseStatement();
1293
1294 CodeForStatementPosition(node);
1295
1296 JumpTarget exit;
1297 if (has_then_stm && has_else_stm) {
1298 Comment cmnt(masm_, "[ IfThenElse");
1299 JumpTarget then;
1300 JumpTarget else_;
1301 // if (cond)
1302 LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
1303 &then, &else_, true);
1304 if (frame_ != NULL) {
1305 Branch(false, &else_);
1306 }
1307 // then
1308 if (frame_ != NULL || then.is_linked()) {
1309 then.Bind();
1310 VisitAndSpill(node->then_statement());
1311 }
1312 if (frame_ != NULL) {
1313 exit.Jump();
1314 }
1315 // else
1316 if (else_.is_linked()) {
1317 else_.Bind();
1318 VisitAndSpill(node->else_statement());
1319 }
1320
1321 } else if (has_then_stm) {
1322 Comment cmnt(masm_, "[ IfThen");
1323 ASSERT(!has_else_stm);
1324 JumpTarget then;
1325 // if (cond)
1326 LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
1327 &then, &exit, true);
1328 if (frame_ != NULL) {
1329 Branch(false, &exit);
1330 }
1331 // then
1332 if (frame_ != NULL || then.is_linked()) {
1333 then.Bind();
1334 VisitAndSpill(node->then_statement());
1335 }
1336
1337 } else if (has_else_stm) {
1338 Comment cmnt(masm_, "[ IfElse");
1339 ASSERT(!has_then_stm);
1340 JumpTarget else_;
1341 // if (!cond)
1342 LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
1343 &exit, &else_, true);
1344 if (frame_ != NULL) {
1345 Branch(true, &exit);
1346 }
1347 // else
1348 if (frame_ != NULL || else_.is_linked()) {
1349 else_.Bind();
1350 VisitAndSpill(node->else_statement());
1351 }
1352
1353 } else {
1354 Comment cmnt(masm_, "[ If");
1355 ASSERT(!has_then_stm && !has_else_stm);
1356 // if (cond)
1357 LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
1358 &exit, &exit, false);
1359 if (frame_ != NULL) {
1360 if (has_cc()) {
1361 cc_reg_ = al;
1362 } else {
1363 frame_->Drop();
1364 }
1365 }
1366 }
1367
1368 // end
1369 if (exit.is_linked()) {
1370 exit.Bind();
1371 }
1372 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1373}
1374
1375
1376void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
1377 VirtualFrame::SpilledScope spilled_scope;
1378 Comment cmnt(masm_, "[ ContinueStatement");
1379 CodeForStatementPosition(node);
1380 node->target()->continue_target()->Jump();
1381}
1382
1383
1384void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
1385 VirtualFrame::SpilledScope spilled_scope;
1386 Comment cmnt(masm_, "[ BreakStatement");
1387 CodeForStatementPosition(node);
1388 node->target()->break_target()->Jump();
1389}
1390
1391
1392void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
1393 VirtualFrame::SpilledScope spilled_scope;
1394 Comment cmnt(masm_, "[ ReturnStatement");
1395
1396 CodeForStatementPosition(node);
1397 LoadAndSpill(node->expression());
1398 if (function_return_is_shadowed_) {
1399 frame_->EmitPop(r0);
1400 function_return_.Jump();
1401 } else {
1402 // Pop the result from the frame and prepare the frame for
1403 // returning thus making it easier to merge.
1404 frame_->EmitPop(r0);
1405 frame_->PrepareForReturn();
1406
1407 function_return_.Jump();
1408 }
1409}
1410
1411
1412void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1413#ifdef DEBUG
1414 int original_height = frame_->height();
1415#endif
1416 VirtualFrame::SpilledScope spilled_scope;
1417 Comment cmnt(masm_, "[ WithEnterStatement");
1418 CodeForStatementPosition(node);
1419 LoadAndSpill(node->expression());
1420 if (node->is_catch_block()) {
1421 frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1422 } else {
1423 frame_->CallRuntime(Runtime::kPushContext, 1);
1424 }
1425#ifdef DEBUG
1426 JumpTarget verified_true;
1427 __ cmp(r0, Operand(cp));
1428 verified_true.Branch(eq);
1429 __ stop("PushContext: r0 is expected to be the same as cp");
1430 verified_true.Bind();
1431#endif
1432 // Update context local.
1433 __ str(cp, frame_->Context());
1434 ASSERT(frame_->height() == original_height);
1435}
1436
1437
1438void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1439#ifdef DEBUG
1440 int original_height = frame_->height();
1441#endif
1442 VirtualFrame::SpilledScope spilled_scope;
1443 Comment cmnt(masm_, "[ WithExitStatement");
1444 CodeForStatementPosition(node);
1445 // Pop context.
1446 __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
1447 // Update context local.
1448 __ str(cp, frame_->Context());
1449 ASSERT(frame_->height() == original_height);
1450}
1451
1452
1453void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
1454#ifdef DEBUG
1455 int original_height = frame_->height();
1456#endif
1457 VirtualFrame::SpilledScope spilled_scope;
1458 Comment cmnt(masm_, "[ SwitchStatement");
1459 CodeForStatementPosition(node);
1460 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1461
1462 LoadAndSpill(node->tag());
1463
1464 JumpTarget next_test;
1465 JumpTarget fall_through;
1466 JumpTarget default_entry;
1467 JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
1468 ZoneList<CaseClause*>* cases = node->cases();
1469 int length = cases->length();
1470 CaseClause* default_clause = NULL;
1471
1472 for (int i = 0; i < length; i++) {
1473 CaseClause* clause = cases->at(i);
1474 if (clause->is_default()) {
1475 // Remember the default clause and compile it at the end.
1476 default_clause = clause;
1477 continue;
1478 }
1479
1480 Comment cmnt(masm_, "[ Case clause");
1481 // Compile the test.
1482 next_test.Bind();
1483 next_test.Unuse();
1484 // Duplicate TOS.
1485 __ ldr(r0, frame_->Top());
1486 frame_->EmitPush(r0);
1487 Comparison(eq, NULL, clause->label(), true);
1488 Branch(false, &next_test);
1489
1490 // Before entering the body from the test, remove the switch value from
1491 // the stack.
1492 frame_->Drop();
1493
1494 // Label the body so that fall through is enabled.
1495 if (i > 0 && cases->at(i - 1)->is_default()) {
1496 default_exit.Bind();
1497 } else {
1498 fall_through.Bind();
1499 fall_through.Unuse();
1500 }
1501 VisitStatementsAndSpill(clause->statements());
1502
1503 // If control flow can fall through from the body, jump to the next body
1504 // or the end of the statement.
1505 if (frame_ != NULL) {
1506 if (i < length - 1 && cases->at(i + 1)->is_default()) {
1507 default_entry.Jump();
1508 } else {
1509 fall_through.Jump();
1510 }
1511 }
1512 }
1513
1514 // The final "test" removes the switch value.
1515 next_test.Bind();
1516 frame_->Drop();
1517
1518 // If there is a default clause, compile it.
1519 if (default_clause != NULL) {
1520 Comment cmnt(masm_, "[ Default clause");
1521 default_entry.Bind();
1522 VisitStatementsAndSpill(default_clause->statements());
1523 // If control flow can fall out of the default and there is a case after
1524 // it, jup to that case's body.
1525 if (frame_ != NULL && default_exit.is_bound()) {
1526 default_exit.Jump();
1527 }
1528 }
1529
1530 if (fall_through.is_linked()) {
1531 fall_through.Bind();
1532 }
1533
1534 if (node->break_target()->is_linked()) {
1535 node->break_target()->Bind();
1536 }
1537 node->break_target()->Unuse();
1538 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1539}
1540
1541
Steve Block3ce2e202009-11-05 08:53:23 +00001542void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001543#ifdef DEBUG
1544 int original_height = frame_->height();
1545#endif
1546 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001547 Comment cmnt(masm_, "[ DoWhileStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001548 CodeForStatementPosition(node);
1549 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
Steve Block3ce2e202009-11-05 08:53:23 +00001550 JumpTarget body(JumpTarget::BIDIRECTIONAL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001551
Steve Block3ce2e202009-11-05 08:53:23 +00001552 // Label the top of the loop for the backward CFG edge. If the test
1553 // is always true we can use the continue target, and if the test is
1554 // always false there is no need.
1555 ConditionAnalysis info = AnalyzeCondition(node->cond());
1556 switch (info) {
1557 case ALWAYS_TRUE:
Steve Blocka7e24c12009-10-30 11:49:00 +00001558 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1559 node->continue_target()->Bind();
Steve Block3ce2e202009-11-05 08:53:23 +00001560 break;
1561 case ALWAYS_FALSE:
1562 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1563 break;
1564 case DONT_KNOW:
1565 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1566 body.Bind();
1567 break;
1568 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001569
Steve Block3ce2e202009-11-05 08:53:23 +00001570 CheckStack(); // TODO(1222600): ignore if body contains calls.
1571 VisitAndSpill(node->body());
Steve Blocka7e24c12009-10-30 11:49:00 +00001572
Steve Block3ce2e202009-11-05 08:53:23 +00001573 // Compile the test.
1574 switch (info) {
1575 case ALWAYS_TRUE:
1576 // If control can fall off the end of the body, jump back to the
1577 // top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001578 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001579 node->continue_target()->Jump();
Steve Blocka7e24c12009-10-30 11:49:00 +00001580 }
1581 break;
Steve Block3ce2e202009-11-05 08:53:23 +00001582 case ALWAYS_FALSE:
1583 // If we have a continue in the body, we only have to bind its
1584 // jump target.
1585 if (node->continue_target()->is_linked()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001586 node->continue_target()->Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001587 }
Steve Block3ce2e202009-11-05 08:53:23 +00001588 break;
1589 case DONT_KNOW:
1590 // We have to compile the test expression if it can be reached by
1591 // control flow falling out of the body or via continue.
1592 if (node->continue_target()->is_linked()) {
1593 node->continue_target()->Bind();
1594 }
1595 if (has_valid_frame()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001596 LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
1597 &body, node->break_target(), true);
1598 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001599 // A invalid frame here indicates that control did not
1600 // fall out of the test expression.
1601 Branch(true, &body);
Steve Blocka7e24c12009-10-30 11:49:00 +00001602 }
1603 }
1604 break;
Steve Blocka7e24c12009-10-30 11:49:00 +00001605 }
1606
1607 if (node->break_target()->is_linked()) {
1608 node->break_target()->Bind();
1609 }
Steve Block3ce2e202009-11-05 08:53:23 +00001610 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1611}
1612
1613
1614void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1615#ifdef DEBUG
1616 int original_height = frame_->height();
1617#endif
1618 VirtualFrame::SpilledScope spilled_scope;
1619 Comment cmnt(masm_, "[ WhileStatement");
1620 CodeForStatementPosition(node);
1621
1622 // If the test is never true and has no side effects there is no need
1623 // to compile the test or body.
1624 ConditionAnalysis info = AnalyzeCondition(node->cond());
1625 if (info == ALWAYS_FALSE) return;
1626
1627 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1628
1629 // Label the top of the loop with the continue target for the backward
1630 // CFG edge.
1631 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1632 node->continue_target()->Bind();
1633
1634 if (info == DONT_KNOW) {
1635 JumpTarget body;
1636 LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
1637 &body, node->break_target(), true);
1638 if (has_valid_frame()) {
1639 // A NULL frame indicates that control did not fall out of the
1640 // test expression.
1641 Branch(false, node->break_target());
1642 }
1643 if (has_valid_frame() || body.is_linked()) {
1644 body.Bind();
1645 }
1646 }
1647
1648 if (has_valid_frame()) {
1649 CheckStack(); // TODO(1222600): ignore if body contains calls.
1650 VisitAndSpill(node->body());
1651
1652 // If control flow can fall out of the body, jump back to the top.
1653 if (has_valid_frame()) {
1654 node->continue_target()->Jump();
1655 }
1656 }
1657 if (node->break_target()->is_linked()) {
1658 node->break_target()->Bind();
1659 }
1660 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1661}
1662
1663
1664void CodeGenerator::VisitForStatement(ForStatement* node) {
1665#ifdef DEBUG
1666 int original_height = frame_->height();
1667#endif
1668 VirtualFrame::SpilledScope spilled_scope;
1669 Comment cmnt(masm_, "[ ForStatement");
1670 CodeForStatementPosition(node);
1671 if (node->init() != NULL) {
1672 VisitAndSpill(node->init());
1673 }
1674
1675 // If the test is never true there is no need to compile the test or
1676 // body.
1677 ConditionAnalysis info = AnalyzeCondition(node->cond());
1678 if (info == ALWAYS_FALSE) return;
1679
1680 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1681
1682 // If there is no update statement, label the top of the loop with the
1683 // continue target, otherwise with the loop target.
1684 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1685 if (node->next() == NULL) {
1686 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1687 node->continue_target()->Bind();
1688 } else {
1689 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1690 loop.Bind();
1691 }
1692
1693 // If the test is always true, there is no need to compile it.
1694 if (info == DONT_KNOW) {
1695 JumpTarget body;
1696 LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
1697 &body, node->break_target(), true);
1698 if (has_valid_frame()) {
1699 Branch(false, node->break_target());
1700 }
1701 if (has_valid_frame() || body.is_linked()) {
1702 body.Bind();
1703 }
1704 }
1705
1706 if (has_valid_frame()) {
1707 CheckStack(); // TODO(1222600): ignore if body contains calls.
1708 VisitAndSpill(node->body());
1709
1710 if (node->next() == NULL) {
1711 // If there is no update statement and control flow can fall out
1712 // of the loop, jump directly to the continue label.
1713 if (has_valid_frame()) {
1714 node->continue_target()->Jump();
1715 }
1716 } else {
1717 // If there is an update statement and control flow can reach it
1718 // via falling out of the body of the loop or continuing, we
1719 // compile the update statement.
1720 if (node->continue_target()->is_linked()) {
1721 node->continue_target()->Bind();
1722 }
1723 if (has_valid_frame()) {
1724 // Record source position of the statement as this code which is
1725 // after the code for the body actually belongs to the loop
1726 // statement and not the body.
1727 CodeForStatementPosition(node);
1728 VisitAndSpill(node->next());
1729 loop.Jump();
1730 }
1731 }
1732 }
1733 if (node->break_target()->is_linked()) {
1734 node->break_target()->Bind();
1735 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001736 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1737}
1738
1739
1740void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1741#ifdef DEBUG
1742 int original_height = frame_->height();
1743#endif
1744 VirtualFrame::SpilledScope spilled_scope;
1745 Comment cmnt(masm_, "[ ForInStatement");
1746 CodeForStatementPosition(node);
1747
1748 JumpTarget primitive;
1749 JumpTarget jsobject;
1750 JumpTarget fixed_array;
1751 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1752 JumpTarget end_del_check;
1753 JumpTarget exit;
1754
1755 // Get the object to enumerate over (converted to JSObject).
1756 LoadAndSpill(node->enumerable());
1757
1758  // Both SpiderMonkey and kjs ignore null and undefined in contrast
1759  // to the specification; section 12.6.4 mandates a call to ToObject.
1760 frame_->EmitPop(r0);
1761 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
1762 __ cmp(r0, ip);
1763 exit.Branch(eq);
1764 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1765 __ cmp(r0, ip);
1766 exit.Branch(eq);
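  // For illustration (hypothetical snippet): a loop such as
  //   for (var p in null) {}
  // or
  //   for (var p in undefined) {}
  // runs zero iterations because of the two early exits above, rather
  // than calling ToObject and throwing as 12.6.4 would require.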
1767
1768 // Stack layout in body:
1769 // [iteration counter (Smi)]
1770 // [length of array]
1771 // [FixedArray]
1772 // [Map or 0]
1773 // [Object]
1774
1775 // Check if enumerable is already a JSObject
1776 __ tst(r0, Operand(kSmiTagMask));
1777 primitive.Branch(eq);
1778 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
1779 jsobject.Branch(hs);
1780
1781 primitive.Bind();
1782 frame_->EmitPush(r0);
1783 Result arg_count(r0);
1784 __ mov(r0, Operand(0));
1785 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
1786
1787 jsobject.Bind();
1788 // Get the set of properties (as a FixedArray or Map).
1789 frame_->EmitPush(r0); // duplicate the object being enumerated
1790 frame_->EmitPush(r0);
1791 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1792
1793 // If we got a Map, we can do a fast modification check.
1794 // Otherwise, we got a FixedArray, and we have to do a slow check.
1795 __ mov(r2, Operand(r0));
1796 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
1797 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
1798 __ cmp(r1, ip);
1799 fixed_array.Branch(ne);
1800
1801 // Get enum cache
1802 __ mov(r1, Operand(r0));
1803 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
1804 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
1805 __ ldr(r2,
1806 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
1807
1808 frame_->EmitPush(r0); // map
1809 frame_->EmitPush(r2); // enum cache bridge cache
1810 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
1811 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1812 frame_->EmitPush(r0);
1813 __ mov(r0, Operand(Smi::FromInt(0)));
1814 frame_->EmitPush(r0);
1815 entry.Jump();
1816
1817 fixed_array.Bind();
1818 __ mov(r1, Operand(Smi::FromInt(0)));
1819 frame_->EmitPush(r1); // insert 0 in place of Map
1820 frame_->EmitPush(r0);
1821
1822 // Push the length of the array and the initial index onto the stack.
1823 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
1824 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1825 frame_->EmitPush(r0);
1826 __ mov(r0, Operand(Smi::FromInt(0))); // init index
1827 frame_->EmitPush(r0);
1828
1829 // Condition.
1830 entry.Bind();
1831 // sp[0] : index
1832 // sp[1] : array/enum cache length
1833 // sp[2] : array or enum cache
1834 // sp[3] : 0 or map
1835 // sp[4] : enumerable
1836 // Grab the current frame's height for the break and continue
1837 // targets only after all the state is pushed on the frame.
1838 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1839 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1840
1841 __ ldr(r0, frame_->ElementAt(0)); // load the current count
1842 __ ldr(r1, frame_->ElementAt(1)); // load the length
1843 __ cmp(r0, Operand(r1)); // compare to the array length
1844 node->break_target()->Branch(hs);
1845
1846 __ ldr(r0, frame_->ElementAt(0));
1847
1848 // Get the i'th entry of the array.
1849 __ ldr(r2, frame_->ElementAt(2));
1850 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1851 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1852
1853 // Get Map or 0.
1854 __ ldr(r2, frame_->ElementAt(3));
1855 // Check if this (still) matches the map of the enumerable.
1856 // If not, we have to filter the key.
1857 __ ldr(r1, frame_->ElementAt(4));
1858 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
1859 __ cmp(r1, Operand(r2));
1860 end_del_check.Branch(eq);
1861
1862 // Convert the entry to a string (or null if it isn't a property anymore).
1863 __ ldr(r0, frame_->ElementAt(4)); // push enumerable
1864 frame_->EmitPush(r0);
1865 frame_->EmitPush(r3); // push entry
1866 Result arg_count_reg(r0);
1867 __ mov(r0, Operand(1));
1868 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
1869 __ mov(r3, Operand(r0));
1870
1871 // If the property has been removed while iterating, we just skip it.
1872 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1873 __ cmp(r3, ip);
1874 node->continue_target()->Branch(eq);
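  // For illustration (hypothetical snippet): if the body removes a
  // property that has not been visited yet, as in
  //   for (var key in obj) { delete obj.other; }
  // FILTER_KEY returns null for 'other' once its turn comes, and the
  // branch above skips it instead of running the body for it.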
1875
1876 end_del_check.Bind();
1877 // Store the entry in the 'each' expression and take another spin in the
1878  // loop. r3: i'th entry of the enum cache (or string thereof)
1879 frame_->EmitPush(r3); // push entry
1880 { Reference each(this, node->each());
1881 if (!each.is_illegal()) {
1882 if (each.size() > 0) {
1883 __ ldr(r0, frame_->ElementAt(each.size()));
1884 frame_->EmitPush(r0);
1885 }
1886 // If the reference was to a slot we rely on the convenient property
1887 // that it doesn't matter whether a value (eg, r3 pushed above) is
1888 // right on top of or right underneath a zero-sized reference.
1889 each.SetValue(NOT_CONST_INIT);
1890 if (each.size() > 0) {
1891 // It's safe to pop the value lying on top of the reference before
1892 // unloading the reference itself (which preserves the top of stack,
1893 // ie, now the topmost value of the non-zero sized reference), since
1894 // we will discard the top of stack after unloading the reference
1895 // anyway.
1896 frame_->EmitPop(r0);
1897 }
1898 }
1899 }
1900 // Discard the i'th entry pushed above or else the remainder of the
1901 // reference, whichever is currently on top of the stack.
1902 frame_->Drop();
1903
1904 // Body.
1905 CheckStack(); // TODO(1222600): ignore if body contains calls.
1906 VisitAndSpill(node->body());
1907
1908 // Next. Reestablish a spilled frame in case we are coming here via
1909 // a continue in the body.
1910 node->continue_target()->Bind();
1911 frame_->SpillAll();
1912 frame_->EmitPop(r0);
1913 __ add(r0, r0, Operand(Smi::FromInt(1)));
1914 frame_->EmitPush(r0);
1915 entry.Jump();
1916
1917 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1918 // any frame.
1919 node->break_target()->Bind();
1920 frame_->Drop(5);
1921
1922 // Exit.
1923 exit.Bind();
1924 node->continue_target()->Unuse();
1925 node->break_target()->Unuse();
1926 ASSERT(frame_->height() == original_height);
1927}
1928
1929
1930void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
1931#ifdef DEBUG
1932 int original_height = frame_->height();
1933#endif
1934 VirtualFrame::SpilledScope spilled_scope;
1935  Comment cmnt(masm_, "[ TryCatchStatement");
1936  CodeForStatementPosition(node);
1937
1938 JumpTarget try_block;
1939 JumpTarget exit;
1940
1941 try_block.Call();
1942 // --- Catch block ---
1943 frame_->EmitPush(r0);
1944
1945 // Store the caught exception in the catch variable.
1946 { Reference ref(this, node->catch_var());
1947 ASSERT(ref.is_slot());
1948 // Here we make use of the convenient property that it doesn't matter
1949 // whether a value is immediately on top of or underneath a zero-sized
1950 // reference.
1951 ref.SetValue(NOT_CONST_INIT);
1952 }
1953
1954 // Remove the exception from the stack.
1955 frame_->Drop();
1956
1957 VisitStatementsAndSpill(node->catch_block()->statements());
1958 if (frame_ != NULL) {
1959 exit.Jump();
1960 }
1961
1962
1963 // --- Try block ---
1964 try_block.Bind();
1965
1966 frame_->PushTryHandler(TRY_CATCH_HANDLER);
1967 int handler_height = frame_->height();
1968
1969 // Shadow the labels for all escapes from the try block, including
1970 // returns. During shadowing, the original label is hidden as the
1971 // LabelShadow and operations on the original actually affect the
1972 // shadowing label.
1973 //
1974 // We should probably try to unify the escaping labels and the return
1975 // label.
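  // For illustration (hypothetical snippet): in
  //   function f() { try { return g(); } catch (e) { return 0; } }
  // the 'return' inside the try block is such an escape. It is routed
  // through the shadow target so that the try handler can be unlinked
  // before the actual return is taken.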
1976 int nof_escapes = node->escaping_targets()->length();
1977 List<ShadowTarget*> shadows(1 + nof_escapes);
1978
1979 // Add the shadow target for the function return.
1980 static const int kReturnShadowIndex = 0;
1981 shadows.Add(new ShadowTarget(&function_return_));
1982 bool function_return_was_shadowed = function_return_is_shadowed_;
1983 function_return_is_shadowed_ = true;
1984 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
1985
1986 // Add the remaining shadow targets.
1987 for (int i = 0; i < nof_escapes; i++) {
1988 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
1989 }
1990
1991 // Generate code for the statements in the try block.
1992 VisitStatementsAndSpill(node->try_block()->statements());
1993
1994 // Stop the introduced shadowing and count the number of required unlinks.
1995 // After shadowing stops, the original labels are unshadowed and the
1996 // LabelShadows represent the formerly shadowing labels.
1997 bool has_unlinks = false;
1998 for (int i = 0; i < shadows.length(); i++) {
1999 shadows[i]->StopShadowing();
2000 has_unlinks = has_unlinks || shadows[i]->is_linked();
2001 }
2002 function_return_is_shadowed_ = function_return_was_shadowed;
2003
2004 // Get an external reference to the handler address.
2005 ExternalReference handler_address(Top::k_handler_address);
2006
2007 // If we can fall off the end of the try block, unlink from try chain.
2008 if (has_valid_frame()) {
2009 // The next handler address is on top of the frame. Unlink from
2010 // the handler list and drop the rest of this handler from the
2011 // frame.
2012 ASSERT(StackHandlerConstants::kNextOffset == 0);
2013 frame_->EmitPop(r1);
2014 __ mov(r3, Operand(handler_address));
2015 __ str(r1, MemOperand(r3));
2016 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2017 if (has_unlinks) {
2018 exit.Jump();
2019 }
2020 }
2021
2022 // Generate unlink code for the (formerly) shadowing labels that have been
2023 // jumped to. Deallocate each shadow target.
2024 for (int i = 0; i < shadows.length(); i++) {
2025 if (shadows[i]->is_linked()) {
2026      // Unlink from the try chain.
2027 shadows[i]->Bind();
2028 // Because we can be jumping here (to spilled code) from unspilled
2029 // code, we need to reestablish a spilled frame at this block.
2030 frame_->SpillAll();
2031
2032 // Reload sp from the top handler, because some statements that we
2033 // break from (eg, for...in) may have left stuff on the stack.
2034 __ mov(r3, Operand(handler_address));
2035 __ ldr(sp, MemOperand(r3));
2036 frame_->Forget(frame_->height() - handler_height);
2037
2038 ASSERT(StackHandlerConstants::kNextOffset == 0);
2039 frame_->EmitPop(r1);
2040 __ str(r1, MemOperand(r3));
2041 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2042
2043 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2044 frame_->PrepareForReturn();
2045 }
2046 shadows[i]->other_target()->Jump();
2047 }
2048 }
2049
2050 exit.Bind();
2051 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2052}
2053
2054
2055void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
2056#ifdef DEBUG
2057 int original_height = frame_->height();
2058#endif
2059 VirtualFrame::SpilledScope spilled_scope;
2060  Comment cmnt(masm_, "[ TryFinallyStatement");
2061  CodeForStatementPosition(node);
2062
2063 // State: Used to keep track of reason for entering the finally
2064 // block. Should probably be extended to hold information for
2065 // break/continue from within the try block.
2066 enum { FALLING, THROWING, JUMPING };
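  // For illustration (hypothetical snippet): in
  //   try { if (c) return r; mayThrow(); } finally { cleanup(); }
  // the finally block can be entered in state JUMPING (via the
  // shadowed return), THROWING (if mayThrow() throws), or FALLING
  // (if the try block completes normally).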
2067
2068 JumpTarget try_block;
2069 JumpTarget finally_block;
2070
2071 try_block.Call();
2072
2073 frame_->EmitPush(r0); // save exception object on the stack
2074 // In case of thrown exceptions, this is where we continue.
2075 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2076 finally_block.Jump();
2077
2078 // --- Try block ---
2079 try_block.Bind();
2080
2081 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2082 int handler_height = frame_->height();
2083
2084 // Shadow the labels for all escapes from the try block, including
2085 // returns. Shadowing hides the original label as the LabelShadow and
2086 // operations on the original actually affect the shadowing label.
2087 //
2088 // We should probably try to unify the escaping labels and the return
2089 // label.
2090 int nof_escapes = node->escaping_targets()->length();
2091 List<ShadowTarget*> shadows(1 + nof_escapes);
2092
2093 // Add the shadow target for the function return.
2094 static const int kReturnShadowIndex = 0;
2095 shadows.Add(new ShadowTarget(&function_return_));
2096 bool function_return_was_shadowed = function_return_is_shadowed_;
2097 function_return_is_shadowed_ = true;
2098 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2099
2100 // Add the remaining shadow targets.
2101 for (int i = 0; i < nof_escapes; i++) {
2102 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2103 }
2104
2105 // Generate code for the statements in the try block.
2106 VisitStatementsAndSpill(node->try_block()->statements());
2107
2108 // Stop the introduced shadowing and count the number of required unlinks.
2109 // After shadowing stops, the original labels are unshadowed and the
2110 // LabelShadows represent the formerly shadowing labels.
2111 int nof_unlinks = 0;
2112 for (int i = 0; i < shadows.length(); i++) {
2113 shadows[i]->StopShadowing();
2114 if (shadows[i]->is_linked()) nof_unlinks++;
2115 }
2116 function_return_is_shadowed_ = function_return_was_shadowed;
2117
2118 // Get an external reference to the handler address.
2119 ExternalReference handler_address(Top::k_handler_address);
2120
2121 // If we can fall off the end of the try block, unlink from the try
2122 // chain and set the state on the frame to FALLING.
2123 if (has_valid_frame()) {
2124 // The next handler address is on top of the frame.
2125 ASSERT(StackHandlerConstants::kNextOffset == 0);
2126 frame_->EmitPop(r1);
2127 __ mov(r3, Operand(handler_address));
2128 __ str(r1, MemOperand(r3));
2129 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2130
2131 // Fake a top of stack value (unneeded when FALLING) and set the
2132 // state in r2, then jump around the unlink blocks if any.
2133 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2134 frame_->EmitPush(r0);
2135 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2136 if (nof_unlinks > 0) {
2137 finally_block.Jump();
2138 }
2139 }
2140
2141 // Generate code to unlink and set the state for the (formerly)
2142 // shadowing targets that have been jumped to.
2143 for (int i = 0; i < shadows.length(); i++) {
2144 if (shadows[i]->is_linked()) {
2145 // If we have come from the shadowed return, the return value is
2146 // in (a non-refcounted reference to) r0. We must preserve it
2147 // until it is pushed.
2148 //
2149 // Because we can be jumping here (to spilled code) from
2150 // unspilled code, we need to reestablish a spilled frame at
2151 // this block.
2152 shadows[i]->Bind();
2153 frame_->SpillAll();
2154
2155 // Reload sp from the top handler, because some statements that
2156 // we break from (eg, for...in) may have left stuff on the
2157 // stack.
2158 __ mov(r3, Operand(handler_address));
2159 __ ldr(sp, MemOperand(r3));
2160 frame_->Forget(frame_->height() - handler_height);
2161
2162 // Unlink this handler and drop it from the frame. The next
2163 // handler address is currently on top of the frame.
2164 ASSERT(StackHandlerConstants::kNextOffset == 0);
2165 frame_->EmitPop(r1);
2166 __ str(r1, MemOperand(r3));
2167 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2168
2169 if (i == kReturnShadowIndex) {
2170 // If this label shadowed the function return, materialize the
2171 // return value on the stack.
2172 frame_->EmitPush(r0);
2173 } else {
2174 // Fake TOS for targets that shadowed breaks and continues.
2175 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2176 frame_->EmitPush(r0);
2177 }
2178 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2179 if (--nof_unlinks > 0) {
2180 // If this is not the last unlink block, jump around the next.
2181 finally_block.Jump();
2182 }
2183 }
2184 }
2185
2186 // --- Finally block ---
2187 finally_block.Bind();
2188
2189 // Push the state on the stack.
2190 frame_->EmitPush(r2);
2191
2192 // We keep two elements on the stack - the (possibly faked) result
2193 // and the state - while evaluating the finally block.
2194 //
2195 // Generate code for the statements in the finally block.
2196 VisitStatementsAndSpill(node->finally_block()->statements());
2197
2198 if (has_valid_frame()) {
2199 // Restore state and return value or faked TOS.
2200 frame_->EmitPop(r2);
2201 frame_->EmitPop(r0);
2202 }
2203
2204 // Generate code to jump to the right destination for all used
2205 // formerly shadowing targets. Deallocate each shadow target.
2206 for (int i = 0; i < shadows.length(); i++) {
2207 if (has_valid_frame() && shadows[i]->is_bound()) {
2208 JumpTarget* original = shadows[i]->other_target();
2209 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2210 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2211 JumpTarget skip;
2212 skip.Branch(ne);
2213 frame_->PrepareForReturn();
2214 original->Jump();
2215 skip.Bind();
2216 } else {
2217 original->Branch(eq);
2218 }
2219 }
2220 }
2221
2222 if (has_valid_frame()) {
2223 // Check if we need to rethrow the exception.
2224 JumpTarget exit;
2225 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2226 exit.Branch(ne);
2227
2228 // Rethrow exception.
2229 frame_->EmitPush(r0);
2230 frame_->CallRuntime(Runtime::kReThrow, 1);
2231
2232 // Done.
2233 exit.Bind();
2234 }
2235 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2236}
2237
2238
2239void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2240#ifdef DEBUG
2241 int original_height = frame_->height();
2242#endif
2243 VirtualFrame::SpilledScope spilled_scope;
2244  Comment cmnt(masm_, "[ DebuggerStatement");
2245 CodeForStatementPosition(node);
2246#ifdef ENABLE_DEBUGGER_SUPPORT
2247 frame_->CallRuntime(Runtime::kDebugBreak, 0);
2248#endif
2249 // Ignore the return value.
2250 ASSERT(frame_->height() == original_height);
2251}
2252
2253
2254void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2255 VirtualFrame::SpilledScope spilled_scope;
2256 ASSERT(boilerplate->IsBoilerplate());
2257
2258  // Create a new closure.
2259 frame_->EmitPush(cp);
2260  __ mov(r0, Operand(boilerplate));
2261 frame_->EmitPush(r0);
2262  frame_->CallRuntime(Runtime::kNewClosure, 2);
2263 frame_->EmitPush(r0);
2264}
2265
2266
2267void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2268#ifdef DEBUG
2269 int original_height = frame_->height();
2270#endif
2271 VirtualFrame::SpilledScope spilled_scope;
2272 Comment cmnt(masm_, "[ FunctionLiteral");
2273
2274 // Build the function boilerplate and instantiate it.
2275 Handle<JSFunction> boilerplate = BuildBoilerplate(node);
2276 // Check for stack-overflow exception.
2277 if (HasStackOverflow()) {
2278 ASSERT(frame_->height() == original_height);
2279 return;
2280 }
2281 InstantiateBoilerplate(boilerplate);
2282 ASSERT(frame_->height() == original_height + 1);
2283}
2284
2285
2286void CodeGenerator::VisitFunctionBoilerplateLiteral(
2287 FunctionBoilerplateLiteral* node) {
2288#ifdef DEBUG
2289 int original_height = frame_->height();
2290#endif
2291 VirtualFrame::SpilledScope spilled_scope;
2292 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2293 InstantiateBoilerplate(node->boilerplate());
2294 ASSERT(frame_->height() == original_height + 1);
2295}
2296
2297
2298void CodeGenerator::VisitConditional(Conditional* node) {
2299#ifdef DEBUG
2300 int original_height = frame_->height();
2301#endif
2302 VirtualFrame::SpilledScope spilled_scope;
2303 Comment cmnt(masm_, "[ Conditional");
2304 JumpTarget then;
2305 JumpTarget else_;
2306 LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
2307 &then, &else_, true);
2308 if (has_valid_frame()) {
2309 Branch(false, &else_);
2310 }
2311 if (has_valid_frame() || then.is_linked()) {
2312 then.Bind();
2313 LoadAndSpill(node->then_expression(), typeof_state());
2314 }
2315 if (else_.is_linked()) {
2316 JumpTarget exit;
2317 if (has_valid_frame()) exit.Jump();
2318 else_.Bind();
2319 LoadAndSpill(node->else_expression(), typeof_state());
2320 if (exit.is_linked()) exit.Bind();
2321 }
2322 ASSERT(frame_->height() == original_height + 1);
2323}
2324
2325
2326void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
2327 VirtualFrame::SpilledScope spilled_scope;
2328 if (slot->type() == Slot::LOOKUP) {
2329 ASSERT(slot->var()->is_dynamic());
2330
2331 JumpTarget slow;
2332 JumpTarget done;
2333
2334 // Generate fast-case code for variables that might be shadowed by
2335 // eval-introduced variables. Eval is used a lot without
2336 // introducing variables. In those cases, we do not want to
2337 // perform a runtime call for all variables in the scope
2338 // containing the eval.
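    // For illustration (hypothetical snippet): in
    //   function f(s) { eval(s); return x; }
    // the eval'd code may introduce a local 'x' that shadows the
    // global one, so 'x' is compiled as a dynamic lookup; the fast
    // case below covers executions where no such shadowing happened.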
2339 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
2340 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
2341 // If there was no control flow to slow, we can exit early.
2342 if (!slow.is_linked()) {
2343 frame_->EmitPush(r0);
2344 return;
2345 }
2346
2347 done.Jump();
2348
2349 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
2350 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
2351 // Only generate the fast case for locals that rewrite to slots.
2352 // This rules out argument loads.
2353 if (potential_slot != NULL) {
2354 __ ldr(r0,
2355 ContextSlotOperandCheckExtensions(potential_slot,
2356 r1,
2357 r2,
2358 &slow));
2359 if (potential_slot->var()->mode() == Variable::CONST) {
2360 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2361 __ cmp(r0, ip);
2362 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2363 }
2364 // There is always control flow to slow from
2365 // ContextSlotOperandCheckExtensions so we have to jump around
2366 // it.
2367 done.Jump();
2368 }
2369 }
2370
2371 slow.Bind();
2372 frame_->EmitPush(cp);
2373 __ mov(r0, Operand(slot->var()->name()));
2374 frame_->EmitPush(r0);
2375
2376 if (typeof_state == INSIDE_TYPEOF) {
2377 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2378 } else {
2379 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2380 }
2381
2382 done.Bind();
2383 frame_->EmitPush(r0);
2384
2385 } else {
2386 // Note: We would like to keep the assert below, but it fires because of
2387 // some nasty code in LoadTypeofExpression() which should be removed...
2388 // ASSERT(!slot->var()->is_dynamic());
2389
2390 // Special handling for locals allocated in registers.
2391 __ ldr(r0, SlotOperand(slot, r2));
2392 frame_->EmitPush(r0);
2393 if (slot->var()->mode() == Variable::CONST) {
2394 // Const slots may contain 'the hole' value (the constant hasn't been
2395 // initialized yet) which needs to be converted into the 'undefined'
2396 // value.
2397 Comment cmnt(masm_, "[ Unhole const");
2398 frame_->EmitPop(r0);
2399 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2400 __ cmp(r0, ip);
2401 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2402 frame_->EmitPush(r0);
2403 }
2404 }
2405}
2406
2407
2408void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
2409 TypeofState typeof_state,
2410 Register tmp,
2411 Register tmp2,
2412 JumpTarget* slow) {
2413 // Check that no extension objects have been created by calls to
2414 // eval from the current scope to the global scope.
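  // For illustration (hypothetical snippet): a call such as
  //   eval("var y = 1;")
  // in one of the enclosing scopes puts 'y' on that context's
  // extension object. If any extension object turns out to be
  // non-NULL below, we branch to the slow case instead of using the
  // global load IC.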
2415 Register context = cp;
2416 Scope* s = scope();
2417 while (s != NULL) {
2418 if (s->num_heap_slots() > 0) {
2419 if (s->calls_eval()) {
2420 // Check that extension is NULL.
2421 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
2422 __ tst(tmp2, tmp2);
2423 slow->Branch(ne);
2424 }
2425 // Load next context in chain.
2426 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
2427 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2428 context = tmp;
2429 }
2430 // If no outer scope calls eval, we do not need to check more
2431 // context extensions.
2432 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2433 s = s->outer_scope();
2434 }
2435
2436 if (s->is_eval_scope()) {
2437 Label next, fast;
2438 if (!context.is(tmp)) {
2439 __ mov(tmp, Operand(context));
2440 }
2441 __ bind(&next);
2442 // Terminate at global context.
2443 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2444 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
2445 __ cmp(tmp2, ip);
2446 __ b(eq, &fast);
2447 // Check that extension is NULL.
2448 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2449 __ tst(tmp2, tmp2);
2450 slow->Branch(ne);
2451 // Load next context in chain.
2452 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
2453 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2454 __ b(&next);
2455 __ bind(&fast);
2456 }
2457
2458 // All extension objects were empty and it is safe to use a global
2459 // load IC call.
2460 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
2461 // Load the global object.
2462 LoadGlobal();
2463 // Setup the name register.
2464 Result name(r2);
2465 __ mov(r2, Operand(slot->var()->name()));
2466 // Call IC stub.
2467 if (typeof_state == INSIDE_TYPEOF) {
2468 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
2469 } else {
2470 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
2471 }
2472
2473 // Drop the global object. The result is in r0.
2474 frame_->Drop();
2475}
2476
2477
2478void CodeGenerator::VisitSlot(Slot* node) {
2479#ifdef DEBUG
2480 int original_height = frame_->height();
2481#endif
2482 VirtualFrame::SpilledScope spilled_scope;
2483 Comment cmnt(masm_, "[ Slot");
2484 LoadFromSlot(node, typeof_state());
2485 ASSERT(frame_->height() == original_height + 1);
2486}
2487
2488
2489void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2490#ifdef DEBUG
2491 int original_height = frame_->height();
2492#endif
2493 VirtualFrame::SpilledScope spilled_scope;
2494 Comment cmnt(masm_, "[ VariableProxy");
2495
2496 Variable* var = node->var();
2497 Expression* expr = var->rewrite();
2498 if (expr != NULL) {
2499 Visit(expr);
2500 } else {
2501 ASSERT(var->is_global());
2502 Reference ref(this, node);
2503 ref.GetValueAndSpill(typeof_state());
2504 }
2505 ASSERT(frame_->height() == original_height + 1);
2506}
2507
2508
2509void CodeGenerator::VisitLiteral(Literal* node) {
2510#ifdef DEBUG
2511 int original_height = frame_->height();
2512#endif
2513 VirtualFrame::SpilledScope spilled_scope;
2514 Comment cmnt(masm_, "[ Literal");
2515 __ mov(r0, Operand(node->handle()));
2516 frame_->EmitPush(r0);
2517 ASSERT(frame_->height() == original_height + 1);
2518}
2519
2520
2521void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2522#ifdef DEBUG
2523 int original_height = frame_->height();
2524#endif
2525 VirtualFrame::SpilledScope spilled_scope;
2526  Comment cmnt(masm_, "[ RegExp Literal");
2527
2528 // Retrieve the literal array and check the allocated entry.
2529
2530 // Load the function of this activation.
2531 __ ldr(r1, frame_->Function());
2532
2533 // Load the literals array of the function.
2534 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2535
2536 // Load the literal at the ast saved index.
2537 int literal_offset =
2538 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2539 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2540
2541 JumpTarget done;
2542 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2543 __ cmp(r2, ip);
2544 done.Branch(ne);
2545
2546  // If the entry is undefined we call the runtime system to compute
2547 // the literal.
2548 frame_->EmitPush(r1); // literal array (0)
2549 __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
2550 frame_->EmitPush(r0); // literal index (1)
2551 __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
2552 frame_->EmitPush(r0);
2553 __ mov(r0, Operand(node->flags())); // RegExp flags (3)
2554 frame_->EmitPush(r0);
2555 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2556 __ mov(r2, Operand(r0));
2557
2558 done.Bind();
2559 // Push the literal.
2560 frame_->EmitPush(r2);
2561 ASSERT(frame_->height() == original_height + 1);
2562}
2563
2564
2565// This deferred code stub will be used for creating the boilerplate
2566// by calling Runtime_CreateObjectLiteralBoilerplate.
2567// Each created boilerplate is stored in the JSFunction and they are
2568// therefore context dependent.
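// For illustration (hypothetical snippet): for a literal such as
//   var o = { a: 1, b: 2 };
// the boilerplate holding the constant properties is built once per
// closure by this deferred call and is then cloned each time the
// literal expression is evaluated.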
2569class DeferredObjectLiteral: public DeferredCode {
2570 public:
2571 explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
2572 set_comment("[ DeferredObjectLiteral");
2573 }
2574
2575 virtual void Generate();
2576
2577 private:
2578 ObjectLiteral* node_;
2579};
2580
2581
2582void DeferredObjectLiteral::Generate() {
2583 // Argument is passed in r1.
2584
2585 // If the entry is undefined we call the runtime system to compute
2586 // the literal.
2587 // Literal array (0).
2588 __ push(r1);
2589 // Literal index (1).
2590 __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
2591 __ push(r0);
2592 // Constant properties (2).
2593 __ mov(r0, Operand(node_->constant_properties()));
2594 __ push(r0);
2595 __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
2596 __ mov(r2, Operand(r0));
2597 // Result is returned in r2.
2598}
2599
2600
2601void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2602#ifdef DEBUG
2603 int original_height = frame_->height();
2604#endif
2605 VirtualFrame::SpilledScope spilled_scope;
2606 Comment cmnt(masm_, "[ ObjectLiteral");
2607
2608 DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
2609
2610 // Retrieve the literal array and check the allocated entry.
2611
2612 // Load the function of this activation.
2613 __ ldr(r1, frame_->Function());
2614
2615 // Load the literals array of the function.
2616 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2617
2618 // Load the literal at the ast saved index.
2619 int literal_offset =
2620 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2621 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2622
2623 // Check whether we need to materialize the object literal boilerplate.
2624 // If so, jump to the deferred code.
2625 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2626 __ cmp(r2, Operand(ip));
2627 deferred->Branch(eq);
2628 deferred->BindExit();
2629
2630 // Push the object literal boilerplate.
2631 frame_->EmitPush(r2);
2632
2633 // Clone the boilerplate object.
2634 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2635 if (node->depth() == 1) {
2636 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2637 }
2638 frame_->CallRuntime(clone_function_id, 1);
2639 frame_->EmitPush(r0); // save the result
2640 // r0: cloned object literal
2641
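  // For illustration (hypothetical snippet): in
  //   var o = { a: 1, b: f(), get c() { return 3; } };
  // 'a' is a CONSTANT property already present in the boilerplate,
  // 'b' is COMPUTED and stored via Runtime::kSetProperty, and 'c' is
  // a GETTER installed via Runtime::kDefineAccessor below.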
2642 for (int i = 0; i < node->properties()->length(); i++) {
2643 ObjectLiteral::Property* property = node->properties()->at(i);
2644 Literal* key = property->key();
2645 Expression* value = property->value();
2646 switch (property->kind()) {
2647 case ObjectLiteral::Property::CONSTANT:
2648 break;
2649 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2650 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2651 // else fall through
2652 case ObjectLiteral::Property::COMPUTED: // fall through
2653 case ObjectLiteral::Property::PROTOTYPE: {
2654 frame_->EmitPush(r0); // dup the result
2655 LoadAndSpill(key);
2656 LoadAndSpill(value);
2657 frame_->CallRuntime(Runtime::kSetProperty, 3);
2658 // restore r0
2659 __ ldr(r0, frame_->Top());
2660 break;
2661 }
2662 case ObjectLiteral::Property::SETTER: {
2663 frame_->EmitPush(r0);
2664 LoadAndSpill(key);
2665 __ mov(r0, Operand(Smi::FromInt(1)));
2666 frame_->EmitPush(r0);
2667 LoadAndSpill(value);
2668 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2669 __ ldr(r0, frame_->Top());
2670 break;
2671 }
2672 case ObjectLiteral::Property::GETTER: {
2673 frame_->EmitPush(r0);
2674 LoadAndSpill(key);
2675 __ mov(r0, Operand(Smi::FromInt(0)));
2676 frame_->EmitPush(r0);
2677 LoadAndSpill(value);
2678 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2679 __ ldr(r0, frame_->Top());
2680 break;
2681 }
2682 }
2683 }
2684 ASSERT(frame_->height() == original_height + 1);
2685}
2686
2687
2688// This deferred code stub will be used for creating the boilerplate
2689// by calling Runtime_CreateArrayLiteralBoilerplate.
2690// Each created boilerplate is stored in the JSFunction and they are
2691// therefore context dependent.
2692class DeferredArrayLiteral: public DeferredCode {
2693 public:
2694 explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
2695 set_comment("[ DeferredArrayLiteral");
2696 }
2697
2698 virtual void Generate();
2699
2700 private:
2701 ArrayLiteral* node_;
2702};
2703
2704
2705void DeferredArrayLiteral::Generate() {
2706 // Argument is passed in r1.
2707
2708  // If the entry is undefined we call the runtime system to compute
2709 // the literal.
2710 // Literal array (0).
2711 __ push(r1);
2712 // Literal index (1).
2713 __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
2714 __ push(r0);
2715  // Constant elements (2).
2716 __ mov(r0, Operand(node_->literals()));
2717 __ push(r0);
2718 __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
2719 __ mov(r2, Operand(r0));
2720 // Result is returned in r2.
2721}
2722
2723
2724void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2725#ifdef DEBUG
2726 int original_height = frame_->height();
2727#endif
2728 VirtualFrame::SpilledScope spilled_scope;
2729 Comment cmnt(masm_, "[ ArrayLiteral");
2730
2731 DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
2732
2733 // Retrieve the literal array and check the allocated entry.
2734
2735 // Load the function of this activation.
2736 __ ldr(r1, frame_->Function());
2737
2738 // Load the literals array of the function.
2739 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2740
2741 // Load the literal at the ast saved index.
2742 int literal_offset =
2743 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2744 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2745
2746  // Check whether we need to materialize the array literal boilerplate.
2747 // If so, jump to the deferred code.
2748 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2749 __ cmp(r2, Operand(ip));
2750 deferred->Branch(eq);
2751 deferred->BindExit();
2752
2753  // Push the array literal boilerplate.
2754 frame_->EmitPush(r2);
2755
2756 // Clone the boilerplate object.
2757 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2758 if (node->depth() == 1) {
2759 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2760 }
2761 frame_->CallRuntime(clone_function_id, 1);
2762 frame_->EmitPush(r0); // save the result
2763  // r0: cloned array literal
2764
2765 // Generate code to set the elements in the array that are not
2766 // literals.
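  // For illustration (hypothetical snippet): in
  //   var a = [1, 2, f()];
  // the first two elements are already present in the cloned
  // boilerplate, while the value of f() is computed here, stored into
  // the elements array, and recorded with a write barrier.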
2767 for (int i = 0; i < node->values()->length(); i++) {
2768 Expression* value = node->values()->at(i);
2769
2770 // If value is a literal the property value is already set in the
2771 // boilerplate object.
2772 if (value->AsLiteral() != NULL) continue;
2773 // If value is a materialized literal the property value is already set
2774 // in the boilerplate object if it is simple.
2775 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2776
2777 // The property must be set by generated code.
2778 LoadAndSpill(value);
2779 frame_->EmitPop(r0);
2780
2781 // Fetch the object literal.
2782    // Fetch the array literal.
2783 // Get the elements array.
2784 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
2785
2786 // Write to the indexed properties array.
2787 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2788 __ str(r0, FieldMemOperand(r1, offset));
2789
2790 // Update the write barrier for the array address.
2791 __ mov(r3, Operand(offset));
2792 __ RecordWrite(r1, r3, r2);
2793 }
2794 ASSERT(frame_->height() == original_height + 1);
2795}
2796
2797
2798void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2799#ifdef DEBUG
2800 int original_height = frame_->height();
2801#endif
2802 VirtualFrame::SpilledScope spilled_scope;
2803 // Call runtime routine to allocate the catch extension object and
2804 // assign the exception value to the catch variable.
2805 Comment cmnt(masm_, "[ CatchExtensionObject");
2806 LoadAndSpill(node->key());
2807 LoadAndSpill(node->value());
2808 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2809 frame_->EmitPush(r0);
2810 ASSERT(frame_->height() == original_height + 1);
2811}
2812
2813
2814void CodeGenerator::VisitAssignment(Assignment* node) {
2815#ifdef DEBUG
2816 int original_height = frame_->height();
2817#endif
2818 VirtualFrame::SpilledScope spilled_scope;
2819 Comment cmnt(masm_, "[ Assignment");
2820
2821 { Reference target(this, node->target());
2822 if (target.is_illegal()) {
2823 // Fool the virtual frame into thinking that we left the assignment's
2824 // value on the frame.
2825 __ mov(r0, Operand(Smi::FromInt(0)));
2826 frame_->EmitPush(r0);
2827 ASSERT(frame_->height() == original_height + 1);
2828 return;
2829 }
2830
2831 if (node->op() == Token::ASSIGN ||
2832 node->op() == Token::INIT_VAR ||
2833 node->op() == Token::INIT_CONST) {
2834 LoadAndSpill(node->value());
2835
2836 } else {
2837 // +=, *= and similar binary assignments.
2838 // Get the old value of the lhs.
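      // For illustration (hypothetical snippet): for
      //   x += 1;
      // the old value of 'x' is loaded and combined with the smi
      // constant via SmiOperation, while something like
      //   x += f();
      // takes the generic binary operation path below.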
2839 target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
2840 Literal* literal = node->value()->AsLiteral();
2841 bool overwrite =
2842 (node->value()->AsBinaryOperation() != NULL &&
2843 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2844 if (literal != NULL && literal->handle()->IsSmi()) {
2845 SmiOperation(node->binary_op(),
2846 literal->handle(),
2847 false,
2848 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2849 frame_->EmitPush(r0);
2850
2851 } else {
2852 LoadAndSpill(node->value());
2853 GenericBinaryOperation(node->binary_op(),
2854 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2855 frame_->EmitPush(r0);
2856 }
2857 }
2858
2859 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2860 if (var != NULL &&
2861 (var->mode() == Variable::CONST) &&
2862 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2863 // Assignment ignored - leave the value on the stack.
2864
2865 } else {
2866 CodeForSourcePosition(node->position());
2867 if (node->op() == Token::INIT_CONST) {
2868 // Dynamic constant initializations must use the function context
2869 // and initialize the actual constant declared. Dynamic variable
2870 // initializations are simply assignments and use SetValue.
2871 target.SetValue(CONST_INIT);
2872 } else {
2873 target.SetValue(NOT_CONST_INIT);
2874 }
2875 }
2876 }
2877 ASSERT(frame_->height() == original_height + 1);
2878}
2879
2880
2881void CodeGenerator::VisitThrow(Throw* node) {
2882#ifdef DEBUG
2883 int original_height = frame_->height();
2884#endif
2885 VirtualFrame::SpilledScope spilled_scope;
2886 Comment cmnt(masm_, "[ Throw");
2887
2888 LoadAndSpill(node->exception());
2889 CodeForSourcePosition(node->position());
2890 frame_->CallRuntime(Runtime::kThrow, 1);
2891 frame_->EmitPush(r0);
2892 ASSERT(frame_->height() == original_height + 1);
2893}
2894
2895
2896void CodeGenerator::VisitProperty(Property* node) {
2897#ifdef DEBUG
2898 int original_height = frame_->height();
2899#endif
2900 VirtualFrame::SpilledScope spilled_scope;
2901 Comment cmnt(masm_, "[ Property");
2902
2903 { Reference property(this, node);
2904 property.GetValueAndSpill(typeof_state());
2905 }
2906 ASSERT(frame_->height() == original_height + 1);
2907}
2908
2909
2910void CodeGenerator::VisitCall(Call* node) {
2911#ifdef DEBUG
2912 int original_height = frame_->height();
2913#endif
2914 VirtualFrame::SpilledScope spilled_scope;
2915 Comment cmnt(masm_, "[ Call");
2916
2917 Expression* function = node->expression();
2918 ZoneList<Expression*>* args = node->arguments();
2919
2920 // Standard function call.
2921 // Check if the function is a variable or a property.
2922 Variable* var = function->AsVariableProxy()->AsVariable();
2923 Property* property = function->AsProperty();
2924
2925 // ------------------------------------------------------------------------
2926 // Fast-case: Use inline caching.
2927 // ---
2928 // According to ECMA-262, section 11.2.3, page 44, the function to call
2929 // must be resolved after the arguments have been evaluated. The IC code
2930 // automatically handles this by loading the arguments before the function
2931 // is resolved in cache misses (this also holds for megamorphic calls).
2932 // ------------------------------------------------------------------------
2933
2934 if (var != NULL && var->is_possibly_eval()) {
2935 // ----------------------------------
2936 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2937 // ----------------------------------
2938
2939 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2940 // resolve the function we need to call and the receiver of the
2941 // call. Then we call the resolved function using the given
2942 // arguments.
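    // For illustration (hypothetical snippet): the callee may or may
    // not be the real global eval, since code such as
    //   var eval = f;
    // can rebind the name, so the actual function and receiver are
    // determined at run time by %ResolvePossiblyDirectEval.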
2943 // Prepare stack for call to resolved function.
2944 LoadAndSpill(function);
2945 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2946 frame_->EmitPush(r2); // Slot for receiver
2947 int arg_count = args->length();
2948 for (int i = 0; i < arg_count; i++) {
2949 LoadAndSpill(args->at(i));
2950 }
2951
2952 // Prepare stack for call to ResolvePossiblyDirectEval.
2953 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
2954 frame_->EmitPush(r1);
2955 if (arg_count > 0) {
2956 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
2957 frame_->EmitPush(r1);
2958 } else {
2959 frame_->EmitPush(r2);
2960 }
2961
2962 // Resolve the call.
2963 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
2964
2965 // Touch up stack with the right values for the function and the receiver.
2966 __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
2967 __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
2968 __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
2969 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
2970
2971 // Call the function.
2972 CodeForSourcePosition(node->position());
2973
2974 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
2975 CallFunctionStub call_function(arg_count, in_loop);
2976 frame_->CallStub(&call_function, arg_count + 1);
2977
2978 __ ldr(cp, frame_->Context());
2979 // Remove the function from the stack.
2980 frame_->Drop();
2981 frame_->EmitPush(r0);
2982
2983 } else if (var != NULL && !var->is_this() && var->is_global()) {
2984 // ----------------------------------
2985 // JavaScript example: 'foo(1, 2, 3)' // foo is global
2986 // ----------------------------------
2987
2988 // Push the name of the function and the receiver onto the stack.
2989 __ mov(r0, Operand(var->name()));
2990 frame_->EmitPush(r0);
2991
2992 // Pass the global object as the receiver and let the IC stub
2993 // patch the stack to use the global proxy as 'this' in the
2994 // invoked function.
2995 LoadGlobal();
2996
2997 // Load the arguments.
2998 int arg_count = args->length();
2999 for (int i = 0; i < arg_count; i++) {
3000 LoadAndSpill(args->at(i));
3001 }
3002
3003 // Setup the receiver register and call the IC initialization code.
3004 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3005 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3006 CodeForSourcePosition(node->position());
3007 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3008 arg_count + 1);
3009 __ ldr(cp, frame_->Context());
3010 // Remove the function from the stack.
3011 frame_->Drop();
3012 frame_->EmitPush(r0);
3013
3014 } else if (var != NULL && var->slot() != NULL &&
3015 var->slot()->type() == Slot::LOOKUP) {
3016 // ----------------------------------
3017 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
3018 // ----------------------------------
3019
3020 // Load the function
3021 frame_->EmitPush(cp);
3022 __ mov(r0, Operand(var->name()));
3023 frame_->EmitPush(r0);
3024 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3025 // r0: slot value; r1: receiver
3026
3027 // Load the receiver.
3028 frame_->EmitPush(r0); // function
3029 frame_->EmitPush(r1); // receiver
3030
3031 // Call the function.
3032 CallWithArguments(args, node->position());
3033 frame_->EmitPush(r0);
3034
3035 } else if (property != NULL) {
3036 // Check if the key is a literal string.
3037 Literal* literal = property->key()->AsLiteral();
3038
3039 if (literal != NULL && literal->handle()->IsSymbol()) {
3040 // ------------------------------------------------------------------
3041 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
3042 // ------------------------------------------------------------------
3043
3044 // Push the name of the function and the receiver onto the stack.
3045 __ mov(r0, Operand(literal->handle()));
3046 frame_->EmitPush(r0);
3047 LoadAndSpill(property->obj());
3048
3049 // Load the arguments.
3050 int arg_count = args->length();
3051 for (int i = 0; i < arg_count; i++) {
3052 LoadAndSpill(args->at(i));
3053 }
3054
3055 // Set the receiver register and call the IC initialization code.
3056 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3057 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3058 CodeForSourcePosition(node->position());
3059 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3060 __ ldr(cp, frame_->Context());
3061
3062 // Remove the function from the stack.
3063 frame_->Drop();
3064
3065      frame_->EmitPush(r0); // push after the function has been removed from the stack
3066
3067 } else {
3068 // -------------------------------------------
3069 // JavaScript example: 'array[index](1, 2, 3)'
3070 // -------------------------------------------
3071
3072 // Load the function to call from the property through a reference.
3073 Reference ref(this, property);
3074 ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver
3075
3076 // Pass receiver to called function.
3077 if (property->is_synthetic()) {
3078 LoadGlobalReceiver(r0);
3079 } else {
3080 __ ldr(r0, frame_->ElementAt(ref.size()));
3081 frame_->EmitPush(r0);
3082 }
3083
3084 // Call the function.
3085 CallWithArguments(args, node->position());
3086 frame_->EmitPush(r0);
3087 }
3088
3089 } else {
3090 // ----------------------------------
3091 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
3092 // ----------------------------------
3093
3094 // Load the function.
3095 LoadAndSpill(function);
3096
3097 // Pass the global proxy as the receiver.
3098 LoadGlobalReceiver(r0);
3099
3100 // Call the function.
3101 CallWithArguments(args, node->position());
3102 frame_->EmitPush(r0);
3103 }
3104 ASSERT(frame_->height() == original_height + 1);
3105}
3106
3107
3108void CodeGenerator::VisitCallNew(CallNew* node) {
3109#ifdef DEBUG
3110 int original_height = frame_->height();
3111#endif
3112 VirtualFrame::SpilledScope spilled_scope;
3113 Comment cmnt(masm_, "[ CallNew");
3114
3115 // According to ECMA-262, section 11.2.2, page 44, the function
3116 // expression in new calls must be evaluated before the
3117 // arguments. This is different from ordinary calls, where the
3118 // actual function to call is resolved after the arguments have been
3119 // evaluated.
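  // For illustration (hypothetical snippet): in
  //   new obj.Ctor(f());
  // 'obj.Ctor' is loaded before f() is called, whereas for an
  // ordinary call the callee is only resolved after the arguments
  // have been evaluated.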
3120
3121 // Compute function to call and use the global object as the
3122 // receiver. There is no need to use the global proxy here because
3123 // it will always be replaced with a newly allocated object.
3124 LoadAndSpill(node->expression());
3125 LoadGlobal();
3126
3127 // Push the arguments ("left-to-right") on the stack.
3128 ZoneList<Expression*>* args = node->arguments();
3129 int arg_count = args->length();
3130 for (int i = 0; i < arg_count; i++) {
3131 LoadAndSpill(args->at(i));
3132 }
3133
3134 // r0: the number of arguments.
3135 Result num_args(r0);
3136 __ mov(r0, Operand(arg_count));
3137
3138 // Load the function into r1 as per calling convention.
3139 Result function(r1);
3140 __ ldr(r1, frame_->ElementAt(arg_count + 1));
3141
3142 // Call the construct call builtin that handles allocation and
3143 // constructor invocation.
3144 CodeForSourcePosition(node->position());
3145 Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
3146 frame_->CallCodeObject(ic,
3147 RelocInfo::CONSTRUCT_CALL,
3148 &num_args,
3149 &function,
3150 arg_count + 1);
3151
3152 // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
3153 __ str(r0, frame_->Top());
3154 ASSERT(frame_->height() == original_height + 1);
3155}
3156
3157
3158void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
3159 VirtualFrame::SpilledScope spilled_scope;
3160 ASSERT(args->length() == 1);
3161 JumpTarget leave, null, function, non_function_constructor;
3162
3163 // Load the object into r0.
3164 LoadAndSpill(args->at(0));
3165 frame_->EmitPop(r0);
3166
3167 // If the object is a smi, we return null.
3168 __ tst(r0, Operand(kSmiTagMask));
3169 null.Branch(eq);
3170
3171 // Check that the object is a JS object but take special care of JS
3172 // functions to make sure they have 'Function' as their class.
3173 __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
3174 null.Branch(lt);
3175
3176 // As long as JS_FUNCTION_TYPE is the last instance type and it is
3177 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
3178 // LAST_JS_OBJECT_TYPE.
3179 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
3180 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
3181 __ cmp(r1, Operand(JS_FUNCTION_TYPE));
3182 function.Branch(eq);
3183
3184 // Check if the constructor in the map is a function.
3185 __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
3186 __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
3187 non_function_constructor.Branch(ne);
3188
3189 // The r0 register now contains the constructor function. Grab the
3190 // instance class name from there.
3191 __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
3192 __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
3193 frame_->EmitPush(r0);
3194 leave.Jump();
3195
3196 // Functions have class 'Function'.
3197 function.Bind();
3198 __ mov(r0, Operand(Factory::function_class_symbol()));
3199 frame_->EmitPush(r0);
3200 leave.Jump();
3201
3202 // Objects with a non-function constructor have class 'Object'.
3203 non_function_constructor.Bind();
3204 __ mov(r0, Operand(Factory::Object_symbol()));
3205 frame_->EmitPush(r0);
3206 leave.Jump();
3207
3208 // Non-JS objects have class null.
3209 null.Bind();
3210 __ LoadRoot(r0, Heap::kNullValueRootIndex);
3211 frame_->EmitPush(r0);
3212
3213 // All done.
3214 leave.Bind();
3215}
3216
3217
3218void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
3219 VirtualFrame::SpilledScope spilled_scope;
3220 ASSERT(args->length() == 1);
3221 JumpTarget leave;
3222 LoadAndSpill(args->at(0));
3223 frame_->EmitPop(r0); // r0 contains object.
3224 // if (object->IsSmi()) return the object.
3225 __ tst(r0, Operand(kSmiTagMask));
3226 leave.Branch(eq);
3227 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3228 __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
3229 leave.Branch(ne);
3230 // Load the value.
3231 __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
3232 leave.Bind();
3233 frame_->EmitPush(r0);
3234}
3235
3236
3237void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
3238 VirtualFrame::SpilledScope spilled_scope;
3239 ASSERT(args->length() == 2);
3240 JumpTarget leave;
3241 LoadAndSpill(args->at(0)); // Load the object.
3242 LoadAndSpill(args->at(1)); // Load the value.
3243 frame_->EmitPop(r0); // r0 contains value
3244 frame_->EmitPop(r1); // r1 contains object
3245 // if (object->IsSmi()) return object.
3246 __ tst(r1, Operand(kSmiTagMask));
3247 leave.Branch(eq);
3248 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3249 __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
3250 leave.Branch(ne);
3251 // Store the value.
3252 __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
3253 // Update the write barrier.
3254 __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
3255 __ RecordWrite(r1, r2, r3);
3256 // Leave.
3257 leave.Bind();
3258 frame_->EmitPush(r0);
3259}
3260
3261
3262void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3263 VirtualFrame::SpilledScope spilled_scope;
3264 ASSERT(args->length() == 1);
3265 LoadAndSpill(args->at(0));
3266 frame_->EmitPop(r0);
3267 __ tst(r0, Operand(kSmiTagMask));
3268 cc_reg_ = eq;
3269}
3270
3271
3272void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3273 VirtualFrame::SpilledScope spilled_scope;
3274 // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
3275 ASSERT_EQ(args->length(), 3);
3276#ifdef ENABLE_LOGGING_AND_PROFILING
3277 if (ShouldGenerateLog(args->at(0))) {
3278 LoadAndSpill(args->at(1));
3279 LoadAndSpill(args->at(2));
3280 __ CallRuntime(Runtime::kLog, 2);
3281 }
3282#endif
3283 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3284 frame_->EmitPush(r0);
3285}
3286
3287
3288void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3289 VirtualFrame::SpilledScope spilled_scope;
3290 ASSERT(args->length() == 1);
3291 LoadAndSpill(args->at(0));
3292 frame_->EmitPop(r0);
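  // A non-negative smi has both the tag bit and the sign bit clear,
  // e.g. (hypothetically, assuming the usual 1-bit smi tag) 7 is
  // encoded as 0x0000000e and passes the test below, while -1 is
  // encoded as 0xfffffffe and fails on the sign bit.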
3293 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3294 cc_reg_ = eq;
3295}
3296
3297
3298// This should generate code that performs a charCodeAt() call or returns
3299// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
3300// It is not yet implemented on ARM, so it always goes to the slow case.
3301void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3302 VirtualFrame::SpilledScope spilled_scope;
3303 ASSERT(args->length() == 2);
3304 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3305 frame_->EmitPush(r0);
3306}
3307
3308
3309void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3310 VirtualFrame::SpilledScope spilled_scope;
3311 ASSERT(args->length() == 1);
3312 LoadAndSpill(args->at(0));
3313 JumpTarget answer;
3314 // We need the CC bits to come out as not_equal in the case where the
3315 // object is a smi. This can't be done with the usual test opcode so
3316 // we use XOR to get the right CC bits.
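  // For illustration (assuming kSmiTagMask == 1): for a smi the AND
  // below yields 0 and the XOR then yields 1, so the flags read
  // not_equal and we branch straight to 'answer' with the final 'eq'
  // condition false; for a heap object the XOR yields 0 and
  // CompareObjectType determines the flags instead.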
3317 frame_->EmitPop(r0);
3318 __ and_(r1, r0, Operand(kSmiTagMask));
3319 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
3320 answer.Branch(ne);
3321 // It is a heap object - get the map. Check if the object is a JS array.
3322 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
3323 answer.Bind();
3324 cc_reg_ = eq;
3325}
3326
3327
3328void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3329 VirtualFrame::SpilledScope spilled_scope;
3330 ASSERT(args->length() == 0);
3331
3332 // Get the frame pointer for the calling frame.
3333 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3334
3335 // Skip the arguments adaptor frame if it exists.
3336 Label check_frame_marker;
3337 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
3338 __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3339 __ b(ne, &check_frame_marker);
3340 __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
3341
3342 // Check the marker in the calling frame.
3343 __ bind(&check_frame_marker);
3344 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
3345 __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3346 cc_reg_ = eq;
3347}
3348
3349
3350void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3351 VirtualFrame::SpilledScope spilled_scope;
3352 ASSERT(args->length() == 0);
3353
3354 // Seed the result with the formal parameters count, which will be used
3355 // in case no arguments adaptor frame is found below the current frame.
3356 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
3357
3358 // Call the shared stub to get to the arguments.length.
3359 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3360 frame_->CallStub(&stub, 0);
3361 frame_->EmitPush(r0);
3362}
3363
3364
3365void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3366 VirtualFrame::SpilledScope spilled_scope;
3367 ASSERT(args->length() == 1);
3368
3369 // Satisfy contract with ArgumentsAccessStub:
3370 // Load the key into r1 and the formal parameters count into r0.
3371 LoadAndSpill(args->at(0));
3372 frame_->EmitPop(r1);
3373 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
3374
3375 // Call the shared stub to get to arguments[key].
3376 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3377 frame_->CallStub(&stub, 0);
3378 frame_->EmitPush(r0);
3379}
3380
3381
3382void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3383 VirtualFrame::SpilledScope spilled_scope;
3384 ASSERT(args->length() == 0);
3385 __ Call(ExternalReference::random_positive_smi_function().address(),
3386 RelocInfo::RUNTIME_ENTRY);
3387 frame_->EmitPush(r0);
3388}
3389
3390
3391void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
3392 VirtualFrame::SpilledScope spilled_scope;
3393 LoadAndSpill(args->at(0));
3394 switch (op) {
3395 case SIN:
3396 frame_->CallRuntime(Runtime::kMath_sin, 1);
3397 break;
3398 case COS:
3399 frame_->CallRuntime(Runtime::kMath_cos, 1);
3400 break;
3401 }
3402 frame_->EmitPush(r0);
3403}
3404
3405
3406void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3407 VirtualFrame::SpilledScope spilled_scope;
3408 ASSERT(args->length() == 2);
3409
3410 // Load the two objects into registers and perform the comparison.
3411 LoadAndSpill(args->at(0));
3412 LoadAndSpill(args->at(1));
3413 frame_->EmitPop(r0);
3414 frame_->EmitPop(r1);
3415 __ cmp(r0, Operand(r1));
3416 cc_reg_ = eq;
3417}
3418
3419
3420void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
3421#ifdef DEBUG
3422 int original_height = frame_->height();
3423#endif
3424 VirtualFrame::SpilledScope spilled_scope;
3425 if (CheckForInlineRuntimeCall(node)) {
3426 ASSERT((has_cc() && frame_->height() == original_height) ||
3427 (!has_cc() && frame_->height() == original_height + 1));
3428 return;
3429 }
3430
3431 ZoneList<Expression*>* args = node->arguments();
3432 Comment cmnt(masm_, "[ CallRuntime");
3433 Runtime::Function* function = node->function();
3434
3435 if (function == NULL) {
3436 // Prepare stack for calling JS runtime function.
3437 __ mov(r0, Operand(node->name()));
3438 frame_->EmitPush(r0);
3439 // Push the builtins object found in the current global object.
3440 __ ldr(r1, GlobalObject());
3441 __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
3442 frame_->EmitPush(r0);
3443 }
3444
3445 // Push the arguments ("left-to-right").
3446 int arg_count = args->length();
3447 for (int i = 0; i < arg_count; i++) {
3448 LoadAndSpill(args->at(i));
3449 }
3450
3451 if (function == NULL) {
3452 // Call the JS runtime function.
3453 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3454 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3455 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3456 __ ldr(cp, frame_->Context());
3457 frame_->Drop();
3458 frame_->EmitPush(r0);
3459 } else {
3460 // Call the C runtime function.
3461 frame_->CallRuntime(function, arg_count);
3462 frame_->EmitPush(r0);
3463 }
3464 ASSERT(frame_->height() == original_height + 1);
3465}
3466
3467
3468void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
3469#ifdef DEBUG
3470 int original_height = frame_->height();
3471#endif
3472 VirtualFrame::SpilledScope spilled_scope;
3473 Comment cmnt(masm_, "[ UnaryOperation");
3474
3475 Token::Value op = node->op();
3476
3477 if (op == Token::NOT) {
3478 LoadConditionAndSpill(node->expression(),
3479 NOT_INSIDE_TYPEOF,
3480 false_target(),
3481 true_target(),
3482 true);
3483 // LoadCondition may (and usually does) leave a test and branch to
3484 // be emitted by the caller. In that case, negate the condition.
3485 if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
3486
3487 } else if (op == Token::DELETE) {
3488 Property* property = node->expression()->AsProperty();
3489 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3490 if (property != NULL) {
3491 LoadAndSpill(property->obj());
3492 LoadAndSpill(property->key());
3493 Result arg_count(r0);
3494 __ mov(r0, Operand(1)); // not counting receiver
3495 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
3496
3497 } else if (variable != NULL) {
3498 Slot* slot = variable->slot();
3499 if (variable->is_global()) {
3500 LoadGlobal();
3501 __ mov(r0, Operand(variable->name()));
3502 frame_->EmitPush(r0);
3503 Result arg_count(r0);
3504 __ mov(r0, Operand(1)); // not counting receiver
3505 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
3506
3507 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3508 // Look up the context holding the named variable.
3509 frame_->EmitPush(cp);
3510 __ mov(r0, Operand(variable->name()));
3511 frame_->EmitPush(r0);
3512 frame_->CallRuntime(Runtime::kLookupContext, 2);
3513 // r0: context
3514 frame_->EmitPush(r0);
3515 __ mov(r0, Operand(variable->name()));
3516 frame_->EmitPush(r0);
3517 Result arg_count(r0);
3518 __ mov(r0, Operand(1)); // not counting receiver
3519 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
3520
3521 } else {
3522 // Default: The result of deleting a non-global, non-dynamically
3523 // introduced variable is false.
3524 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
3525 }
3526
3527 } else {
3528 // Default: Result of deleting expressions is true.
3529 LoadAndSpill(node->expression()); // may have side-effects
3530 frame_->Drop();
3531 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
3532 }
3533 frame_->EmitPush(r0);
3534
3535 } else if (op == Token::TYPEOF) {
3536 // Special case for loading the typeof expression; see comment on
3537 // LoadTypeofExpression().
3538 LoadTypeofExpression(node->expression());
3539 frame_->CallRuntime(Runtime::kTypeof, 1);
3540 frame_->EmitPush(r0); // r0 has result
3541
3542 } else {
3543 LoadAndSpill(node->expression());
3544 frame_->EmitPop(r0);
3545 switch (op) {
3546 case Token::NOT:
3547 case Token::DELETE:
3548 case Token::TYPEOF:
3549 UNREACHABLE(); // handled above
3550 break;
3551
3552 case Token::SUB: {
3553 bool overwrite =
3554 (node->expression()->AsBinaryOperation() != NULL &&
3555 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
3556 UnarySubStub stub(overwrite);
3557 frame_->CallStub(&stub, 0);
3558 break;
3559 }
3560
3561 case Token::BIT_NOT: {
3562 // Smi check.
3563 JumpTarget smi_label;
3564 JumpTarget continue_label;
3565 __ tst(r0, Operand(kSmiTagMask));
3566 smi_label.Branch(eq);
3567
3568 frame_->EmitPush(r0);
3569 Result arg_count(r0);
3570 __ mov(r0, Operand(0)); // not counting receiver
3571 frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
3572
3573 continue_label.Jump();
3574 smi_label.Bind();
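        // For a smi, bitwise NOT can be done in place: mvn inverts all 32
        // bits (including the tag bit) and bic clears the inverted tag.
        // With the usual encoding (smi == value << 1), smi 5 is 0x0000000a;
        // mvn gives 0xfffffff5 and clearing bit 0 gives 0xfffffff4, which
        // is the smi -6 == ~5.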
3575 __ mvn(r0, Operand(r0));
3576 __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
3577 continue_label.Bind();
3578 break;
3579 }
3580
3581 case Token::VOID:
3582 // Since the stack top is cached in r0, popping and then
3583 // pushing a value can be done by just writing to r0.
3584 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3585 break;
3586
3587 case Token::ADD: {
3588 // Smi check.
3589 JumpTarget continue_label;
3590 __ tst(r0, Operand(kSmiTagMask));
3591 continue_label.Branch(eq);
3592 frame_->EmitPush(r0);
3593 Result arg_count(r0);
3594 __ mov(r0, Operand(0)); // not counting receiver
3595 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
3596 continue_label.Bind();
3597 break;
3598 }
3599 default:
3600 UNREACHABLE();
3601 }
3602 frame_->EmitPush(r0); // r0 has result
3603 }
3604 ASSERT(!has_valid_frame() ||
3605 (has_cc() && frame_->height() == original_height) ||
3606 (!has_cc() && frame_->height() == original_height + 1));
3607}
3608
3609
3610void CodeGenerator::VisitCountOperation(CountOperation* node) {
3611#ifdef DEBUG
3612 int original_height = frame_->height();
3613#endif
3614 VirtualFrame::SpilledScope spilled_scope;
3615 Comment cmnt(masm_, "[ CountOperation");
3616
3617 bool is_postfix = node->is_postfix();
3618 bool is_increment = node->op() == Token::INC;
3619
3620 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3621 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3622
3623 // Postfix: Make room for the result.
3624 if (is_postfix) {
3625 __ mov(r0, Operand(0));
3626 frame_->EmitPush(r0);
3627 }
3628
3629 { Reference target(this, node->expression());
3630 if (target.is_illegal()) {
3631 // Spoof the virtual frame to have the expected height (one higher
3632 // than on entry).
3633 if (!is_postfix) {
3634 __ mov(r0, Operand(Smi::FromInt(0)));
3635 frame_->EmitPush(r0);
3636 }
3637 ASSERT(frame_->height() == original_height + 1);
3638 return;
3639 }
3640 target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
3641 frame_->EmitPop(r0);
3642
3643 JumpTarget slow;
3644 JumpTarget exit;
3645
3646 // Load the value (1) into register r1.
3647 __ mov(r1, Operand(Smi::FromInt(1)));
3648
3649 // Check for smi operand.
3650 __ tst(r0, Operand(kSmiTagMask));
3651 slow.Branch(ne);
3652
3653 // Postfix: Store the old value as the result.
3654 if (is_postfix) {
3655 __ str(r0, frame_->ElementAt(target.size()));
3656 }
3657
3658 // Perform optimistic increment/decrement.
3659 if (is_increment) {
3660 __ add(r0, r0, Operand(r1), SetCC);
3661 } else {
3662 __ sub(r0, r0, Operand(r1), SetCC);
3663 }
3664
3665 // If the increment/decrement didn't overflow, we're done.
3666 exit.Branch(vc);
3667
3668 // Revert optimistic increment/decrement.
3669 if (is_increment) {
3670 __ sub(r0, r0, Operand(r1));
3671 } else {
3672 __ add(r0, r0, Operand(r1));
3673 }
3674
3675 // Slow case: Convert to number.
3676 slow.Bind();
3677 {
3678 // Convert the operand to a number.
3679 frame_->EmitPush(r0);
3680 Result arg_count(r0);
3681 __ mov(r0, Operand(0));
3682 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
3683 }
3684 if (is_postfix) {
3685 // Postfix: store to result (on the stack).
3686 __ str(r0, frame_->ElementAt(target.size()));
3687 }
3688
3689 // Compute the new value.
3690 __ mov(r1, Operand(Smi::FromInt(1)));
3691 frame_->EmitPush(r0);
3692 frame_->EmitPush(r1);
3693 if (is_increment) {
3694 frame_->CallRuntime(Runtime::kNumberAdd, 2);
3695 } else {
3696 frame_->CallRuntime(Runtime::kNumberSub, 2);
3697 }
3698
3699 // Store the new value in the target if not const.
3700 exit.Bind();
3701 frame_->EmitPush(r0);
3702 if (!is_const) target.SetValue(NOT_CONST_INIT);
3703 }
3704
3705 // Postfix: Discard the new value and use the old.
3706 if (is_postfix) frame_->EmitPop(r0);
3707 ASSERT(frame_->height() == original_height + 1);
3708}
3709
3710
3711void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3712#ifdef DEBUG
3713 int original_height = frame_->height();
3714#endif
3715 VirtualFrame::SpilledScope spilled_scope;
3716 Comment cmnt(masm_, "[ BinaryOperation");
3717 Token::Value op = node->op();
3718
3719 // According to ECMA-262 section 11.11, page 58, the binary logical
3720 // operators must yield the result of one of the two expressions
3721 // before any ToBoolean() conversions. This means that the value
3722 // produced by a && or || operator is not necessarily a boolean.
3723
3724 // NOTE: If the left hand side produces a materialized value (not in
3725 // the CC register), we force the right hand side to do the
3726 // same. This is necessary because we may have to branch to the exit
3727 // after evaluating the left hand side (due to the shortcut
3728 // semantics), but the compiler must (statically) know if the result
3729 // of compiling the binary operation is materialized or not.
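  // For example, in JavaScript (0 || "x") evaluates to the string "x" and
  // (1 && 0) evaluates to 0; neither result is a boolean.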
3730
3731 if (op == Token::AND) {
3732 JumpTarget is_true;
3733 LoadConditionAndSpill(node->left(),
3734 NOT_INSIDE_TYPEOF,
3735 &is_true,
3736 false_target(),
3737 false);
3738 if (has_valid_frame() && !has_cc()) {
3739 // The left-hand side result is on top of the virtual frame.
3740 JumpTarget pop_and_continue;
3741 JumpTarget exit;
3742
3743 __ ldr(r0, frame_->Top()); // Duplicate the stack top.
3744 frame_->EmitPush(r0);
3745 // Avoid popping the result if it converts to 'false' using the
3746 // standard ToBoolean() conversion as described in ECMA-262,
3747 // section 9.2, page 30.
3748 ToBoolean(&pop_and_continue, &exit);
3749 Branch(false, &exit);
3750
3751 // Pop the result of evaluating the first part.
3752 pop_and_continue.Bind();
3753 frame_->EmitPop(r0);
3754
3755 // Evaluate right side expression.
3756 is_true.Bind();
3757 LoadAndSpill(node->right());
3758
3759 // Exit (always with a materialized value).
3760 exit.Bind();
3761 } else if (has_cc() || is_true.is_linked()) {
3762 // The left-hand side is either (a) partially compiled to
3763 // control flow with a final branch left to emit or (b) fully
3764 // compiled to control flow and possibly true.
3765 if (has_cc()) {
3766 Branch(false, false_target());
3767 }
3768 is_true.Bind();
3769 LoadConditionAndSpill(node->right(),
3770 NOT_INSIDE_TYPEOF,
3771 true_target(),
3772 false_target(),
3773 false);
3774 } else {
3775 // Nothing to do.
3776 ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
3777 }
3778
3779 } else if (op == Token::OR) {
3780 JumpTarget is_false;
3781 LoadConditionAndSpill(node->left(),
3782 NOT_INSIDE_TYPEOF,
3783 true_target(),
3784 &is_false,
3785 false);
3786 if (has_valid_frame() && !has_cc()) {
3787 // The left-hand side result is on top of the virtual frame.
3788 JumpTarget pop_and_continue;
3789 JumpTarget exit;
3790
3791 __ ldr(r0, frame_->Top());
3792 frame_->EmitPush(r0);
3793 // Avoid popping the result if it converts to 'true' using the
3794 // standard ToBoolean() conversion as described in ECMA-262,
3795 // section 9.2, page 30.
3796 ToBoolean(&exit, &pop_and_continue);
3797 Branch(true, &exit);
3798
3799 // Pop the result of evaluating the first part.
3800 pop_and_continue.Bind();
3801 frame_->EmitPop(r0);
3802
3803 // Evaluate right side expression.
3804 is_false.Bind();
3805 LoadAndSpill(node->right());
3806
3807 // Exit (always with a materialized value).
3808 exit.Bind();
3809 } else if (has_cc() || is_false.is_linked()) {
3810 // The left-hand side is either (a) partially compiled to
3811 // control flow with a final branch left to emit or (b) fully
3812 // compiled to control flow and possibly false.
3813 if (has_cc()) {
3814 Branch(true, true_target());
3815 }
3816 is_false.Bind();
3817 LoadConditionAndSpill(node->right(),
3818 NOT_INSIDE_TYPEOF,
3819 true_target(),
3820 false_target(),
3821 false);
3822 } else {
3823 // Nothing to do.
3824 ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
3825 }
3826
3827 } else {
3828 // Optimize for the case where (at least) one of the expressions
3829 // is a literal small integer.
3830 Literal* lliteral = node->left()->AsLiteral();
3831 Literal* rliteral = node->right()->AsLiteral();
3832 // NOTE: The code below assumes that the slow cases (calls to runtime)
3833 // never return a constant/immutable object.
3834 bool overwrite_left =
3835 (node->left()->AsBinaryOperation() != NULL &&
3836 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
3837 bool overwrite_right =
3838 (node->right()->AsBinaryOperation() != NULL &&
3839 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
3840
3841 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
3842 LoadAndSpill(node->left());
3843 SmiOperation(node->op(),
3844 rliteral->handle(),
3845 false,
3846 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
3847
3848 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
3849 LoadAndSpill(node->right());
3850 SmiOperation(node->op(),
3851 lliteral->handle(),
3852 true,
3853 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
3854
3855 } else {
3856 OverwriteMode overwrite_mode = NO_OVERWRITE;
3857 if (overwrite_left) {
3858 overwrite_mode = OVERWRITE_LEFT;
3859 } else if (overwrite_right) {
3860 overwrite_mode = OVERWRITE_RIGHT;
3861 }
3862 LoadAndSpill(node->left());
3863 LoadAndSpill(node->right());
3864 GenericBinaryOperation(node->op(), overwrite_mode);
3865 }
3866 frame_->EmitPush(r0);
3867 }
3868 ASSERT(!has_valid_frame() ||
3869 (has_cc() && frame_->height() == original_height) ||
3870 (!has_cc() && frame_->height() == original_height + 1));
3871}
3872
3873
3874void CodeGenerator::VisitThisFunction(ThisFunction* node) {
3875#ifdef DEBUG
3876 int original_height = frame_->height();
3877#endif
3878 VirtualFrame::SpilledScope spilled_scope;
3879 __ ldr(r0, frame_->Function());
3880 frame_->EmitPush(r0);
3881 ASSERT(frame_->height() == original_height + 1);
3882}
3883
3884
3885void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
3886#ifdef DEBUG
3887 int original_height = frame_->height();
3888#endif
3889 VirtualFrame::SpilledScope spilled_scope;
3890 Comment cmnt(masm_, "[ CompareOperation");
3891
3892 // Get the expressions from the node.
3893 Expression* left = node->left();
3894 Expression* right = node->right();
3895 Token::Value op = node->op();
3896
3897 // To make null checks efficient, we check if either left or right is the
3898 // literal 'null'. If so, we optimize the code by inlining a null check
3899 // instead of calling the (very) general runtime routine for checking
3900 // equality.
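  // For example, (x == null) is true exactly when x is null or undefined,
  // while (x === null) is true only when x is null.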
3901 if (op == Token::EQ || op == Token::EQ_STRICT) {
3902 bool left_is_null =
3903 left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
3904 bool right_is_null =
3905 right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
3906 // The 'null' value can only be equal to 'null' or 'undefined'.
3907 if (left_is_null || right_is_null) {
3908 LoadAndSpill(left_is_null ? right : left);
3909 frame_->EmitPop(r0);
3910 __ LoadRoot(ip, Heap::kNullValueRootIndex);
3911 __ cmp(r0, ip);
3912
3913 // The 'null' value is only equal to 'undefined' if using non-strict
3914 // comparisons.
3915 if (op != Token::EQ_STRICT) {
3916 true_target()->Branch(eq);
3917
3918 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3919 __ cmp(r0, Operand(ip));
3920 true_target()->Branch(eq);
3921
3922 __ tst(r0, Operand(kSmiTagMask));
3923 false_target()->Branch(eq);
3924
3925 // It can be an undetectable object.
3926 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
3927 __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
3928 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
3929 __ cmp(r0, Operand(1 << Map::kIsUndetectable));
3930 }
3931
3932 cc_reg_ = eq;
3933 ASSERT(has_cc() && frame_->height() == original_height);
3934 return;
3935 }
3936 }
3937
3938 // To make typeof testing for natives implemented in JavaScript really
3939 // efficient, we generate special code for expressions of the form:
3940 // 'typeof <expression> == <string>'.
3941 UnaryOperation* operation = left->AsUnaryOperation();
3942 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
3943 (operation != NULL && operation->op() == Token::TYPEOF) &&
3944 (right->AsLiteral() != NULL &&
3945 right->AsLiteral()->handle()->IsString())) {
3946 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
3947
3948 // Load the operand, move it to register r1.
3949 LoadTypeofExpression(operation->expression());
3950 frame_->EmitPop(r1);
3951
3952 if (check->Equals(Heap::number_symbol())) {
3953 __ tst(r1, Operand(kSmiTagMask));
3954 true_target()->Branch(eq);
3955 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
3956 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3957 __ cmp(r1, ip);
3958 cc_reg_ = eq;
3959
3960 } else if (check->Equals(Heap::string_symbol())) {
3961 __ tst(r1, Operand(kSmiTagMask));
3962 false_target()->Branch(eq);
3963
3964 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
3965
3966 // It can be an undetectable string object.
3967 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
3968 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
3969 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
3970 false_target()->Branch(eq);
3971
3972 __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3973 __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
3974 cc_reg_ = lt;
3975
3976 } else if (check->Equals(Heap::boolean_symbol())) {
3977 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
3978 __ cmp(r1, ip);
3979 true_target()->Branch(eq);
3980 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
3981 __ cmp(r1, ip);
3982 cc_reg_ = eq;
3983
3984 } else if (check->Equals(Heap::undefined_symbol())) {
3985 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3986 __ cmp(r1, ip);
3987 true_target()->Branch(eq);
3988
3989 __ tst(r1, Operand(kSmiTagMask));
3990 false_target()->Branch(eq);
3991
3992 // It can be an undetectable object.
3993 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
3994 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
3995 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
3996 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
3997
3998 cc_reg_ = eq;
3999
4000 } else if (check->Equals(Heap::function_symbol())) {
4001 __ tst(r1, Operand(kSmiTagMask));
4002 false_target()->Branch(eq);
4003 __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
4004 cc_reg_ = eq;
4005
4006 } else if (check->Equals(Heap::object_symbol())) {
4007 __ tst(r1, Operand(kSmiTagMask));
4008 false_target()->Branch(eq);
4009
4010 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
4011 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4012 __ cmp(r1, ip);
4013 true_target()->Branch(eq);
4014
4015 // It can be an undetectable object.
4016 __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
4017 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
4018 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
4019 false_target()->Branch(eq);
4020
4021 __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
4022 __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
4023 false_target()->Branch(lt);
4024 __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
4025 cc_reg_ = le;
4026
4027 } else {
4028 // Uncommon case: typeof testing against a string literal that is
4029 // never returned from the typeof operator.
4030 false_target()->Jump();
4031 }
4032 ASSERT(!has_valid_frame() ||
4033 (has_cc() && frame_->height() == original_height));
4034 return;
4035 }
4036
4037 switch (op) {
4038 case Token::EQ:
4039 Comparison(eq, left, right, false);
4040 break;
4041
4042 case Token::LT:
4043 Comparison(lt, left, right);
4044 break;
4045
4046 case Token::GT:
4047 Comparison(gt, left, right);
4048 break;
4049
4050 case Token::LTE:
4051 Comparison(le, left, right);
4052 break;
4053
4054 case Token::GTE:
4055 Comparison(ge, left, right);
4056 break;
4057
4058 case Token::EQ_STRICT:
4059 Comparison(eq, left, right, true);
4060 break;
4061
4062 case Token::IN: {
4063 LoadAndSpill(left);
4064 LoadAndSpill(right);
4065 Result arg_count(r0);
4066 __ mov(r0, Operand(1)); // not counting receiver
4067 frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
4068 frame_->EmitPush(r0);
4069 break;
4070 }
4071
4072 case Token::INSTANCEOF: {
4073 LoadAndSpill(left);
4074 LoadAndSpill(right);
4075 InstanceofStub stub;
4076 frame_->CallStub(&stub, 2);
4077 // At this point if instanceof succeeded then r0 == 0.
4078 __ tst(r0, Operand(r0));
4079 cc_reg_ = eq;
4080 break;
4081 }
4082
4083 default:
4084 UNREACHABLE();
4085 }
4086 ASSERT((has_cc() && frame_->height() == original_height) ||
4087 (!has_cc() && frame_->height() == original_height + 1));
4088}
4089
4090
4091#ifdef DEBUG
4092bool CodeGenerator::HasValidEntryRegisters() { return true; }
4093#endif
4094
4095
4096#undef __
4097#define __ ACCESS_MASM(masm)
4098
4099
4100Handle<String> Reference::GetName() {
4101 ASSERT(type_ == NAMED);
4102 Property* property = expression_->AsProperty();
4103 if (property == NULL) {
4104 // Global variable reference treated as a named property reference.
4105 VariableProxy* proxy = expression_->AsVariableProxy();
4106 ASSERT(proxy->AsVariable() != NULL);
4107 ASSERT(proxy->AsVariable()->is_global());
4108 return proxy->name();
4109 } else {
4110 Literal* raw_name = property->key()->AsLiteral();
4111 ASSERT(raw_name != NULL);
4112 return Handle<String>(String::cast(*raw_name->handle()));
4113 }
4114}
4115
4116
4117void Reference::GetValue(TypeofState typeof_state) {
4118 ASSERT(cgen_->HasValidEntryRegisters());
4119 ASSERT(!is_illegal());
4120 ASSERT(!cgen_->has_cc());
4121 MacroAssembler* masm = cgen_->masm();
4122 Property* property = expression_->AsProperty();
4123 if (property != NULL) {
4124 cgen_->CodeForSourcePosition(property->position());
4125 }
4126
4127 switch (type_) {
4128 case SLOT: {
4129 Comment cmnt(masm, "[ Load from Slot");
4130 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
4131 ASSERT(slot != NULL);
4132 cgen_->LoadFromSlot(slot, typeof_state);
4133 break;
4134 }
4135
4136 case NAMED: {
4137 // TODO(1241834): Make sure that it is safe to ignore the
4138 // distinction between expressions in a typeof and not in a typeof. If
4139 // there is a chance that reference errors can be thrown below, we
4140 // must distinguish between the two kinds of loads (typeof expression
4141 // loads must not throw a reference error).
4142 VirtualFrame* frame = cgen_->frame();
4143 Comment cmnt(masm, "[ Load from named Property");
4144 Handle<String> name(GetName());
4145 Variable* var = expression_->AsVariableProxy()->AsVariable();
4146 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
4147 // Set up the name register.
4148 Result name_reg(r2);
4149 __ mov(r2, Operand(name));
4150 ASSERT(var == NULL || var->is_global());
4151 RelocInfo::Mode rmode = (var == NULL)
4152 ? RelocInfo::CODE_TARGET
4153 : RelocInfo::CODE_TARGET_CONTEXT;
4154 frame->CallCodeObject(ic, rmode, &name_reg, 0);
4155 frame->EmitPush(r0);
4156 break;
4157 }
4158
4159 case KEYED: {
4160 // TODO(1241834): Make sure that it is safe to ignore the
4161 // distinction between expressions in a typeof and not in a typeof.
4162
4163 // TODO(181): Implement inlined version of array indexing once
4164 // loop nesting is properly tracked on ARM.
4165 VirtualFrame* frame = cgen_->frame();
4166 Comment cmnt(masm, "[ Load from keyed Property");
4167 ASSERT(property != NULL);
4168 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
4169 Variable* var = expression_->AsVariableProxy()->AsVariable();
4170 ASSERT(var == NULL || var->is_global());
4171 RelocInfo::Mode rmode = (var == NULL)
4172 ? RelocInfo::CODE_TARGET
4173 : RelocInfo::CODE_TARGET_CONTEXT;
4174 frame->CallCodeObject(ic, rmode, 0);
4175 frame->EmitPush(r0);
4176 break;
4177 }
4178
4179 default:
4180 UNREACHABLE();
4181 }
4182}
4183
4184
4185void Reference::SetValue(InitState init_state) {
4186 ASSERT(!is_illegal());
4187 ASSERT(!cgen_->has_cc());
4188 MacroAssembler* masm = cgen_->masm();
4189 VirtualFrame* frame = cgen_->frame();
4190 Property* property = expression_->AsProperty();
4191 if (property != NULL) {
4192 cgen_->CodeForSourcePosition(property->position());
4193 }
4194
4195 switch (type_) {
4196 case SLOT: {
4197 Comment cmnt(masm, "[ Store to Slot");
4198 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
4199 ASSERT(slot != NULL);
4200 if (slot->type() == Slot::LOOKUP) {
4201 ASSERT(slot->var()->is_dynamic());
4202
4203 // For now, just do a runtime call.
4204 frame->EmitPush(cp);
4205 __ mov(r0, Operand(slot->var()->name()));
4206 frame->EmitPush(r0);
4207
4208 if (init_state == CONST_INIT) {
4209 // Same as the case for a normal store, but ignores attribute
4210 // (e.g. READ_ONLY) of context slot so that we can initialize
4211 // const properties (introduced via eval("const foo = (some
4212 // expr);")). Also, uses the current function context instead of
4213 // the top context.
4214 //
4215 // Note that we must declare the foo upon entry of eval(), via a
4216 // context slot declaration, but we cannot initialize it at the
4217 // same time, because the const declaration may be at the end of
4218 // the eval code (sigh...) and the const variable may have been
4219 // used before (where its value is 'undefined'). Thus, we can only
4220 // do the initialization when we actually encounter the expression
4221 // and when the expression operands are defined and valid, and
4222 // thus we need the split into 2 operations: declaration of the
4223 // context slot followed by initialization.
4224 frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4225 } else {
4226 frame->CallRuntime(Runtime::kStoreContextSlot, 3);
4227 }
4228 // Storing a variable must keep the (new) value on the expression
4229 // stack. This is necessary for compiling assignment expressions.
4230 frame->EmitPush(r0);
4231
4232 } else {
4233 ASSERT(!slot->var()->is_dynamic());
4234
4235 JumpTarget exit;
4236 if (init_state == CONST_INIT) {
4237 ASSERT(slot->var()->mode() == Variable::CONST);
4238 // Only the first const initialization must be executed (the slot
4239 // still contains 'the hole' value). When the assignment is
4240 // executed, the code is identical to a normal store (see below).
4241 Comment cmnt(masm, "[ Init const");
4242 __ ldr(r2, cgen_->SlotOperand(slot, r2));
4243 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
4244 __ cmp(r2, ip);
4245 exit.Branch(ne);
4246 }
4247
4248 // We must execute the store. Storing a variable must keep the
4249 // (new) value on the stack. This is necessary for compiling
4250 // assignment expressions.
4251 //
4252 // Note: We will reach here even with slot->var()->mode() ==
4253 // Variable::CONST because of const declarations which will
4254 // initialize consts to 'the hole' value and by doing so, end up
4255 // calling this code. r2 may be loaded with context; used below in
4256 // RecordWrite.
4257 frame->EmitPop(r0);
4258 __ str(r0, cgen_->SlotOperand(slot, r2));
4259 frame->EmitPush(r0);
4260 if (slot->type() == Slot::CONTEXT) {
4261 // Skip write barrier if the written value is a smi.
4262 __ tst(r0, Operand(kSmiTagMask));
4263 exit.Branch(eq);
4264 // r2 is loaded with context when calling SlotOperand above.
4265 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4266 __ mov(r3, Operand(offset));
4267 __ RecordWrite(r2, r3, r1);
4268 }
4269 // If we definitely did not jump over the assignment, we do not need
4270 // to bind the exit label. Doing so can defeat peephole
4271 // optimization.
4272 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
4273 exit.Bind();
4274 }
4275 }
4276 break;
4277 }
4278
4279 case NAMED: {
4280 Comment cmnt(masm, "[ Store to named Property");
4281 // Call the appropriate IC code.
4282 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
4283 Handle<String> name(GetName());
4284
4285 Result value(r0);
4286 frame->EmitPop(r0);
4287
4288 // Set up the name register.
4289 Result property_name(r2);
4290 __ mov(r2, Operand(name));
4291 frame->CallCodeObject(ic,
4292 RelocInfo::CODE_TARGET,
4293 &value,
4294 &property_name,
4295 0);
4296 frame->EmitPush(r0);
4297 break;
4298 }
4299
4300 case KEYED: {
4301 Comment cmnt(masm, "[ Store to keyed Property");
4302 Property* property = expression_->AsProperty();
4303 ASSERT(property != NULL);
4304 cgen_->CodeForSourcePosition(property->position());
4305
4306 // Call IC code.
4307 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
4308 // TODO(1222589): Make the IC grab the values from the stack.
4309 Result value(r0);
4310 frame->EmitPop(r0); // value
4311 frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
4312 frame->EmitPush(r0);
4313 break;
4314 }
4315
4316 default:
4317 UNREACHABLE();
4318 }
4319}
4320
4321
4322// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
4323// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
4324// (31 instead of 32).
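// The fallback below narrows down the position of the leading 1 by testing
// 16-, 8-, 4-, 2- and 1-bit chunks; for example, for source == 0x00010000 it
// accumulates 8 + 4 + 2 + 1 == 15 leading zeros.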
4325static void CountLeadingZeros(
4326 MacroAssembler* masm,
4327 Register source,
4328 Register scratch,
4329 Register zeros) {
4330#ifdef CAN_USE_ARMV5_INSTRUCTIONS
4331 __ clz(zeros, source); // This instruction is only supported after ARM5.
4332#else
4333 __ mov(zeros, Operand(0));
4334 __ mov(scratch, source);
4335 // Top 16.
4336 __ tst(scratch, Operand(0xffff0000));
4337 __ add(zeros, zeros, Operand(16), LeaveCC, eq);
4338 __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
4339 // Top 8.
4340 __ tst(scratch, Operand(0xff000000));
4341 __ add(zeros, zeros, Operand(8), LeaveCC, eq);
4342 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
4343 // Top 4.
4344 __ tst(scratch, Operand(0xf0000000));
4345 __ add(zeros, zeros, Operand(4), LeaveCC, eq);
4346 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
4347 // Top 2.
4348 __ tst(scratch, Operand(0xc0000000));
4349 __ add(zeros, zeros, Operand(2), LeaveCC, eq);
4350 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
4351 // Top bit.
4352 __ tst(scratch, Operand(0x80000000u));
4353 __ add(zeros, zeros, Operand(1), LeaveCC, eq);
4354#endif
4355}
4356
4357
4358// Takes a Smi and converts to an IEEE 64 bit floating point value in two
4359// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
4360// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
4361// scratch register. Destroys the source register. No GC occurs during this
4362// stub so you don't have to set up the frame.
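// For example, the Smi 5 is binary 1.01 * 2^2, so the exponent word becomes
// 0x40140000 (biased exponent 1025 == 0x401 followed by the top mantissa
// bits 0x40000) and the mantissa word is 0.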
4363class ConvertToDoubleStub : public CodeStub {
4364 public:
4365 ConvertToDoubleStub(Register result_reg_1,
4366 Register result_reg_2,
4367 Register source_reg,
4368 Register scratch_reg)
4369 : result1_(result_reg_1),
4370 result2_(result_reg_2),
4371 source_(source_reg),
4372 zeros_(scratch_reg) { }
4373
4374 private:
4375 Register result1_;
4376 Register result2_;
4377 Register source_;
4378 Register zeros_;
4379
4380 // Minor key encoding in 16 bits.
4381 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4382 class OpBits: public BitField<Token::Value, 2, 14> {};
4383
4384 Major MajorKey() { return ConvertToDouble; }
4385 int MinorKey() {
4386 // Encode the parameters in a unique 16 bit value.
4387 return result1_.code() +
4388 (result2_.code() << 4) +
4389 (source_.code() << 8) +
4390 (zeros_.code() << 12);
4391 }
4392
4393 void Generate(MacroAssembler* masm);
4394
4395 const char* GetName() { return "ConvertToDoubleStub"; }
4396
4397#ifdef DEBUG
4398 void Print() { PrintF("ConvertToDoubleStub\n"); }
4399#endif
4400};
4401
4402
4403void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
4404#ifndef BIG_ENDIAN_FLOATING_POINT
4405 Register exponent = result1_;
4406 Register mantissa = result2_;
4407#else
4408 Register exponent = result2_;
4409 Register mantissa = result1_;
4410#endif
4411 Label not_special;
4412 // Convert from Smi to integer.
4413 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
4414 // Move sign bit from source to destination. This works because the sign bit
4415 // in the exponent word of the double has the same position and polarity as
4416 // the 2's complement sign bit in a Smi.
4417 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4418 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
4419 // Subtract from 0 if source was negative.
4420 __ rsb(source_, source_, Operand(0), LeaveCC, ne);
4421 __ cmp(source_, Operand(1));
4422 __ b(gt, &not_special);
4423
4424 // We have -1, 0 or 1, which we treat specially.
4425 __ cmp(source_, Operand(0));
4426 // For 1 or -1 we need to OR in the zero exponent (biased to 1023).
4427 static const uint32_t exponent_word_for_1 =
4428 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
4429 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
4430 // 1, 0 and -1 all have 0 for the second word.
4431 __ mov(mantissa, Operand(0));
4432 __ Ret();
4433
4434 __ bind(&not_special);
4435 // Count leading zeros. Uses result2 for a scratch register on pre-ARM5.
4436 // Gets the wrong answer for 0, but we already checked for that case above.
4437 CountLeadingZeros(masm, source_, mantissa, zeros_);
4438 // Compute exponent and or it into the exponent register.
4439 // We use result2 as a scratch register here.
4440 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
4441 __ orr(exponent,
4442 exponent,
4443 Operand(mantissa, LSL, HeapNumber::kExponentShift));
4444 // Shift up the source chopping the top bit off.
4445 __ add(zeros_, zeros_, Operand(1));
4446 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
4447 __ mov(source_, Operand(source_, LSL, zeros_));
4448 // Compute lower part of fraction (last 12 bits).
4449 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
4450 // And the top (top 20 bits).
4451 __ orr(exponent,
4452 exponent,
4453 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
4454 __ Ret();
4455}
4456
4457
4458// This stub can convert a signed int32 to a heap number (double). It does
4459// not work for int32s that are in Smi range! No GC occurs during this stub
4460// so you don't have to set up the frame.
4461class WriteInt32ToHeapNumberStub : public CodeStub {
4462 public:
4463 WriteInt32ToHeapNumberStub(Register the_int,
4464 Register the_heap_number,
4465 Register scratch)
4466 : the_int_(the_int),
4467 the_heap_number_(the_heap_number),
4468 scratch_(scratch) { }
4469
4470 private:
4471 Register the_int_;
4472 Register the_heap_number_;
4473 Register scratch_;
4474
4475 // Minor key encoding in 16 bits.
4476 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4477 class OpBits: public BitField<Token::Value, 2, 14> {};
4478
4479 Major MajorKey() { return WriteInt32ToHeapNumber; }
4480 int MinorKey() {
4481 // Encode the parameters in a unique 16 bit value.
4482 return the_int_.code() +
4483 (the_heap_number_.code() << 4) +
4484 (scratch_.code() << 8);
4485 }
4486
4487 void Generate(MacroAssembler* masm);
4488
4489 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
4490
4491#ifdef DEBUG
4492 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
4493#endif
4494};
4495
4496
4497// See comment for class.
4498void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
4499 Label max_negative_int;
4500 // the_int_ has the answer which is a signed int32 but not a Smi.
4501 // We test for the special value that has a different exponent. This test
4502 // has the neat side effect of setting the flags according to the sign.
4503 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4504 __ cmp(the_int_, Operand(0x80000000u));
4505 __ b(eq, &max_negative_int);
4506 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
4507 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
4508 uint32_t non_smi_exponent =
4509 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
4510 __ mov(scratch_, Operand(non_smi_exponent));
4511 // Set the sign bit in scratch_ if the value was negative.
4512 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
4513 // Subtract from 0 if the value was negative.
4514 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
4515 // We should be masking the implicit first digit of the mantissa away here,
4516 // but it just ends up combining harmlessly with the last digit of the
4517 // exponent that happens to be 1. The sign bit is 0, so we shift by 10 to get
4518 // the most significant 1 to hit the last bit of the 12-bit sign and exponent.
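  // For example, for the_int_ == 0x40000000 (2^30) the exponent word is
  // 0x41d00000; the shift drops the implicit 1 onto bit 20, which is already
  // set, and the mantissa word ends up 0, giving exactly 2^30 as a double.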
4519 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
4520 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
4521 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
4522 __ str(scratch_, FieldMemOperand(the_heap_number_,
4523 HeapNumber::kExponentOffset));
4524 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
4525 __ str(scratch_, FieldMemOperand(the_heap_number_,
4526 HeapNumber::kMantissaOffset));
4527 __ Ret();
4528
4529 __ bind(&max_negative_int);
4530 // The max negative int32 is stored as a positive number in the mantissa of
4531 // a double because it uses a sign bit instead of using two's complement.
4532 // The actual mantissa bits stored are all 0 because the implicit most
4533 // significant 1 bit is not stored.
4534 non_smi_exponent += 1 << HeapNumber::kExponentShift;
4535 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
4536 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
4537 __ mov(ip, Operand(0));
4538 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
4539 __ Ret();
4540}
4541
4542
4543// Handle the case where the lhs and rhs are the same object.
4544// Equality is almost reflexive (everything but NaN), so this is a test
4545// for "identity and not NaN".
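// (NaN is the one value for which x == x, x <= x and x >= x are all false.)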
4546static void EmitIdenticalObjectComparison(MacroAssembler* masm,
4547 Label* slow,
4548 Condition cc) {
4549 Label not_identical;
4550 __ cmp(r0, Operand(r1));
4551 __ b(ne, &not_identical);
4552
4553 Register exp_mask_reg = r5;
4554 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4555
4556 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
4557 // so we do the second-best thing: test it ourselves.
4558 Label heap_number, return_equal;
4559 // They are identical, and they are not both Smis, so neither of them is a
4560 // Smi. If the operand is not a heap number, then return equal.
4561 if (cc == lt || cc == gt) {
4562 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
4563 __ b(ge, slow);
4564 } else {
4565 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4566 __ b(eq, &heap_number);
4567 // Comparing JS objects with <=, >= is complicated.
4568 if (cc != eq) {
4569 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
4570 __ b(ge, slow);
4571 }
4572 }
4573 __ bind(&return_equal);
4574 if (cc == lt) {
4575 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
4576 } else if (cc == gt) {
4577 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
4578 } else {
4579 __ mov(r0, Operand(0)); // Things are <=, >=, ==, === themselves.
4580 }
4581 __ mov(pc, Operand(lr)); // Return.
4582
4583 // For less and greater we don't have to check for NaN since the result of
4584 // x < x is false regardless. For the others, here is some code to check
4585 // for NaN.
4586 if (cc != lt && cc != gt) {
4587 __ bind(&heap_number);
4588 // It is a heap number, so return non-equal if it's NaN and equal if it's
4589 // not NaN.
4590 // The representation of NaN values has all exponent bits (52..62) set,
4591 // and not all mantissa bits (0..51) clear.
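    // For example, 0x7ff8000000000000 is a NaN, while 0x7ff0000000000000
    // (all mantissa bits clear) is +Infinity.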
4592 // Read top bits of double representation (second word of value).
4593 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
4594 // Test that exponent bits are all set.
4595 __ and_(r3, r2, Operand(exp_mask_reg));
4596 __ cmp(r3, Operand(exp_mask_reg));
4597 __ b(ne, &return_equal);
4598
4599 // Shift out flag and all exponent bits, retaining only mantissa.
4600 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
4601 // Or with all low-bits of mantissa.
4602 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
4603 __ orr(r0, r3, Operand(r2), SetCC);
4604 // For equal we already have the right value in r0: Return zero (equal)
4605 // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
4606 // (it's a NaN). For <= and >= we need to load r0 with the failing value
4607 // if it's a NaN.
4608 if (cc != eq) {
4609 // All-zero means Infinity means equal.
4610 __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
4611 if (cc == le) {
4612 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
4613 } else {
4614 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
4615 }
4616 }
4617 __ mov(pc, Operand(lr)); // Return.
4618 }
4619 // No fall through here.
4620
4621 __ bind(&not_identical);
4622}
4623
4624
4625// See comment at call site.
4626static void EmitSmiNonsmiComparison(MacroAssembler* masm,
4627 Label* rhs_not_nan,
4628 Label* slow,
4629 bool strict) {
4630 Label lhs_is_smi;
4631 __ tst(r0, Operand(kSmiTagMask));
4632 __ b(eq, &lhs_is_smi);
4633
4634 // Rhs is a Smi. Check whether the non-smi is a heap number.
4635 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4636 if (strict) {
4637 // If lhs was not a number and rhs was a Smi then strict equality cannot
4638 // succeed. Return non-equal (r0 is already not zero).
4639 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4640 } else {
4641 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4642 // the runtime.
4643 __ b(ne, slow);
4644 }
4645
4646 // Rhs is a smi, lhs is a number.
4647 __ push(lr);
4648 __ mov(r7, Operand(r1));
4649 ConvertToDoubleStub stub1(r3, r2, r7, r6);
4650 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
4651 // r3 and r2 are rhs as double.
4652 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4653 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
4654 // We now have both loaded as doubles but we can skip the lhs nan check
4655 // since it's a Smi.
4656 __ pop(lr);
4657 __ jmp(rhs_not_nan);
4658
4659 __ bind(&lhs_is_smi);
4660 // Lhs is a Smi. Check whether the non-smi is a heap number.
4661 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
4662 if (strict) {
4663 // If lhs was not a number and rhs was a Smi then strict equality cannot
4664 // succeed. Return non-equal.
4665 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
4666 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4667 } else {
4668 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4669 // the runtime.
4670 __ b(ne, slow);
4671 }
4672
4673 // Lhs is a smi, rhs is a number.
4674 // r0 is Smi and r1 is heap number.
4675 __ push(lr);
4676 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4677 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4678 __ mov(r7, Operand(r0));
4679 ConvertToDoubleStub stub2(r1, r0, r7, r6);
4680 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
4681 __ pop(lr);
4682 // Fall through to both_loaded_as_doubles.
4683}
4684
4685
4686void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
4687 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
4688 Register lhs_exponent = exp_first ? r0 : r1;
4689 Register rhs_exponent = exp_first ? r2 : r3;
4690 Register lhs_mantissa = exp_first ? r1 : r0;
4691 Register rhs_mantissa = exp_first ? r3 : r2;
4692 Label one_is_nan, neither_is_nan;
4693
4694 Register exp_mask_reg = r5;
4695
4696 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4697 __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
4698 __ cmp(r4, Operand(exp_mask_reg));
4699 __ b(ne, rhs_not_nan);
4700 __ mov(r4,
4701 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4702 SetCC);
4703 __ b(ne, &one_is_nan);
4704 __ cmp(rhs_mantissa, Operand(0));
4705 __ b(ne, &one_is_nan);
4706
4707 __ bind(rhs_not_nan);
4708 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4709 __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
4710 __ cmp(r4, Operand(exp_mask_reg));
4711 __ b(ne, &neither_is_nan);
4712 __ mov(r4,
4713 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4714 SetCC);
4715 __ b(ne, &one_is_nan);
4716 __ cmp(lhs_mantissa, Operand(0));
4717 __ b(eq, &neither_is_nan);
4718
4719 __ bind(&one_is_nan);
4720 // NaN comparisons always fail.
4721 // Load whatever we need in r0 to make the comparison fail.
4722 if (cc == lt || cc == le) {
4723 __ mov(r0, Operand(GREATER));
4724 } else {
4725 __ mov(r0, Operand(LESS));
4726 }
4727 __ mov(pc, Operand(lr)); // Return.
4728
4729 __ bind(&neither_is_nan);
4730}
4731
4732
4733// See comment at call site.
4734static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
4735 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
4736 Register lhs_exponent = exp_first ? r0 : r1;
4737 Register rhs_exponent = exp_first ? r2 : r3;
4738 Register lhs_mantissa = exp_first ? r1 : r0;
4739 Register rhs_mantissa = exp_first ? r3 : r2;
4740
4741 // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
4742 if (cc == eq) {
4743 // Doubles are not equal unless they have the same bit pattern.
4744 // Exception: 0 and -0.
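    // (+0.0 is the bit pattern 0x0000000000000000 and -0.0 is
    // 0x8000000000000000, yet they must compare equal.)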
4745 __ cmp(lhs_mantissa, Operand(rhs_mantissa));
4746 __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne);
4747 // Return non-zero if the numbers are unequal.
4748 __ mov(pc, Operand(lr), LeaveCC, ne);
4749
4750 __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC);
4751 // If exponents are equal then return 0.
4752 __ mov(pc, Operand(lr), LeaveCC, eq);
4753
4754 // Exponents are unequal. The only way we can return that the numbers
4755 // are equal is if one is -0 and the other is 0. We already dealt
4756 // with the case where both are -0 or both are 0.
4757 // We start by seeing if the mantissas (that are equal) or the bottom
4758 // 31 bits of the rhs exponent are non-zero. If so we return not
4759 // equal.
4760 __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC);
4761 __ mov(r0, Operand(r4), LeaveCC, ne);
4762 __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
4763 // Now they are equal if and only if the lhs exponent is zero in its
4764 // low 31 bits.
4765 __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize));
4766 __ mov(pc, Operand(lr));
4767 } else {
4768 // Call a native function to do a comparison between two non-NaNs.
4769 // Call C routine that may not cause GC or other trouble.
4770 __ mov(r5, Operand(ExternalReference::compare_doubles()));
4771 __ Jump(r5); // Tail call.
4772 }
4773}
4774
4775
4776// See comment at call site.
4777static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
4778 // If either operand is a JSObject or an oddball value, then they are
4779 // not equal since their pointers are different.
4780 // There is no test for undetectability in strict equality.
4781 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4782 Label first_non_object;
4783 // Get the type of the first operand into r2 and compare it with
4784 // FIRST_JS_OBJECT_TYPE.
4785 __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
4786 __ b(lt, &first_non_object);
4787
4788 // Return non-zero (r0 is not zero).
4789 Label return_not_equal;
4790 __ bind(&return_not_equal);
4791 __ mov(pc, Operand(lr)); // Return.
4792
4793 __ bind(&first_non_object);
4794 // Check for oddballs: true, false, null, undefined.
4795 __ cmp(r2, Operand(ODDBALL_TYPE));
4796 __ b(eq, &return_not_equal);
4797
4798 __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
4799 __ b(ge, &return_not_equal);
4800
4801 // Check for oddballs: true, false, null, undefined.
4802 __ cmp(r3, Operand(ODDBALL_TYPE));
4803 __ b(eq, &return_not_equal);
4804}
4805
4806
4807// See comment at call site.
4808static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
4809 Label* both_loaded_as_doubles,
4810 Label* not_heap_numbers,
4811 Label* slow) {
4812 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
4813 __ b(ne, not_heap_numbers);
4814 __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
4815 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
4816
4817 // Both are heap numbers. Load them up then jump to the code we have
4818 // for that.
4819 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4820 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4821 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4822 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
4823 __ jmp(both_loaded_as_doubles);
4824}
4825
4826
4827// Fast negative check for symbol-to-symbol equality.
4828static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
4829 // r2 is object type of r0.
4830 __ tst(r2, Operand(kIsNotStringMask));
4831 __ b(ne, slow);
4832 __ tst(r2, Operand(kIsSymbolMask));
4833 __ b(eq, slow);
4834 __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
4835 __ b(ge, slow);
4836 __ tst(r3, Operand(kIsSymbolMask));
4837 __ b(eq, slow);
4838
4839 // Both are symbols. We already checked they weren't the same pointer
4840 // so they are not equal.
4841 __ mov(r0, Operand(1)); // Non-zero indicates not equal.
4842 __ mov(pc, Operand(lr)); // Return.
4843}
4844
4845
4846// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
4847// positive or negative to indicate the result of the comparison.
4848void CompareStub::Generate(MacroAssembler* masm) {
4849 Label slow; // Call builtin.
4850 Label not_smis, both_loaded_as_doubles, rhs_not_nan;
4851
4852 // NOTICE! This code is only reached after a smi-fast-case check, so
4853 // it is certain that at least one operand isn't a smi.
4854
4855 // Handle the case where the objects are identical. Either returns the answer
4856 // or goes to slow. Only falls through if the objects were not identical.
4857 EmitIdenticalObjectComparison(masm, &slow, cc_);
4858
4859 // If either is a Smi (we know that not both are), then they can only
4860 // be strictly equal if the other is a HeapNumber.
4861 ASSERT_EQ(0, kSmiTag);
4862 ASSERT_EQ(0, Smi::FromInt(0));
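  // The tag bit of (r0 & r1) is set only if both operands are heap objects;
  // if at least one of them is a smi, it is clear.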
4863 __ and_(r2, r0, Operand(r1));
4864 __ tst(r2, Operand(kSmiTagMask));
4865 __ b(ne, &not_smis);
4866 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
4867 // 1) Return the answer.
4868 // 2) Go to slow.
4869 // 3) Fall through to both_loaded_as_doubles.
4870 // 4) Jump to rhs_not_nan.
4871 // In cases 3 and 4 we have found out we were dealing with a number-number
4872 // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
4873 EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_);
4874
4875 __ bind(&both_loaded_as_doubles);
4876 // r0, r1, r2, r3 are the double representations of the left hand side
4877 // and the right hand side.
4878
4879 // Checks for NaN in the doubles we have loaded. Can return the answer or
4880 // fall through if neither is a NaN. Also binds rhs_not_nan.
4881 EmitNanCheck(masm, &rhs_not_nan, cc_);
4882
4883 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
4884 // answer. Never falls through.
4885 EmitTwoNonNanDoubleComparison(masm, cc_);
4886
4887 __ bind(&not_smis);
4888 // At this point we know we are dealing with two different objects,
4889 // and neither of them is a Smi. The objects are in r0 and r1.
4890 if (strict_) {
4891 // This returns non-equal for some object types, or falls through if it
4892 // was not lucky.
4893 EmitStrictTwoHeapObjectCompare(masm);
4894 }
4895
4896 Label check_for_symbols;
4897 // Check for heap-number-heap-number comparison. Can jump to slow case,
4898 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
4899 // that case. If the inputs are not doubles then jumps to check_for_symbols.
4900 // In this case r2 will contain the type of r0.
4901 EmitCheckForTwoHeapNumbers(masm,
4902 &both_loaded_as_doubles,
4903 &check_for_symbols,
4904 &slow);
4905
4906 __ bind(&check_for_symbols);
4907 if (cc_ == eq) {
4908 // Either jumps to slow or returns the answer. Assumes that r2 is the type
4909 // of r0 on entry.
4910 EmitCheckForSymbols(masm, &slow);
4911 }
4912
4913 __ bind(&slow);
4914 __ push(lr);
4915 __ push(r1);
4916 __ push(r0);
4917 // Figure out which native to call and set up the arguments.
4918 Builtins::JavaScript native;
4919 int arg_count = 1; // Not counting receiver.
4920 if (cc_ == eq) {
4921 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4922 } else {
4923 native = Builtins::COMPARE;
4924 int ncr; // NaN compare result
4925 if (cc_ == lt || cc_ == le) {
4926 ncr = GREATER;
4927 } else {
4928 ASSERT(cc_ == gt || cc_ == ge); // remaining cases
4929 ncr = LESS;
4930 }
4931 arg_count++;
4932 __ mov(r0, Operand(Smi::FromInt(ncr)));
4933 __ push(r0);
4934 }
4935
4936 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4937 // tagged as a small integer.
4938 __ mov(r0, Operand(arg_count));
4939 __ InvokeBuiltin(native, CALL_JS);
4940 __ cmp(r0, Operand(0));
4941 __ pop(pc);
4942}
4943
4944
4945// Allocates a heap number or jumps to the label if the young space is full and
4946// a scavenge is needed.
4947static void AllocateHeapNumber(
4948 MacroAssembler* masm,
4949 Label* need_gc, // Jump here if young space is full.
4950 Register result, // The tagged address of the new heap number.
4951 Register scratch1, // A scratch register.
4952 Register scratch2) { // Another scratch register.
4953 // Allocate an object in the heap for the heap number and tag it as a heap
4954 // object.
4955 __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
4956 result,
4957 scratch1,
4958 scratch2,
4959 need_gc,
4960 TAG_OBJECT);
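 // With TAG_OBJECT the returned pointer is already tagged, i.e.
 //   result == untagged_address + kHeapObjectTag,
 // which is why the fields below are written through FieldMemOperand
 // (offset minus tag) rather than a plain MemOperand.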
4961
4962 // Get heap number map and store it in the allocated object.
4963 __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
4964 __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4965}
4966
4967
4968// We fall into this code if the operands were Smis, but the result was
4969 // not (e.g. overflow). We branch into this code (to the not_smi label) if
4970// the operands were not both Smi. The operands are in r0 and r1. In order
4971// to call the C-implemented binary fp operation routines we need to end up
4972// with the double precision floating point operands in r0 and r1 (for the
4973// value in r1) and r2 and r3 (for the value in r0).
4974static void HandleBinaryOpSlowCases(MacroAssembler* masm,
4975 Label* not_smi,
4976 const Builtins::JavaScript& builtin,
4977 Token::Value operation,
4978 OverwriteMode mode) {
4979 Label slow, slow_pop_2_first, do_the_call;
4980 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
4981 // Smi-smi case (overflow).
4982 // Since both are Smis there is no heap number to overwrite, so allocate.
4983 // The new heap number is in r5. r6 and r7 are scratch.
4984 AllocateHeapNumber(masm, &slow, r5, r6, r7);
4985 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
4986 __ mov(r7, Operand(r0));
4987 ConvertToDoubleStub stub1(r3, r2, r7, r6);
4988 __ push(lr);
4989 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
4990 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
4991 __ mov(r7, Operand(r1));
4992 ConvertToDoubleStub stub2(r1, r0, r7, r6);
4993 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
4994 __ pop(lr);
4995 __ jmp(&do_the_call); // Tail call. No return.
4996
4997 // We jump to here if something goes wrong (one param is not a number of any
4998 // sort or new-space allocation fails).
4999 __ bind(&slow);
5000 __ push(r1);
5001 __ push(r0);
5002 __ mov(r0, Operand(1)); // Set number of arguments.
5003 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
5004
5005 // We branch here if at least one of r0 and r1 is not a Smi.
5006 __ bind(not_smi);
5007 if (mode == NO_OVERWRITE) {
5008 // In the case where there is no chance of an overwritable float we may as
5009 // well do the allocation immediately while r0 and r1 are untouched.
5010 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5011 }
5012
5013 // Move r0 to a double in r2-r3.
5014 __ tst(r0, Operand(kSmiTagMask));
5015 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5016 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5017 __ b(ne, &slow);
5018 if (mode == OVERWRITE_RIGHT) {
5019 __ mov(r5, Operand(r0)); // Overwrite this heap number.
5020 }
5021 // Calling convention says that second double is in r2 and r3.
5022 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
5023 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
5024 __ jmp(&finished_loading_r0);
5025 __ bind(&r0_is_smi);
5026 if (mode == OVERWRITE_RIGHT) {
5027 // We can't overwrite a Smi so get address of new heap number into r5.
5028 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5029 }
5030 // Write Smi from r0 to r3 and r2 in double format.
5031 __ mov(r7, Operand(r0));
5032 ConvertToDoubleStub stub3(r3, r2, r7, r6);
5033 __ push(lr);
5034 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
5035 __ pop(lr);
5036 __ bind(&finished_loading_r0);
5037
5038 // Move r1 to a double in r0-r1.
5039 __ tst(r1, Operand(kSmiTagMask));
5040 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5041 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5042 __ b(ne, &slow);
5043 if (mode == OVERWRITE_LEFT) {
5044 __ mov(r5, Operand(r1)); // Overwrite this heap number.
5045 }
5046 // Calling convention says that first double is in r0 and r1.
5047 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
5048 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
5049 __ jmp(&finished_loading_r1);
5050 __ bind(&r1_is_smi);
5051 if (mode == OVERWRITE_LEFT) {
5052 // We can't overwrite a Smi so get address of new heap number into r5.
5053 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5054 }
5055 // Write Smi from r1 to r1 and r0 in double format.
5056 __ mov(r7, Operand(r1));
5057 ConvertToDoubleStub stub4(r1, r0, r7, r6);
5058 __ push(lr);
5059 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
5060 __ pop(lr);
5061 __ bind(&finished_loading_r1);
5062
5063 __ bind(&do_the_call);
5064 // r0: Left value (least significant part of mantissa).
5065 // r1: Left value (sign, exponent, top of mantissa).
5066 // r2: Right value (least significant part of mantissa).
5067 // r3: Right value (sign, exponent, top of mantissa).
5068 // r5: Address of heap number for result.
5069 __ push(lr); // For later.
5070 __ push(r5); // Address of heap number that is answer.
5071 __ AlignStack(0);
5072 // Call C routine that may not cause GC or other trouble.
5073 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
5074 __ Call(r5);
5075 __ pop(r4); // Address of heap number.
5076 __ cmp(r4, Operand(Smi::FromInt(0)));
5077 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
5078 // Store answer in the overwritable heap number.
5079#if !defined(USE_ARM_EABI)
5080 // Double returned in fp coprocessor register 0 and 1, encoded as register
5081 // cr8. Offsets must be divisible by 4 for coprocessor so we need to
5082 // substract the tag from r4.
5083 __ sub(r5, r4, Operand(kHeapObjectTag));
5084 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
5085#else
5086 // Double returned in registers 0 and 1.
5087 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
5088 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
5089#endif
5090 __ mov(r0, Operand(r4));
5091 // And we are done.
5092 __ pop(pc);
5093}
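
// Illustration only -- not emitted or referenced by the stub above.  On a
// little-endian ARM target a C double occupies two 32-bit words: word 0 is
// the least significant part of the mantissa and word 1 holds the sign,
// exponent and top of the mantissa.  That is the (r0, r1) / (r2, r3)
// convention used above, and the reason kValueOffset and kValueOffset + 4
// are loaded separately.  Assumes <stdint.h> and <string.h> are available.
static inline void SplitDoubleForIllustration(double value,
                                              uint32_t* lo,
                                              uint32_t* hi) {
  uint32_t words[2];
  memcpy(words, &value, sizeof(words));  // memcpy avoids aliasing problems.
  *lo = words[0];  // Least significant part of the mantissa.
  *hi = words[1];  // Sign, exponent, top of the mantissa.
}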
5094
5095
5096// Tries to get a signed int32 out of a double precision floating point heap
5097// number. Rounds towards 0. Fastest for doubles that are in the ranges
5098// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
5099// almost to the range of signed int32 values that are not Smis. Jumps to the
5100// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
5101// (excluding the endpoints).
5102static void GetInt32(MacroAssembler* masm,
5103 Register source,
5104 Register dest,
5105 Register scratch,
5106 Register scratch2,
5107 Label* slow) {
5108 Label right_exponent, done;
5109 // Get exponent word.
5110 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
5111 // Get exponent alone in scratch2.
5112 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
5113 // Load dest with zero. We use this either for the final shift or
5114 // for the answer.
5115 __ mov(dest, Operand(0));
5116 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
5117 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
5118 // the exponent that we are fastest at and also the highest exponent we can
5119 // handle here.
5120 const uint32_t non_smi_exponent =
5121 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
5122 __ cmp(scratch2, Operand(non_smi_exponent));
5123 // If we have a match of the int32-but-not-Smi exponent then skip some logic.
5124 __ b(eq, &right_exponent);
5125 // If the exponent is higher than that then go to slow case. This catches
5126 // numbers that don't fit in a signed int32, infinities and NaNs.
5127 __ b(gt, slow);
5128
5129 // We know the exponent is smaller than 30 (biased). If it is less than
5130 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
5131 // it rounds to zero.
5132 const uint32_t zero_exponent =
5133 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
5134 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
5135 // Dest already has a Smi zero.
5136 __ b(lt, &done);
5137 // We have a shifted exponent between 0 and 30 in scratch2.
5138 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
5139 // We now have the exponent in dest. Subtract from 30 to get
5140 // how much to shift down.
5141 __ rsb(dest, dest, Operand(30));
5142
5143 __ bind(&right_exponent);
5144 // Get the top bits of the mantissa.
5145 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
5146 // Put back the implicit 1.
5147 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
5148 // Shift up the mantissa bits to take up the space the exponent used to take.
5149 // We just orred in the implicit bit, which accounts for one of them, and we
5150 // want to leave the sign bit 0, so we subtract 2 bits from the shift distance.
5151 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
5152 __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
5153 // Put sign in zero flag.
5154 __ tst(scratch, Operand(HeapNumber::kSignMask));
5155 // Get the second half of the double. For some exponents we don't actually
5156 // need this because the bits get shifted out again, but it's probably slower
5157 // to test than just to do it.
5158 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
5159 // Shift down 22 bits to get the top 10 bits of the lower mantissa word.
5160 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
5161 // Move down according to the exponent.
5162 __ mov(dest, Operand(scratch, LSR, dest));
5163 // Fix sign if sign bit was set.
5164 __ rsb(dest, dest, Operand(0), LeaveCC, ne);
5165 __ bind(&done);
5166}
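
// Illustration only -- a plain C++ version of the truncation GetInt32
// performs, valid under the same precondition (|value| < 2^31, so the biased
// exponent is at most kExponentBias + 30).  Word order and masks match the
// little-endian layout assumed above; uses <stdint.h> and <string.h>.
static int GetInt32ForIllustration(double value) {
  uint32_t words[2];
  memcpy(words, &value, sizeof(words));
  uint32_t lo = words[0];  // Low 32 mantissa bits.
  uint32_t hi = words[1];  // Sign, 11 exponent bits, top 20 mantissa bits.
  int exponent = static_cast<int>((hi >> 20) & 0x7ff) - 1023;
  if (exponent < 0) return 0;  // |value| < 1 truncates to zero.
  // Put back the implicit 1 and build the integer as if the exponent were 30,
  // exactly like the code at the right_exponent label above.
  uint32_t top = (hi & 0xfffff) | (1u << 20);
  uint32_t as_if_exponent_30 = (top << 10) | (lo >> 22);
  uint32_t magnitude = as_if_exponent_30 >> (30 - exponent);
  return (hi & 0x80000000u) != 0 ? -static_cast<int>(magnitude)
                                 : static_cast<int>(magnitude);
}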
5167
5168
5169 // For bitwise ops where the inputs are not both Smis, we try here to determine
5170// whether both inputs are either Smis or at least heap numbers that can be
5171// represented by a 32 bit signed value. We truncate towards zero as required
5172// by the ES spec. If this is the case we do the bitwise op and see if the
5173// result is a Smi. If so, great, otherwise we try to find a heap number to
5174// write the answer into (either by allocating or by overwriting).
5175// On entry the operands are in r0 and r1. On exit the answer is in r0.
5176void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
5177 Label slow, result_not_a_smi;
5178 Label r0_is_smi, r1_is_smi;
5179 Label done_checking_r0, done_checking_r1;
5180
5181 __ tst(r1, Operand(kSmiTagMask));
5182 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5183 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5184 __ b(ne, &slow);
5185 GetInt32(masm, r1, r3, r4, r5, &slow);
5186 __ jmp(&done_checking_r1);
5187 __ bind(&r1_is_smi);
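 // Untag the Smi: an arithmetic shift right by the tag size (one bit) also
 // preserves the sign.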
5188 __ mov(r3, Operand(r1, ASR, 1));
5189 __ bind(&done_checking_r1);
5190
5191 __ tst(r0, Operand(kSmiTagMask));
5192 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5193 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5194 __ b(ne, &slow);
5195 GetInt32(masm, r0, r2, r4, r5, &slow);
5196 __ jmp(&done_checking_r0);
5197 __ bind(&r0_is_smi);
5198 __ mov(r2, Operand(r0, ASR, 1));
5199 __ bind(&done_checking_r0);
5200
5201 // r0 and r1: Original operands (Smi or heap numbers).
5202 // r2 and r3: Signed int32 operands.
5203 switch (op_) {
5204 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
5205 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
5206 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
5207 case Token::SAR:
5208 // Use only the 5 least significant bits of the shift count.
5209 __ and_(r2, r2, Operand(0x1f));
5210 __ mov(r2, Operand(r3, ASR, r2));
5211 break;
5212 case Token::SHR:
5213 // Use only the 5 least significant bits of the shift count.
5214 __ and_(r2, r2, Operand(0x1f));
5215 __ mov(r2, Operand(r3, LSR, r2), SetCC);
5216 // SHR is special because it is required to produce a positive answer.
5217 // The code below for writing into heap numbers isn't capable of writing
5218 // the register as an unsigned int so we go to slow case if we hit this
5219 // case.
5220 __ b(mi, &slow);
5221 break;
5222 case Token::SHL:
5223 // Use only the 5 least significant bits of the shift count.
5224 __ and_(r2, r2, Operand(0x1f));
5225 __ mov(r2, Operand(r3, LSL, r2));
5226 break;
5227 default: UNREACHABLE();
5228 }
5229 // Check that the *signed* result fits in a Smi, i.e. is in [-2^30, 2^30).
5230 __ add(r3, r2, Operand(0x40000000), SetCC);
5231 __ b(mi, &result_not_a_smi);
5232 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5233 __ Ret();
5234
5235 Label have_to_allocate, got_a_heap_number;
5236 __ bind(&result_not_a_smi);
5237 switch (mode_) {
5238 case OVERWRITE_RIGHT: {
5239 __ tst(r0, Operand(kSmiTagMask));
5240 __ b(eq, &have_to_allocate);
5241 __ mov(r5, Operand(r0));
5242 break;
5243 }
5244 case OVERWRITE_LEFT: {
5245 __ tst(r1, Operand(kSmiTagMask));
5246 __ b(eq, &have_to_allocate);
5247 __ mov(r5, Operand(r1));
5248 break;
5249 }
5250 case NO_OVERWRITE: {
5251 // Get a new heap number in r5. r6 and r7 are scratch.
5252 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5253 }
5254 default: break;
5255 }
5256 __ bind(&got_a_heap_number);
5257 // r2: Answer as signed int32.
5258 // r5: Heap number to write answer into.
5259
5260 // Nothing can go wrong now, so move the heap number to r0, which is the
5261 // result.
5262 __ mov(r0, Operand(r5));
5263
5264 // Tail call that writes the int32 in r2 to the heap number in r0, using
5265 // r3 as scratch. r0 is preserved and returned.
5266 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
5267 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
5268
5269 if (mode_ != NO_OVERWRITE) {
5270 __ bind(&have_to_allocate);
5271 // Get a new heap number in r5. r6 and r7 are scratch.
5272 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5273 __ jmp(&got_a_heap_number);
5274 }
5275
5276 // If all else failed then we go to the runtime system.
5277 __ bind(&slow);
5278 __ push(r1); // Put the operands back on the stack for the builtin call.
5279 __ push(r0);
5280 __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
5281 switch (op_) {
5282 case Token::BIT_OR:
5283 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
5284 break;
5285 case Token::BIT_AND:
5286 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
5287 break;
5288 case Token::BIT_XOR:
5289 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
5290 break;
5291 case Token::SAR:
5292 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
5293 break;
5294 case Token::SHR:
5295 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
5296 break;
5297 case Token::SHL:
5298 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
5299 break;
5300 default:
5301 UNREACHABLE();
5302 }
5303}
5304
5305
5306 // Can we multiply by x with at most two shifts and an add?
5307 // This answers yes for all integers from 2 to 10.
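// For example 8 == 1000b is a power of 2, 10 == 1010b has two bits set, and
// 7 == 1000b - 1 fits the all-ones pattern.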
5308static bool IsEasyToMultiplyBy(int x) {
5309 if (x < 2) return false; // Avoid special cases.
5310 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
5311 if (IsPowerOf2(x)) return true; // Simple shift.
5312 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
5313 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
5314 return false;
5315}
5316
5317
5318// Can multiply by anything that IsEasyToMultiplyBy returns true for.
5319// Source and destination may be the same register. This routine does
5320// not set carry and overflow the way a mul instruction would.
5321static void MultiplyByKnownInt(MacroAssembler* masm,
5322 Register source,
5323 Register destination,
5324 int known_int) {
5325 if (IsPowerOf2(known_int)) {
5326 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
5327 } else if (PopCountLessThanEqual2(known_int)) {
5328 int first_bit = BitPosition(known_int);
5329 int second_bit = BitPosition(known_int ^ (1 << first_bit));
5330 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
5331 if (first_bit != 0) {
5332 __ mov(destination, Operand(destination, LSL, first_bit));
5333 }
5334 } else {
5335 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
5336 int the_bit = BitPosition(known_int + 1);
5337 __ rsb(destination, source, Operand(source, LSL, the_bit));
5338 }
5339}
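
// Illustration only -- the same shift/add decomposition in plain C++, for any
// known_int that IsEasyToMultiplyBy accepts (so known_int >= 2 and one of the
// three patterns below applies).  'lowest_set_bit' plays the role of
// BitPosition above.  E.g. known_int == 10 yields (source + (source << 2)) << 1.
static int MultiplyByKnownIntForIllustration(int source, int known_int) {
  int lowest_set_bit = 0;
  while (((known_int >> lowest_set_bit) & 1) == 0) lowest_set_bit++;
  if ((known_int & (known_int - 1)) == 0) {
    return source << lowest_set_bit;               // Power of 2: one shift.
  }
  int rest = known_int & ~(1 << lowest_set_bit);
  if ((rest & (rest - 1)) == 0) {                  // Exactly two bits set.
    int second_bit = 0;
    while (((rest >> second_bit) & 1) == 0) second_bit++;
    return (source + (source << (second_bit - lowest_set_bit)))
           << lowest_set_bit;
  }
  // Otherwise known_int + 1 is a power of 2 (patterns like 0b1111).
  int the_bit = 0;
  while ((((known_int + 1) >> the_bit) & 1) == 0) the_bit++;
  return (source << the_bit) - source;
}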
5340
5341
5342 // This function (as opposed to MultiplyByKnownInt) takes the known int in
5343 // a register for the cases where it doesn't know a good trick, and may deliver
5344// a result that needs shifting.
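// In all cases below the result satisfies
//   (result << *required_shift) == (source * known_int) << kSmiTagSize,
// which is why the caller shifts r4 by required_r4_shift before using it.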
5345static void MultiplyByKnownInt2(
5346 MacroAssembler* masm,
5347 Register result,
5348 Register source,
5349 Register known_int_register, // Smi tagged.
5350 int known_int,
5351 int* required_shift) { // Including Smi tag shift
5352 switch (known_int) {
5353 case 3:
5354 __ add(result, source, Operand(source, LSL, 1));
5355 *required_shift = 1;
5356 break;
5357 case 5:
5358 __ add(result, source, Operand(source, LSL, 2));
5359 *required_shift = 1;
5360 break;
5361 case 6:
5362 __ add(result, source, Operand(source, LSL, 1));
5363 *required_shift = 2;
5364 break;
5365 case 7:
5366 __ rsb(result, source, Operand(source, LSL, 3));
5367 *required_shift = 1;
5368 break;
5369 case 9:
5370 __ add(result, source, Operand(source, LSL, 3));
5371 *required_shift = 1;
5372 break;
5373 case 10:
5374 __ add(result, source, Operand(source, LSL, 2));
5375 *required_shift = 2;
5376 break;
5377 default:
5378 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
5379 __ mul(result, source, known_int_register);
5380 *required_shift = 0;
5381 }
5382}
5383
5384
5385void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
5386 // r1 : x
5387 // r0 : y
5388 // result : r0
5389
5390 // All ops need to know whether we are dealing with two Smis. Set up r2 to
5391 // tell us that.
5392 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
5393
5394 switch (op_) {
5395 case Token::ADD: {
5396 Label not_smi;
5397 // Fast path.
5398 ASSERT(kSmiTag == 0); // Adjust code below.
5399 __ tst(r2, Operand(kSmiTagMask));
5400 __ b(ne, &not_smi);
5401 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
5402 // Return if no overflow.
5403 __ Ret(vc);
5404 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
5405
5406 HandleBinaryOpSlowCases(masm,
5407 &not_smi,
5408 Builtins::ADD,
5409 Token::ADD,
5410 mode_);
5411 break;
5412 }
5413
5414 case Token::SUB: {
5415 Label not_smi;
5416 // Fast path.
5417 ASSERT(kSmiTag == 0); // Adjust code below.
5418 __ tst(r2, Operand(kSmiTagMask));
5419 __ b(ne, &not_smi);
5420 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
5421 // Return if no overflow.
5422 __ Ret(vc);
5423 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
5424
5425 HandleBinaryOpSlowCases(masm,
5426 &not_smi,
5427 Builtins::SUB,
5428 Token::SUB,
5429 mode_);
5430 break;
5431 }
5432
5433 case Token::MUL: {
5434 Label not_smi, slow;
5435 ASSERT(kSmiTag == 0); // adjust code below
5436 __ tst(r2, Operand(kSmiTagMask));
5437 __ b(ne, &not_smi);
5438 // Remove tag from one operand (but keep sign), so that result is Smi.
5439 __ mov(ip, Operand(r0, ASR, kSmiTagSize));
5440 // Do multiplication
5441 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
5442 // Go slow on overflow (smull does not set the overflow flag; check manually).
5443 __ mov(ip, Operand(r3, ASR, 31));
5444 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
5445 __ b(ne, &slow);
5446 // Go slow on zero result to handle -0.
5447 __ tst(r3, Operand(r3));
5448 __ mov(r0, Operand(r3), LeaveCC, ne);
5449 __ Ret(ne);
5450 // We need -0 if the zero result came from multiplying a negative number by 0.
5451 // We know one of them was zero.
5452 __ add(r2, r0, Operand(r1), SetCC);
5453 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
5454 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
5455 // Slow case. We fall through here if we multiplied a negative number
5456 // with 0, because that would mean we should produce -0.
5457 __ bind(&slow);
5458
5459 HandleBinaryOpSlowCases(masm,
5460 &not_smi,
5461 Builtins::MUL,
5462 Token::MUL,
5463 mode_);
5464 break;
5465 }
5466
5467 case Token::DIV:
5468 case Token::MOD: {
5469 Label not_smi;
5470 if (specialized_on_rhs_) {
5471 Label smi_is_unsuitable;
5472 __ BranchOnNotSmi(r1, &not_smi);
5473 if (IsPowerOf2(constant_rhs_)) {
5474 if (op_ == Token::MOD) {
5475 __ and_(r0,
5476 r1,
5477 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
5478 SetCC);
5479 // We now have the answer, but if the input was negative we also
5480 // have the sign bit. Our work is done if the result is
5481 // positive or zero:
5482 __ Ret(pl);
5483 // A mod of a negative left hand side must return a negative number.
5484 // Unfortunately if the answer is 0 then we must return -0. And we
5485 // already optimistically trashed r0 so we may need to restore it.
5486 __ eor(r0, r0, Operand(0x80000000u), SetCC);
5487 // Next two instructions are conditional on the answer being -0.
5488 __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
5489 __ b(eq, &smi_is_unsuitable);
5490 // We need to subtract the divisor. E.g. -3 % 4 == -3.
5491 __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
5492 } else {
5493 ASSERT(op_ == Token::DIV);
5494 __ tst(r1,
5495 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
5496 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
5497 int shift = 0;
5498 int d = constant_rhs_;
5499 while ((d & 1) == 0) {
5500 d >>= 1;
5501 shift++;
5502 }
5503 __ mov(r0, Operand(r1, LSR, shift));
5504 __ bic(r0, r0, Operand(kSmiTagMask));
5505 }
5506 } else {
5507 // Not a power of 2.
5508 __ tst(r1, Operand(0x80000000u));
5509 __ b(ne, &smi_is_unsuitable);
5510 // Find a fixed point reciprocal of the divisor so we can divide by
5511 // multiplying.
5512 double divisor = 1.0 / constant_rhs_;
5513 int shift = 32;
5514 double scale = 4294967296.0; // 1 << 32.
5515 uint32_t mul;
5516 // Maximise the precision of the fixed point reciprocal.
5517 while (true) {
5518 mul = static_cast<uint32_t>(scale * divisor);
5519 if (mul >= 0x7fffffff) break;
5520 scale *= 2.0;
5521 shift++;
5522 }
5523 mul++;
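 // For example, if constant_rhs_ is 3 the loop ends with shift == 33 and
 // mul == 0xAAAAAAAB, the familiar divide-by-3 constant:
 //   (x * 0xAAAAAAABULL) >> 33 == x / 3   for every uint32_t x.
 // The umull/LSR sequence below computes exactly (mul * untagged_lhs) >> shift;
 // the Smi tag bit in r1 accounts for shifting by 'shift - 31' rather than
 // 'shift - 32'.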
5524 __ mov(r2, Operand(mul));
5525 __ umull(r3, r2, r2, r1);
5526 __ mov(r2, Operand(r2, LSR, shift - 31));
5527 // r2 is now the untagged value of r1 divided by rhs. r2 is not Smi tagged.
5528 // r0 is still the known rhs. r0 is Smi tagged.
5529 // r1 is still the unknown lhs. r1 is Smi tagged.
5530 int required_r4_shift = 0; // Including the Smi tag shift of 1.
5531 // r4 = r2 * r0.
5532 MultiplyByKnownInt2(masm,
5533 r4,
5534 r2,
5535 r0,
5536 constant_rhs_,
5537 &required_r4_shift);
5538 // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
5539 if (op_ == Token::DIV) {
5540 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
5541 __ b(ne, &smi_is_unsuitable); // There was a remainder.
5542 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5543 } else {
5544 ASSERT(op_ == Token::MOD);
5545 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
5546 }
5547 }
5548 __ Ret();
5549 __ bind(&smi_is_unsuitable);
5550 } else {
5551 __ jmp(&not_smi);
5552 }
5553 HandleBinaryOpSlowCases(masm,
5554 &not_smi,
5555 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
5556 op_,
5557 mode_);
5558 break;
5559 }
5560
5561 case Token::BIT_OR:
5562 case Token::BIT_AND:
5563 case Token::BIT_XOR:
5564 case Token::SAR:
5565 case Token::SHR:
5566 case Token::SHL: {
5567 Label slow;
5568 ASSERT(kSmiTag == 0); // adjust code below
5569 __ tst(r2, Operand(kSmiTagMask));
5570 __ b(ne, &slow);
5571 switch (op_) {
5572 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
5573 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
5574 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
5575 case Token::SAR:
5576 // Remove tags from right operand.
5577 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5578 // Use only the 5 least significant bits of the shift count.
5579 __ and_(r2, r2, Operand(0x1f));
5580 __ mov(r0, Operand(r1, ASR, r2));
5581 // Smi tag result.
5582 __ bic(r0, r0, Operand(kSmiTagMask));
5583 break;
5584 case Token::SHR:
5585 // Remove tags from operands. We can't do this on a 31 bit number
5586 // because then the 0s get shifted into bit 30 instead of bit 31.
5587 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
5588 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5589 // Use only the 5 least significant bits of the shift count.
5590 __ and_(r2, r2, Operand(0x1f));
5591 __ mov(r3, Operand(r3, LSR, r2));
5592 // Unsigned shift is not allowed to produce a negative number, so
5593 // check the sign bit and the sign bit after Smi tagging.
5594 __ tst(r3, Operand(0xc0000000));
5595 __ b(ne, &slow);
5596 // Smi tag result.
5597 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
5598 break;
5599 case Token::SHL:
5600 // Remove tags from operands.
5601 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
5602 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5603 // Use only the 5 least significant bits of the shift count.
5604 __ and_(r2, r2, Operand(0x1f));
5605 __ mov(r3, Operand(r3, LSL, r2));
5606 // Check that the signed result fits in a Smi.
5607 __ add(r2, r3, Operand(0x40000000), SetCC);
5608 __ b(mi, &slow);
5609 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
5610 break;
5611 default: UNREACHABLE();
5612 }
5613 __ Ret();
5614 __ bind(&slow);
5615 HandleNonSmiBitwiseOp(masm);
5616 break;
5617 }
5618
5619 default: UNREACHABLE();
5620 }
5621 // This code should be unreachable.
5622 __ stop("Unreachable");
5623}
5624
5625
5626void StackCheckStub::Generate(MacroAssembler* masm) {
5627 // Do tail-call to runtime routine. Runtime routines expect at least one
5628 // argument, so give it a Smi.
5629 __ mov(r0, Operand(Smi::FromInt(0)));
5630 __ push(r0);
5631 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
5632
5633 __ StubReturn(1);
5634}
5635
5636
5637void UnarySubStub::Generate(MacroAssembler* masm) {
5638 Label undo;
5639 Label slow;
5640 Label not_smi;
5641
5642 // Enter runtime system if the value is not a smi.
5643 __ tst(r0, Operand(kSmiTagMask));
5644 __ b(ne, &not_smi);
5645
5646 // Enter runtime system if the value of the expression is zero
5647 // to make sure that we switch between 0 and -0.
5648 __ cmp(r0, Operand(0));
5649 __ b(eq, &slow);
5650
5651 // The value of the expression is a smi that is not zero. Try
5652 // optimistic subtraction '0 - value'.
5653 __ rsb(r1, r0, Operand(0), SetCC);
5654 __ b(vs, &slow);
5655
5656 __ mov(r0, Operand(r1)); // Set r0 to result.
5657 __ StubReturn(1);
5658
5659 // Enter runtime system.
5660 __ bind(&slow);
5661 __ push(r0);
5662 __ mov(r0, Operand(0)); // Set number of arguments.
5663 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
5664
5665 __ bind(&not_smi);
5666 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
5667 __ b(ne, &slow);
5668 // r0 is a heap number. Get a new heap number in r1.
5669 if (overwrite_) {
5670 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
5671 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
5672 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
5673 } else {
5674 AllocateHeapNumber(masm, &slow, r1, r2, r3);
5675 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
5676 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
5677 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
5678 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
5679 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
5680 __ mov(r0, Operand(r1));
5681 }
5682 __ StubReturn(1);
5683}
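
// Illustration only -- what the sign flip above does at the bit level on a
// little-endian target: the sign bit (HeapNumber::kSignMask, 0x80000000) is
// xored into the word holding sign and exponent.  Assumes <stdint.h> and
// <string.h>.
static double NegateByBitFlipForIllustration(double value) {
  uint32_t words[2];
  memcpy(words, &value, sizeof(words));
  words[1] ^= 0x80000000u;  // Flip the sign bit in the high word.
  double result;
  memcpy(&result, words, sizeof(result));
  return result;
}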
5684
5685
5686int CEntryStub::MinorKey() {
5687 ASSERT(result_size_ <= 2);
5688 // Result returned in r0 or r0+r1 by default.
5689 return 0;
5690}
5691
5692
5693void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
5694 // r0 holds the exception.
5695
5696 // Adjust this code if not the case.
5697 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
5698
5699 // Drop the sp to the top of the handler.
5700 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
5701 __ ldr(sp, MemOperand(r3));
5702
5703 // Restore the next handler and frame pointer, discard handler state.
5704 ASSERT(StackHandlerConstants::kNextOffset == 0);
5705 __ pop(r2);
5706 __ str(r2, MemOperand(r3));
5707 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
5708 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
5709
5710 // Before returning we restore the context from the frame pointer if
5711 // not NULL. The frame pointer is NULL in the exception handler of a
5712 // JS entry frame.
5713 __ cmp(fp, Operand(0));
5714 // Set cp to NULL if fp is NULL.
5715 __ mov(cp, Operand(0), LeaveCC, eq);
5716 // Restore cp otherwise.
5717 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
5718#ifdef DEBUG
5719 if (FLAG_debug_code) {
5720 __ mov(lr, Operand(pc));
5721 }
5722#endif
5723 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
5724 __ pop(pc);
5725}
5726
5727
5728void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
5729 UncatchableExceptionType type) {
5730 // Adjust this code if not the case.
5731 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
5732
5733 // Drop sp to the top stack handler.
5734 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
5735 __ ldr(sp, MemOperand(r3));
5736
5737 // Unwind the handlers until the ENTRY handler is found.
5738 Label loop, done;
5739 __ bind(&loop);
5740 // Load the type of the current stack handler.
5741 const int kStateOffset = StackHandlerConstants::kStateOffset;
5742 __ ldr(r2, MemOperand(sp, kStateOffset));
5743 __ cmp(r2, Operand(StackHandler::ENTRY));
5744 __ b(eq, &done);
5745 // Fetch the next handler in the list.
5746 const int kNextOffset = StackHandlerConstants::kNextOffset;
5747 __ ldr(sp, MemOperand(sp, kNextOffset));
5748 __ jmp(&loop);
5749 __ bind(&done);
5750
5751 // Set the top handler address to next handler past the current ENTRY handler.
5752 ASSERT(StackHandlerConstants::kNextOffset == 0);
5753 __ pop(r2);
5754 __ str(r2, MemOperand(r3));
5755
5756 if (type == OUT_OF_MEMORY) {
5757 // Set external caught exception to false.
5758 ExternalReference external_caught(Top::k_external_caught_exception_address);
5759 __ mov(r0, Operand(false));
5760 __ mov(r2, Operand(external_caught));
5761 __ str(r0, MemOperand(r2));
5762
5763 // Set pending exception and r0 to out of memory exception.
5764 Failure* out_of_memory = Failure::OutOfMemoryException();
5765 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
5766 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
5767 __ str(r0, MemOperand(r2));
5768 }
5769
5770 // Stack layout at this point. See also StackHandlerConstants.
5771 // sp -> state (ENTRY)
5772 // fp
5773 // lr
5774
5775 // Discard handler state (r2 is not used) and restore frame pointer.
5776 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
5777 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
5778 // Before returning we restore the context from the frame pointer if
5779 // not NULL. The frame pointer is NULL in the exception handler of a
5780 // JS entry frame.
5781 __ cmp(fp, Operand(0));
5782 // Set cp to NULL if fp is NULL.
5783 __ mov(cp, Operand(0), LeaveCC, eq);
5784 // Restore cp otherwise.
5785 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
5786#ifdef DEBUG
5787 if (FLAG_debug_code) {
5788 __ mov(lr, Operand(pc));
5789 }
5790#endif
5791 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
5792 __ pop(pc);
5793}
5794
5795
5796void CEntryStub::GenerateCore(MacroAssembler* masm,
5797 Label* throw_normal_exception,
5798 Label* throw_termination_exception,
5799 Label* throw_out_of_memory_exception,
5800 StackFrame::Type frame_type,
5801 bool do_gc,
5802 bool always_allocate) {
5803 // r0: result parameter for PerformGC, if any
5804 // r4: number of arguments including receiver (C callee-saved)
5805 // r5: pointer to builtin function (C callee-saved)
5806 // r6: pointer to the first argument (C callee-saved)
5807
5808 if (do_gc) {
5809 // Passing r0.
5810 ExternalReference gc_reference = ExternalReference::perform_gc_function();
5811 __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
5812 }
5813
5814 ExternalReference scope_depth =
5815 ExternalReference::heap_always_allocate_scope_depth();
5816 if (always_allocate) {
5817 __ mov(r0, Operand(scope_depth));
5818 __ ldr(r1, MemOperand(r0));
5819 __ add(r1, r1, Operand(1));
5820 __ str(r1, MemOperand(r0));
5821 }
5822
5823 // Call C built-in.
5824 // r0 = argc, r1 = argv
5825 __ mov(r0, Operand(r4));
5826 __ mov(r1, Operand(r6));
5827
5828 // TODO(1242173): To let the GC traverse the return address of the exit
5829 // frames, we need to know where the return address is. Right now,
5830 // we push it on the stack to be able to find it again, but we never
5831 // restore from it in case of changes, which makes it impossible to
5832 // support moving the C entry code stub. This should be fixed, but currently
5833 // this is OK because the CEntryStub gets generated so early in the V8 boot
5834 // sequence that it is not moving ever.
5835 masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
5836 masm->push(lr);
5837 masm->Jump(r5);
5838
5839 if (always_allocate) {
5840 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
5841 // though (contain the result).
5842 __ mov(r2, Operand(scope_depth));
5843 __ ldr(r3, MemOperand(r2));
5844 __ sub(r3, r3, Operand(1));
5845 __ str(r3, MemOperand(r2));
5846 }
5847
5848 // check for failure result
5849 Label failure_returned;
5850 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
5851 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
5852 __ add(r2, r0, Operand(1));
5853 __ tst(r2, Operand(kFailureTagMask));
5854 __ b(eq, &failure_returned);
5855
5856 // Exit C frame and return.
5857 // r0:r1: result
5858 // sp: stack pointer
5859 // fp: frame pointer
5860 __ LeaveExitFrame(frame_type);
5861
5862 // check if we should retry or throw exception
5863 Label retry;
5864 __ bind(&failure_returned);
5865 ASSERT(Failure::RETRY_AFTER_GC == 0);
5866 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
5867 __ b(eq, &retry);
5868
5869 // Special handling of out of memory exceptions.
5870 Failure* out_of_memory = Failure::OutOfMemoryException();
5871 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
5872 __ b(eq, throw_out_of_memory_exception);
5873
5874 // Retrieve the pending exception and clear the variable.
5875 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
5876 __ ldr(r3, MemOperand(ip));
5877 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
5878 __ ldr(r0, MemOperand(ip));
5879 __ str(r3, MemOperand(ip));
5880
5881 // Special handling of termination exceptions which are uncatchable
5882 // by JavaScript code.
5883 __ cmp(r0, Operand(Factory::termination_exception()));
5884 __ b(eq, throw_termination_exception);
5885
5886 // Handle normal exception.
5887 __ jmp(throw_normal_exception);
5888
5889 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
5890}
5891
5892
5893void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
5894 // Called from JavaScript; parameters are on stack as if calling JS function
5895 // r0: number of arguments including receiver
5896 // r1: pointer to builtin function
5897 // fp: frame pointer (restored after C call)
5898 // sp: stack pointer (restored as callee's sp after C call)
5899 // cp: current context (C callee-saved)
5900
5901 // NOTE: Invocations of builtins may return failure objects
5902 // instead of a proper result. The builtin entry handles
5903 // this by performing a garbage collection and retrying the
5904 // builtin once.
5905
5906 StackFrame::Type frame_type = is_debug_break
5907 ? StackFrame::EXIT_DEBUG
5908 : StackFrame::EXIT;
5909
5910 // Enter the exit frame that transitions from JavaScript to C++.
5911 __ EnterExitFrame(frame_type);
5912
5913 // r4: number of arguments (C callee-saved)
5914 // r5: pointer to builtin function (C callee-saved)
5915 // r6: pointer to first argument (C callee-saved)
5916
5917 Label throw_normal_exception;
5918 Label throw_termination_exception;
5919 Label throw_out_of_memory_exception;
5920
5921 // Call into the runtime system.
5922 GenerateCore(masm,
5923 &throw_normal_exception,
5924 &throw_termination_exception,
5925 &throw_out_of_memory_exception,
5926 frame_type,
5927 false,
5928 false);
5929
5930 // Do space-specific GC and retry runtime call.
5931 GenerateCore(masm,
5932 &throw_normal_exception,
5933 &throw_termination_exception,
5934 &throw_out_of_memory_exception,
5935 frame_type,
5936 true,
5937 false);
5938
5939 // Do full GC and retry runtime call one final time.
5940 Failure* failure = Failure::InternalError();
5941 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
5942 GenerateCore(masm,
5943 &throw_normal_exception,
5944 &throw_termination_exception,
5945 &throw_out_of_memory_exception,
5946 frame_type,
5947 true,
5948 true);
5949
5950 __ bind(&throw_out_of_memory_exception);
5951 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
5952
5953 __ bind(&throw_termination_exception);
5954 GenerateThrowUncatchable(masm, TERMINATION);
5955
5956 __ bind(&throw_normal_exception);
5957 GenerateThrowTOS(masm);
5958}
5959
5960
5961void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
5962 // r0: code entry
5963 // r1: function
5964 // r2: receiver
5965 // r3: argc
5966 // [sp+0]: argv
5967
5968 Label invoke, exit;
5969
5970 // Called from C, so do not pop argc and args on exit (preserve sp)
5971 // No need to save register-passed args
5972 // Save callee-saved registers (incl. cp and fp), sp, and lr
5973 __ stm(db_w, sp, kCalleeSaved | lr.bit());
5974
5975 // Get address of argv, see stm above.
5976 // r0: code entry
5977 // r1: function
5978 // r2: receiver
5979 // r3: argc
5980 __ add(r4, sp, Operand((kNumCalleeSaved + 1)*kPointerSize));
5981 __ ldr(r4, MemOperand(r4)); // argv
5982
5983 // Push a frame with special values setup to mark it as an entry frame.
5984 // r0: code entry
5985 // r1: function
5986 // r2: receiver
5987 // r3: argc
5988 // r4: argv
5989 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
5990 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
5991 __ mov(r7, Operand(Smi::FromInt(marker)));
5992 __ mov(r6, Operand(Smi::FromInt(marker)));
5993 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
5994 __ ldr(r5, MemOperand(r5));
5995 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
5996
5997 // Setup frame pointer for the frame to be pushed.
5998 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
5999
6000 // Call a faked try-block that does the invoke.
6001 __ bl(&invoke);
6002
6003 // Caught exception: Store result (exception) in the pending
6004 // exception field in the JSEnv and return a failure sentinel.
6005 // Coming in here the fp will be invalid because the PushTryHandler below
6006 // sets it to 0 to signal the existence of the JSEntry frame.
6007 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6008 __ str(r0, MemOperand(ip));
6009 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
6010 __ b(&exit);
6011
6012 // Invoke: Link this frame into the handler chain.
6013 __ bind(&invoke);
6014 // Must preserve r0-r4, r5-r7 are available.
6015 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
6016 // If an exception not caught by another handler occurs, this handler
6017 // returns control to the code after the bl(&invoke) above, which
6018 // restores all kCalleeSaved registers (including cp and fp) to their
6019 // saved values before returning a failure to C.
6020
6021 // Clear any pending exceptions.
6022 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
6023 __ ldr(r5, MemOperand(ip));
6024 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6025 __ str(r5, MemOperand(ip));
6026
6027 // Invoke the function by calling through JS entry trampoline builtin.
6028 // Notice that we cannot store a reference to the trampoline code directly in
6029 // this stub, because runtime stubs are not traversed when doing GC.
6030
6031 // Expected registers by Builtins::JSEntryTrampoline
6032 // r0: code entry
6033 // r1: function
6034 // r2: receiver
6035 // r3: argc
6036 // r4: argv
6037 if (is_construct) {
6038 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
6039 __ mov(ip, Operand(construct_entry));
6040 } else {
6041 ExternalReference entry(Builtins::JSEntryTrampoline);
6042 __ mov(ip, Operand(entry));
6043 }
6044 __ ldr(ip, MemOperand(ip)); // deref address
6045
6046 // Branch and link to JSEntryTrampoline. We don't use the double underscore
6047 // macro for the add instruction because we don't want the coverage tool
6048 // inserting instructions here after we read the pc.
6049 __ mov(lr, Operand(pc));
6050 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
6051
6052 // Unlink this frame from the handler chain. When reading the
6053 // address of the next handler, there is no need to use the address
6054 // displacement since the current stack pointer (sp) points directly
6055 // to the stack handler.
6056 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
6057 __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
6058 __ str(r3, MemOperand(ip));
6059 // No need to restore registers
6060 __ add(sp, sp, Operand(StackHandlerConstants::kSize));
6061
6062
6063 __ bind(&exit); // r0 holds result
6064 // Restore the top frame descriptors from the stack.
6065 __ pop(r3);
6066 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
6067 __ str(r3, MemOperand(ip));
6068
6069 // Reset the stack to the callee saved registers.
6070 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
6071
6072 // Restore callee-saved registers and return.
6073#ifdef DEBUG
6074 if (FLAG_debug_code) {
6075 __ mov(lr, Operand(pc));
6076 }
6077#endif
6078 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
6079}
6080
6081
6082// This stub performs an instanceof, calling the builtin function if
6083// necessary. Uses r1 for the object, r0 for the function that it may
6084// be an instance of (these are fetched from the stack).
6085void InstanceofStub::Generate(MacroAssembler* masm) {
6086 // Get the object - slow case for smis (we may need to throw an exception
6087 // depending on the rhs).
6088 Label slow, loop, is_instance, is_not_instance;
6089 __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6090 __ BranchOnSmi(r0, &slow);
6091
6092 // Check that the left hand side is a JS object and put its map in r3.
6093 __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
6094 __ b(lt, &slow);
6095 __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
6096 __ b(gt, &slow);
6097
6098 // Get the prototype of the function (r4 is result, r2 is scratch).
6099 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
6100 __ TryGetFunctionPrototype(r1, r4, r2, &slow);
6101
6102 // Check that the function prototype is a JS object.
6103 __ BranchOnSmi(r4, &slow);
6104 __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
6105 __ b(lt, &slow);
6106 __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
6107 __ b(gt, &slow);
6108
6109 // Register mapping: r3 is object map and r4 is function prototype.
6110 // Get prototype of object into r2.
6111 __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
6112
6113 // Loop through the prototype chain looking for the function prototype.
6114 __ bind(&loop);
6115 __ cmp(r2, Operand(r4));
6116 __ b(eq, &is_instance);
6117 __ LoadRoot(ip, Heap::kNullValueRootIndex);
6118 __ cmp(r2, ip);
6119 __ b(eq, &is_not_instance);
6120 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
6121 __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
6122 __ jmp(&loop);
6123
6124 __ bind(&is_instance);
6125 __ mov(r0, Operand(Smi::FromInt(0)));
6126 __ pop();
6127 __ pop();
6128 __ mov(pc, Operand(lr)); // Return.
6129
6130 __ bind(&is_not_instance);
6131 __ mov(r0, Operand(Smi::FromInt(1)));
6132 __ pop();
6133 __ pop();
6134 __ mov(pc, Operand(lr)); // Return.
6135
6136 // Slow-case. Tail call builtin.
6137 __ bind(&slow);
6138 __ mov(r0, Operand(1)); // Arg count without receiver.
6139 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
6140}
6141
6142
6143void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
6144 // Check if the calling frame is an arguments adaptor frame.
6145 Label adaptor;
6146 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6147 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6148 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6149 __ b(eq, &adaptor);
6150
6151 // Nothing to do: The formal number of parameters has already been
6152 // passed in register r0 by the calling function. Just return it.
6153 __ Jump(lr);
6154
6155 // Arguments adaptor case: Read the arguments length from the
6156 // adaptor frame and return it.
6157 __ bind(&adaptor);
6158 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6159 __ Jump(lr);
6160}
6161
6162
6163void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6164 // The displacement is the offset of the last parameter (if any)
6165 // relative to the frame pointer.
6166 static const int kDisplacement =
6167 StandardFrameConstants::kCallerSPOffset - kPointerSize;
6168
6169 // Check that the key is a smi.
6170 Label slow;
6171 __ BranchOnNotSmi(r1, &slow);
6172
6173 // Check if the calling frame is an arguments adaptor frame.
6174 Label adaptor;
6175 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6176 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6177 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6178 __ b(eq, &adaptor);
6179
6180 // Check index against formal parameters count limit passed in
6181 // through register r0. Use unsigned comparison to get negative
6182 // check for free.
6183 __ cmp(r1, r0);
6184 __ b(cs, &slow);
6185
6186 // Read the argument from the stack and return it.
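 // r0 (the parameter count) and r1 (the key) are Smis, i.e. value * 2, so
 // shifting their difference left by kPointerSizeLog2 - kSmiTagSize turns
 // (count - key) into a byte offset from fp.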
6187 __ sub(r3, r0, r1);
6188 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
6189 __ ldr(r0, MemOperand(r3, kDisplacement));
6190 __ Jump(lr);
6191
6192 // Arguments adaptor case: Check index against actual arguments
6193 // limit found in the arguments adaptor frame. Use unsigned
6194 // comparison to get negative check for free.
6195 __ bind(&adaptor);
6196 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6197 __ cmp(r1, r0);
6198 __ b(cs, &slow);
6199
6200 // Read the argument from the adaptor frame and return it.
6201 __ sub(r3, r0, r1);
6202 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
6203 __ ldr(r0, MemOperand(r3, kDisplacement));
6204 __ Jump(lr);
6205
6206 // Slow-case: Handle non-smi or out-of-bounds access to arguments
6207 // by calling the runtime system.
6208 __ bind(&slow);
6209 __ push(r1);
6210 __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
6211}
6212
6213
6214void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
6215 // Check if the calling frame is an arguments adaptor frame.
6216 Label runtime;
6217 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6218 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6219 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6220 __ b(ne, &runtime);
6221
6222 // Patch the arguments.length and the parameters pointer.
6223 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6224 __ str(r0, MemOperand(sp, 0 * kPointerSize));
6225 __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
6226 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
6227 __ str(r3, MemOperand(sp, 1 * kPointerSize));
6228
6229 // Do the runtime call to allocate the arguments object.
6230 __ bind(&runtime);
6231 __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
6232}
6233
6234
6235void CallFunctionStub::Generate(MacroAssembler* masm) {
6236 Label slow;
6237 // Get the function to call from the stack.
6238 // function, receiver [, arguments]
6239 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
6240
6241 // Check that the function is really a JavaScript function.
6242 // r1: pushed function (to be verified)
6243 __ BranchOnSmi(r1, &slow);
6244 // Get the map of the function object.
6245 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
6246 __ b(ne, &slow);
6247
6248 // Fast-case: Invoke the function now.
6249 // r1: pushed function
6250 ParameterCount actual(argc_);
6251 __ InvokeFunction(r1, actual, JUMP_FUNCTION);
6252
6253 // Slow-case: Non-function called.
6254 __ bind(&slow);
6255 __ mov(r0, Operand(argc_)); // Setup the number of arguments.
6256 __ mov(r2, Operand(0));
6257 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
6258 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
6259 RelocInfo::CODE_TARGET);
6260}
6261
6262
6263int CompareStub::MinorKey() {
6264 // Encode the two parameters in a unique 16 bit value.
6265 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
6266 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
6267}
6268
6269
6270#undef __
6271
6272} } // namespace v8::internal