// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"


namespace v8 {
namespace internal {

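// Shorthand so the code below can emit instructions through the code
// generator's MacroAssembler as __ instr(...) instead of masm_->instr(...).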
#define __ ACCESS_MASM(masm_)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);



// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

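// Spill the registers that are live on entry to the deferred code: registers
// marked kPush are pushed, and registers that are not yet synced are stored
// into their frame slots (the action value encodes the fp-relative offset).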
void DeferredCode::SaveRegisters() {
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order due to the stack.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      true_target_(NULL),
      false_target_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           JumpTarget* true_target,
                           JumpTarget* false_target)
    : owner_(owner),
      true_target_(true_target),
      false_target_(false_target),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// CodeGenerator implementation

CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
                             bool is_eval)
    : is_eval_(is_eval),
      script_(script),
      deferred_(8),
      masm_(new MacroAssembler(NULL, buffer_size)),
      scope_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      function_return_is_shadowed_(false) {
}


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::GenCode(FunctionLiteral* fun) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(fun);

  ZoneList<Statement*>* body = fun->body();

  // Initialize state.
  ASSERT(scope_ == NULL);
  scope_ = fun->scope();
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;
  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();
    frame_->Enter();
    // tos: code slot
#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    // Allocate space for locals and initialize them. This also checks
    // for stack overflow.
    frame_->AllocateStackSlots();
    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    VirtualFrame::SpilledScope spilled_scope;
    if (scope_->num_heap_slots() > 0) {
      // Allocate local context.
      // Get outer context and create a new context based on it.
      __ ldr(r0, frame_->Function());
      frame_->EmitPush(r0);
      frame_->CallRuntime(Runtime::kNewContext, 1);  // r0 holds the result

#ifdef DEBUG
      JumpTarget verified_true;
      __ cmp(r0, Operand(cp));
      verified_true.Branch(eq);
      __ stop("NewContext: r0 is expected to be the same as cp");
      verified_true.Bind();
#endif
      // Update context local.
      __ str(cp, frame_->Context());
    }

    // TODO(1241774): Improve this code:
    // 1) only needed if we have a context
    // 2) no need to recompute context ptr every single time
    // 3) don't copy parameter operand code from SlotOperand!
    {
      Comment cmnt2(masm_, "[ copy context parameters into .context");

      // Note that iteration order is relevant here! If we have the same
      // parameter twice (e.g., function (x, y, x)), and that parameter
      // needs to be copied into the context, it must be the last argument
      // passed to the parameter that needs to be copied. This is a rare
      // case so we don't check for it, instead we rely on the copying
      // order: such a parameter is copied repeatedly into the same
      // context location and thus the last value is what is seen inside
      // the function.
      for (int i = 0; i < scope_->num_parameters(); i++) {
        Variable* par = scope_->parameter(i);
        Slot* slot = par->slot();
        if (slot != NULL && slot->type() == Slot::CONTEXT) {
          ASSERT(!scope_->is_global_scope());  // no parameters in global scope
          __ ldr(r1, frame_->ParameterAt(i));
          // Loads r2 with context; used below in RecordWrite.
          __ str(r1, SlotOperand(slot, r2));
          // Load the offset into r3.
          int slot_offset =
              FixedArray::kHeaderSize + slot->index() * kPointerSize;
          __ mov(r3, Operand(slot_offset));
          __ RecordWrite(r2, r3, r1);
        }
      }
    }

    // Store the arguments object. This must happen after context
    // initialization because the arguments object may be stored in the
    // context.
    if (scope_->arguments() != NULL) {
      ASSERT(scope_->arguments_shadow() != NULL);
      Comment cmnt(masm_, "[ allocate arguments object");
      { Reference shadow_ref(this, scope_->arguments_shadow());
        { Reference arguments_ref(this, scope_->arguments());
          ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
          __ ldr(r2, frame_->Function());
          // The receiver is below the arguments, the return address,
          // and the frame pointer on the stack.
          const int kReceiverDisplacement = 2 + scope_->num_parameters();
          __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
          __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
          frame_->Adjust(3);
          __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
          frame_->CallStub(&stub, 3);
          frame_->EmitPush(r0);
          arguments_ref.SetValue(NOT_CONST_INIT);
        }
        shadow_ref.SetValue(NOT_CONST_INIT);
      }
      frame_->Drop();  // Value is no longer needed.
    }

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope_->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope_->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatementsAndSpill(body);
    }
  }

  // Generate the return sequence if necessary.
  if (has_valid_frame() || function_return_.is_linked()) {
    if (!function_return_.is_linked()) {
      CodeForReturnPosition(fun);
    }
    // exit
    // r0: result
    // sp: stack pointer
    // fp: frame pointer
    // cp: callee's context
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);

    function_return_.Bind();
    if (FLAG_trace) {
      // Push the return value on the stack as the parameter.
      // Runtime::TraceExit returns the parameter as it is.
      frame_->EmitPush(r0);
      frame_->CallRuntime(Runtime::kTraceExit, 1);
    }

    // Add a label for checking the size of the code used for returning.
    Label check_exit_codesize;
    masm_->bind(&check_exit_codesize);

    // Calculate the exact length of the return sequence and make sure that
    // the constant pool is not emitted inside of the return sequence.
    int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
    int return_sequence_length = Assembler::kJSReturnSequenceLength;
    if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
      // Additional mov instruction generated.
      return_sequence_length++;
    }
    masm_->BlockConstPoolFor(return_sequence_length);

    // Tear down the frame which will restore the caller's frame pointer and
    // the link register.
    frame_->Exit();

    // Here we use masm_-> instead of the __ macro to prevent the code
    // coverage tool from instrumenting this sequence, as we rely on the
    // code size here.
    masm_->add(sp, sp, Operand(sp_delta));
    masm_->Jump(lr);

    // Check that the size of the code used for returning matches what is
    // expected by the debugger. The add instruction above is an addressing
    // mode 1 instruction where there are restrictions on which immediate
    // values can be encoded in the instruction and which immediate values
    // require use of an additional instruction for moving the immediate to
    // a temporary register.
    ASSERT_EQ(return_sequence_length,
              masm_->InstructionsGeneratedSince(&check_exit_codesize));
  }

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
  scope_ = NULL;
}

MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context. However it is safe to dereference be-
      // cause the function context of a function context is itself. Before
      // deleting this mov we should try to create a counter-example first,
      // though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}


MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { CodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression. In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (eg, a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}

void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Jump();
    materialize_true.Bind();
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
      frame_->EmitPush(r0);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
      frame_->EmitPush(r0);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::LoadGlobal() {
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(r0, GlobalObject());
  frame_->EmitPush(r0);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(scratch,
         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(scratch);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  VirtualFrame::SpilledScope spilled_scope;
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValueAndSpill();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
    frame_->SpillAll();
  } else {
    // Anything else can be handled normally.
    LoadAndSpill(expr);
  }
}


Reference::Reference(CodeGenerator* cgen, Expression* expression)
    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  cgen_->UnloadReference(this);
}


void CodeGenerator::LoadReference(Reference* ref) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    LoadAndSpill(property->obj());
    // We use a named reference if the key is a literal symbol, unless it is
    // a string that can be legally parsed as an integer. This is because
    // otherwise we will not get into the slow case code that handles [] on
    // String objects.
    Literal* literal = property->key()->AsLiteral();
    uint32_t dummy;
    if (literal != NULL &&
        literal->handle()->IsSymbol() &&
        !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
      ref->set_type(Reference::NAMED);
    } else {
      LoadAndSpill(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property. Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    LoadAndSpill(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  VirtualFrame::SpilledScope spilled_scope;
  // Pop a reference from the stack while preserving TOS.
  Comment cmnt(masm_, "[ UnloadReference");
  int size = ref->size();
  if (size > 0) {
    frame_->EmitPop(r0);
    frame_->Drop(size);
    frame_->EmitPush(r0);
  }
}

// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  VirtualFrame::SpilledScope spilled_scope;
  // Note: The generated code snippet does not change stack variables.
  // Only the condition code should be set.
  frame_->EmitPop(r0);

  // Fast case checks

  // Check if the value is 'false'.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is 'true'.
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r0, ip);
  true_target->Branch(eq);

  // Check if the value is 'undefined'.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is a smi.
  __ cmp(r0, Operand(Smi::FromInt(0)));
  false_target->Branch(eq);
  __ tst(r0, Operand(kSmiTagMask));
  true_target->Branch(eq);

  // Slow case: call the runtime.
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kToBool, 1);
  // Convert the result (r0) to a condition code.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           int constant_rhs) {
  VirtualFrame::SpilledScope spilled_scope;
  // sp[0] : y
  // sp[1] : x
  // result : r0

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:  // fall through.
    case Token::SUB:  // fall through.
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      frame_->EmitPop(r0);  // r0 : y
      frame_->EmitPop(r1);  // r1 : x
      GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
      frame_->CallStub(&stub, 0);
      break;
    }

    case Token::COMMA:
      frame_->EmitPop(r0);
      // simply discard left value
      frame_->Drop();
      break;

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}

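// Deferred (slow-case) code for an inlined smi operation with a known
// constant operand: it restores the original operands to r1 and r0, undoing
// any optimistic arithmetic, and falls back to the GenericBinaryOpStub.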
class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiOperation::Generate() {
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
      if (reversed_) {
        __ sub(r0, r0, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, r0, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, r0, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (reversed_) {
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ mov(r1, Operand(r0));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (!reversed_) {
        __ mov(r1, Operand(r0));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      } else {
        UNREACHABLE();  // Should have been handled in SmiOperation.
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
  __ CallStub(&stub);
}

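// Returns true if at most two bits are set in x; x &= x - 1 clears the
// lowest set bit, so what remains must be zero or a power of two.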
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  VirtualFrame::SpilledScope spilled_scope;
  // NOTE: This is an attempt to inline (a bit) more of the code for
  // some possible smi operations (like + and -) when (at least) one
  // of the operands is a literal smi. With this optimization, the
  // performance of the system is increased by ~15%, and the generated
  // code size is increased by ~1% (measured on a combination of
  // different benchmarks).

  // sp[0] : operand

  int int_value = Smi::cast(*value)->value();

  JumpTarget exit;
  frame_->EmitPop(r0);

  bool something_to_inline = true;
  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);

      __ add(r0, r0, Operand(value), SetCC);
      deferred->Branch(vs);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);

      if (reversed) {
        __ rsb(r0, r0, Operand(value), SetCC);
      } else {
        __ sub(r0, r0, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      break;
    }


    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      switch (op) {
        case Token::BIT_OR:  __ orr(r0, r0, Operand(value)); break;
        case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
        case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
        default: UNREACHABLE();
      }
      deferred->BindExit();
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
        break;
      }
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, shift_value, false, mode);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            __ mov(r2, Operand(r2, LSL, shift_value));
          }
          // check that the *unsigned* result fits in a smi
          __ add(r3, r2, Operand(0x40000000), SetCC);
          deferred->Branch(mi);
          break;
        }
        case Token::SHR: {
          // LSR by immediate 0 means shifting 32 bits.
          if (shift_value != 0) {
            __ mov(r2, Operand(r2, LSR, shift_value));
          }
          // Check that the *unsigned* result fits in a smi.
          // Neither of the two high-order bits can be set:
          // - 0x80000000: high bit would be lost when smi tagging
          // - 0x40000000: this number would convert to negative when
          //   smi tagging.
          // These two cases can only happen with shifts by 0 or 1 when
          // handed a valid smi.
          __ and_(r3, r2, Operand(0xc0000000), SetCC);
          deferred->Branch(ne);
          break;
        }
        case Token::SAR: {
          if (shift_value != 0) {
            // ASR by immediate 0 means shifting 32 bits.
            __ mov(r2, Operand(r2, ASR, shift_value));
          }
          break;
        }
        default: UNREACHABLE();
      }
      __ mov(r0, Operand(r2, LSL, kSmiTagSize));
      deferred->BindExit();
      break;
    }

    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
        break;
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(r0, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
      mask = (int_value << kSmiTagSize) - 1;
      __ and_(r0, r0, Operand(mask));
      deferred->BindExit();
      break;
    }

    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
        break;
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi. It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
      __ tst(r0, Operand(mask));
      deferred->Branch(ne);
      MultiplyByKnownInt(masm_, r0, r0, int_value);
      deferred->BindExit();
      break;
    }

    default:
      something_to_inline = false;
      break;
  }

  if (!something_to_inline) {
    if (!reversed) {
      frame_->EmitPush(r0);
      __ mov(r0, Operand(value));
      frame_->EmitPush(r0);
      GenericBinaryOperation(op, mode, int_value);
    } else {
      __ mov(ip, Operand(value));
      frame_->EmitPush(ip);
      frame_->EmitPush(r0);
      GenericBinaryOperation(op, mode, kUnknownIntValue);
    }
  }

  exit.Bind();
}

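// Compare two expressions (or, when an expression is NULL, values already on
// the frame) and leave the result as a condition in cc_reg_. Two smi operands
// are compared directly; everything else goes through the CompareStub.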
void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  if (left != NULL) LoadAndSpill(left);
  if (right != NULL) LoadAndSpill(right);

  VirtualFrame::SpilledScope spilled_scope;
  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  JumpTarget exit;
  JumpTarget smi;
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    frame_->EmitPop(r1);
    frame_->EmitPop(r0);
  } else {
    frame_->EmitPop(r0);
    frame_->EmitPop(r1);
  }
  __ orr(r2, r0, Operand(r1));
  __ tst(r2, Operand(kSmiTagMask));
  smi.Branch(eq);

  // Perform non-smi comparison by stub.
  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
  // We call with 0 args because there are 0 on the stack.
  CompareStub stub(cc, strict);
  frame_->CallStub(&stub, 0);
  __ cmp(r0, Operand(0));
  exit.Jump();

  // Do smi comparisons by pointer comparison.
  smi.Bind();
  __ cmp(r1, Operand(r0));

  exit.Bind();
  cc_reg_ = cc;
}


// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      int position) {
  VirtualFrame::SpilledScope spilled_scope;
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    LoadAndSpill(args->at(i));
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.
  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
  CallFunctionStub call_function(arg_count, in_loop);
  frame_->CallStub(&call_function, arg_count + 1);

  // Restore context and pop function from the stack.
  __ ldr(cp, frame_->Context());
  frame_->Drop();  // discard the TOS
}


void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(has_cc());
  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
  target->Branch(cc);
  cc_reg_ = al;
}

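// Emit an inlined stack limit check: call the StackCheckStub if sp is below
// the stack limit. The return address placed in lr is computed from pc, so
// the add/cmp/mov sequence below must keep its exact three-instruction length.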
void CodeGenerator::CheckStack() {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ check stack");
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  // Put the lr setup instruction in the delay slot. kInstrSize is added to
  // the implicit 8 byte offset that always applies to operations with pc and
  // gives a return address 12 bytes down.
  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
  masm_->cmp(sp, Operand(ip));
  StackCheckStub stub;
  // Call the stub if lower.
  masm_->mov(pc,
             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
                     RelocInfo::CODE_TARGET),
             LeaveCC,
             lo);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
    VisitAndSpill(statements->at(i));
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  VisitStatementsAndSpill(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}

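// Declare the global variables and functions in 'pairs' by calling the
// kDeclareGlobals runtime function with the current context, the pairs array
// and an is-eval flag as arguments.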
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  VirtualFrame::SpilledScope spilled_scope;
  frame_->EmitPush(cp);
  __ mov(r0, Operand(pairs));
  frame_->EmitPush(r0);
  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // The result is discarded.
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call.
    frame_->EmitPush(cp);
    __ mov(r0, Operand(var->name()));
    frame_->EmitPush(r0);
    // Declaration nodes are always declared in only two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    __ mov(r0, Operand(Smi::FromInt(attr)));
    frame_->EmitPush(r0);
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
      frame_->EmitPush(r0);
    } else if (node->fun() != NULL) {
      LoadAndSpill(node->fun());
    } else {
      __ mov(r0, Operand(0));  // no initial value!
      frame_->EmitPush(r0);
    }
    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).
    ASSERT(frame_->height() == original_height);
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    {
      // Set initial value.
      Reference target(this, node->proxy());
      LoadAndSpill(val);
      target.SetValue(NOT_CONST_INIT);
      // The reference is removed from the stack (preserving TOS) when
      // it goes out of scope.
    }
    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  LoadAndSpill(expression);
  frame_->Drop();
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);

  JumpTarget exit;
  if (has_then_stm && has_else_stm) {
    Comment cmnt(masm_, "[ IfThenElse");
    JumpTarget then;
    JumpTarget else_;
    // if (cond)
    LoadConditionAndSpill(node->condition(), &then, &else_, true);
    if (frame_ != NULL) {
      Branch(false, &else_);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      VisitAndSpill(node->then_statement());
    }
    if (frame_ != NULL) {
      exit.Jump();
    }
    // else
    if (else_.is_linked()) {
      else_.Bind();
      VisitAndSpill(node->else_statement());
    }

  } else if (has_then_stm) {
    Comment cmnt(masm_, "[ IfThen");
    ASSERT(!has_else_stm);
    JumpTarget then;
    // if (cond)
    LoadConditionAndSpill(node->condition(), &then, &exit, true);
    if (frame_ != NULL) {
      Branch(false, &exit);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      VisitAndSpill(node->then_statement());
    }

  } else if (has_else_stm) {
    Comment cmnt(masm_, "[ IfElse");
    ASSERT(!has_then_stm);
    JumpTarget else_;
    // if (!cond)
    LoadConditionAndSpill(node->condition(), &exit, &else_, true);
    if (frame_ != NULL) {
      Branch(true, &exit);
    }
    // else
    if (frame_ != NULL || else_.is_linked()) {
      else_.Bind();
      VisitAndSpill(node->else_statement());
    }

  } else {
    Comment cmnt(masm_, "[ If");
    ASSERT(!has_then_stm && !has_else_stm);
    // if (cond)
    LoadConditionAndSpill(node->condition(), &exit, &exit, false);
    if (frame_ != NULL) {
      if (has_cc()) {
        cc_reg_ = al;
      } else {
        frame_->Drop();
      }
    }
  }

  // end
  if (exit.is_linked()) {
    exit.Bind();
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  LoadAndSpill(node->expression());
  if (function_return_is_shadowed_) {
    frame_->EmitPop(r0);
    function_return_.Jump();
  } else {
    // Pop the result from the frame and prepare the frame for
    // returning thus making it easier to merge.
    frame_->EmitPop(r0);
    frame_->PrepareForReturn();

    function_return_.Jump();
  }
}

void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  LoadAndSpill(node->expression());
  if (node->is_catch_block()) {
    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    frame_->CallRuntime(Runtime::kPushContext, 1);
  }
#ifdef DEBUG
  JumpTarget verified_true;
  __ cmp(r0, Operand(cp));
  verified_true.Branch(eq);
  __ stop("PushContext: r0 is expected to be the same as cp");
  verified_true.Bind();
#endif
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);

  LoadAndSpill(node->tag());

  JumpTarget next_test;
  JumpTarget fall_through;
  JumpTarget default_entry;
  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;

  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);
    if (clause->is_default()) {
      // Remember the default clause and compile it at the end.
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case clause");
    // Compile the test.
    next_test.Bind();
    next_test.Unuse();
    // Duplicate TOS.
    __ ldr(r0, frame_->Top());
    frame_->EmitPush(r0);
    Comparison(eq, NULL, clause->label(), true);
    Branch(false, &next_test);

    // Before entering the body from the test, remove the switch value from
    // the stack.
    frame_->Drop();

    // Label the body so that fall through is enabled.
    if (i > 0 && cases->at(i - 1)->is_default()) {
      default_exit.Bind();
    } else {
      fall_through.Bind();
      fall_through.Unuse();
    }
    VisitStatementsAndSpill(clause->statements());

    // If control flow can fall through from the body, jump to the next body
    // or the end of the statement.
    if (frame_ != NULL) {
      if (i < length - 1 && cases->at(i + 1)->is_default()) {
        default_entry.Jump();
      } else {
        fall_through.Jump();
      }
    }
  }

  // The final "test" removes the switch value.
  next_test.Bind();
  frame_->Drop();

  // If there is a default clause, compile it.
  if (default_clause != NULL) {
    Comment cmnt(masm_, "[ Default clause");
    default_entry.Bind();
    VisitStatementsAndSpill(default_clause->statements());
    // If control flow can fall out of the default and there is a case after
    // it, jump to that case's body.
    if (frame_ != NULL && default_exit.is_bound()) {
      default_exit.Jump();
    }
  }

  if (fall_through.is_linked()) {
    fall_through.Bind();
  }

  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ DoWhileStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  JumpTarget body(JumpTarget::BIDIRECTIONAL);

  // Label the top of the loop for the backward CFG edge. If the test
  // is always true we can use the continue target, and if the test is
  // always false there is no need.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  switch (info) {
    case ALWAYS_TRUE:
      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
      node->continue_target()->Bind();
      break;
    case ALWAYS_FALSE:
      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      break;
    case DONT_KNOW:
      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      body.Bind();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.
  VisitAndSpill(node->body());

  // Compile the test.
  switch (info) {
    case ALWAYS_TRUE:
      // If control can fall off the end of the body, jump back to the
      // top.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
      break;
    case ALWAYS_FALSE:
      // If we have a continue in the body, we only have to bind its
      // jump target.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      break;
    case DONT_KNOW:
      // We have to compile the test expression if it can be reached by
      // control flow falling out of the body or via continue.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (has_valid_frame()) {
        Comment cmnt(masm_, "[ DoWhileCondition");
        CodeForDoWhileConditionPosition(node);
        LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
        if (has_valid_frame()) {
          // An invalid frame here indicates that control did not
          // fall out of the test expression.
          Branch(true, &body);
        }
      }
      break;
  }

  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}

1605void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1606#ifdef DEBUG
1607 int original_height = frame_->height();
1608#endif
1609 VirtualFrame::SpilledScope spilled_scope;
1610 Comment cmnt(masm_, "[ WhileStatement");
1611 CodeForStatementPosition(node);
1612
1613 // If the test is never true and has no side effects there is no need
1614 // to compile the test or body.
1615 ConditionAnalysis info = AnalyzeCondition(node->cond());
1616 if (info == ALWAYS_FALSE) return;
1617
1618 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1619
1620 // Label the top of the loop with the continue target for the backward
1621 // CFG edge.
1622 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1623 node->continue_target()->Bind();
1624
1625 if (info == DONT_KNOW) {
1626 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001627 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001628 if (has_valid_frame()) {
1629 // A NULL frame indicates that control did not fall out of the
1630 // test expression.
1631 Branch(false, node->break_target());
1632 }
1633 if (has_valid_frame() || body.is_linked()) {
1634 body.Bind();
1635 }
1636 }
1637
1638 if (has_valid_frame()) {
1639 CheckStack(); // TODO(1222600): ignore if body contains calls.
1640 VisitAndSpill(node->body());
1641
1642 // If control flow can fall out of the body, jump back to the top.
1643 if (has_valid_frame()) {
1644 node->continue_target()->Jump();
1645 }
1646 }
1647 if (node->break_target()->is_linked()) {
1648 node->break_target()->Bind();
1649 }
1650 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1651}
1652
1653
1654void CodeGenerator::VisitForStatement(ForStatement* node) {
1655#ifdef DEBUG
1656 int original_height = frame_->height();
1657#endif
1658 VirtualFrame::SpilledScope spilled_scope;
1659 Comment cmnt(masm_, "[ ForStatement");
1660 CodeForStatementPosition(node);
1661 if (node->init() != NULL) {
1662 VisitAndSpill(node->init());
1663 }
1664
1665 // If the test is never true there is no need to compile the test or
1666 // body.
1667 ConditionAnalysis info = AnalyzeCondition(node->cond());
1668 if (info == ALWAYS_FALSE) return;
1669
1670 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1671
1672 // If there is no update statement, label the top of the loop with the
1673 // continue target, otherwise with the loop target.
1674 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1675 if (node->next() == NULL) {
1676 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1677 node->continue_target()->Bind();
1678 } else {
1679 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1680 loop.Bind();
1681 }
1682
1683 // If the test is always true, there is no need to compile it.
1684 if (info == DONT_KNOW) {
1685 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001686 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001687 if (has_valid_frame()) {
1688 Branch(false, node->break_target());
1689 }
1690 if (has_valid_frame() || body.is_linked()) {
1691 body.Bind();
1692 }
1693 }
1694
1695 if (has_valid_frame()) {
1696 CheckStack(); // TODO(1222600): ignore if body contains calls.
1697 VisitAndSpill(node->body());
1698
1699 if (node->next() == NULL) {
1700 // If there is no update statement and control flow can fall out
1701 // of the loop, jump directly to the continue label.
1702 if (has_valid_frame()) {
1703 node->continue_target()->Jump();
1704 }
1705 } else {
1706 // If there is an update statement and control flow can reach it
1707 // via falling out of the body of the loop or continuing, we
1708 // compile the update statement.
1709 if (node->continue_target()->is_linked()) {
1710 node->continue_target()->Bind();
1711 }
1712 if (has_valid_frame()) {
1713        // Record the source position of the statement, as this code,
1714        // which comes after the code for the body, actually belongs to
1715        // the loop statement and not the body.
1716 CodeForStatementPosition(node);
1717 VisitAndSpill(node->next());
1718 loop.Jump();
1719 }
1720 }
1721 }
1722 if (node->break_target()->is_linked()) {
1723 node->break_target()->Bind();
1724 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001725 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1726}
1727
1728
1729void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1730#ifdef DEBUG
1731 int original_height = frame_->height();
1732#endif
1733 VirtualFrame::SpilledScope spilled_scope;
1734 Comment cmnt(masm_, "[ ForInStatement");
1735 CodeForStatementPosition(node);
1736
1737 JumpTarget primitive;
1738 JumpTarget jsobject;
1739 JumpTarget fixed_array;
1740 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1741 JumpTarget end_del_check;
1742 JumpTarget exit;
1743
1744 // Get the object to enumerate over (converted to JSObject).
1745 LoadAndSpill(node->enumerable());
1746
1747 // Both SpiderMonkey and kjs ignore null and undefined in contrast
1748 // to the specification. 12.6.4 mandates a call to ToObject.
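  // (So 'for (var p in null) {}' simply performs zero iterations rather
  // than failing.)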
1749 frame_->EmitPop(r0);
1750 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
1751 __ cmp(r0, ip);
1752 exit.Branch(eq);
1753 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1754 __ cmp(r0, ip);
1755 exit.Branch(eq);
1756
1757 // Stack layout in body:
1758 // [iteration counter (Smi)]
1759 // [length of array]
1760 // [FixedArray]
1761 // [Map or 0]
1762 // [Object]
1763
1764 // Check if enumerable is already a JSObject
1765 __ tst(r0, Operand(kSmiTagMask));
1766 primitive.Branch(eq);
1767 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
1768 jsobject.Branch(hs);
1769
1770 primitive.Bind();
1771 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00001772 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00001773
1774 jsobject.Bind();
1775 // Get the set of properties (as a FixedArray or Map).
Steve Blockd0582a62009-12-15 09:54:21 +00001776 // r0: value to be iterated over
1777 frame_->EmitPush(r0); // Push the object being iterated over.
1778
1779 // Check cache validity in generated code. This is a fast case for
1780 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
1781 // guarantee cache validity, call the runtime system to check cache
1782 // validity or get the property names in a fixed array.
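  // Roughly, the loop below walks the object and its prototype chain and,
  // for each object, checks that elements() is the empty fixed array and
  // that its map's instance descriptors carry an enum cache; for every
  // object except the receiver the cache must also be empty.  The walk
  // stops at a null prototype; any failed check jumps to call_runtime.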
1783 JumpTarget call_runtime;
1784 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1785 JumpTarget check_prototype;
1786 JumpTarget use_cache;
1787 __ mov(r1, Operand(r0));
1788 loop.Bind();
1789 // Check that there are no elements.
1790 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
1791 __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
1792 __ cmp(r2, r4);
1793 call_runtime.Branch(ne);
1794 // Check that instance descriptors are not empty so that we can
1795 // check for an enum cache. Leave the map in r3 for the subsequent
1796 // prototype load.
1797 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
1798 __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
1799 __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
1800 __ cmp(r2, ip);
1801 call_runtime.Branch(eq);
1802  // Check that there is an enum cache in the non-empty instance
1803 // descriptors. This is the case if the next enumeration index
1804 // field does not contain a smi.
1805 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
1806 __ tst(r2, Operand(kSmiTagMask));
1807 call_runtime.Branch(eq);
1808 // For all objects but the receiver, check that the cache is empty.
1809 // r4: empty fixed array root.
1810 __ cmp(r1, r0);
1811 check_prototype.Branch(eq);
1812 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
1813 __ cmp(r2, r4);
1814 call_runtime.Branch(ne);
1815 check_prototype.Bind();
1816 // Load the prototype from the map and loop if non-null.
1817 __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
1818 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1819 __ cmp(r1, ip);
1820 loop.Branch(ne);
1821 // The enum cache is valid. Load the map of the object being
1822 // iterated over and use the cache for the iteration.
1823 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
1824 use_cache.Jump();
1825
1826 call_runtime.Bind();
1827 // Call the runtime to get the property names for the object.
1828 frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
Steve Blocka7e24c12009-10-30 11:49:00 +00001829 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1830
Steve Blockd0582a62009-12-15 09:54:21 +00001831 // If we got a map from the runtime call, we can do a fast
1832 // modification check. Otherwise, we got a fixed array, and we have
1833 // to do a slow check.
1834 // r0: map or fixed array (result from call to
1835 // Runtime::kGetPropertyNamesFast)
Steve Blocka7e24c12009-10-30 11:49:00 +00001836 __ mov(r2, Operand(r0));
1837 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
1838 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
1839 __ cmp(r1, ip);
1840 fixed_array.Branch(ne);
1841
Steve Blockd0582a62009-12-15 09:54:21 +00001842 use_cache.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001843 // Get enum cache
Steve Blockd0582a62009-12-15 09:54:21 +00001844 // r0: map (either the result from a call to
1845 // Runtime::kGetPropertyNamesFast or has been fetched directly from
1846 // the object)
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 __ mov(r1, Operand(r0));
1848 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
1849 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
1850 __ ldr(r2,
1851 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
1852
1853 frame_->EmitPush(r0); // map
1854 frame_->EmitPush(r2); // enum cache bridge cache
1855 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
1856 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1857 frame_->EmitPush(r0);
1858 __ mov(r0, Operand(Smi::FromInt(0)));
1859 frame_->EmitPush(r0);
1860 entry.Jump();
1861
1862 fixed_array.Bind();
1863 __ mov(r1, Operand(Smi::FromInt(0)));
1864 frame_->EmitPush(r1); // insert 0 in place of Map
1865 frame_->EmitPush(r0);
1866
1867 // Push the length of the array and the initial index onto the stack.
1868 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
1869 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1870 frame_->EmitPush(r0);
1871 __ mov(r0, Operand(Smi::FromInt(0))); // init index
1872 frame_->EmitPush(r0);
1873
1874 // Condition.
1875 entry.Bind();
1876 // sp[0] : index
1877 // sp[1] : array/enum cache length
1878 // sp[2] : array or enum cache
1879 // sp[3] : 0 or map
1880 // sp[4] : enumerable
1881 // Grab the current frame's height for the break and continue
1882 // targets only after all the state is pushed on the frame.
1883 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1884 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1885
1886 __ ldr(r0, frame_->ElementAt(0)); // load the current count
1887 __ ldr(r1, frame_->ElementAt(1)); // load the length
1888 __ cmp(r0, Operand(r1)); // compare to the array length
1889 node->break_target()->Branch(hs);
1890
1891 __ ldr(r0, frame_->ElementAt(0));
1892
1893 // Get the i'th entry of the array.
1894 __ ldr(r2, frame_->ElementAt(2));
1895 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1896 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1897
1898 // Get Map or 0.
1899 __ ldr(r2, frame_->ElementAt(3));
1900 // Check if this (still) matches the map of the enumerable.
1901 // If not, we have to filter the key.
1902 __ ldr(r1, frame_->ElementAt(4));
1903 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
1904 __ cmp(r1, Operand(r2));
1905 end_del_check.Branch(eq);
1906
1907 // Convert the entry to a string (or null if it isn't a property anymore).
1908 __ ldr(r0, frame_->ElementAt(4)); // push enumerable
1909 frame_->EmitPush(r0);
1910 frame_->EmitPush(r3); // push entry
Steve Blockd0582a62009-12-15 09:54:21 +00001911 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001912 __ mov(r3, Operand(r0));
1913
1914 // If the property has been removed while iterating, we just skip it.
1915 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1916 __ cmp(r3, ip);
1917 node->continue_target()->Branch(eq);
1918
1919 end_del_check.Bind();
1920 // Store the entry in the 'each' expression and take another spin in the
1921  // loop. r3: i'th entry of the enum cache (or string thereof)
1922 frame_->EmitPush(r3); // push entry
1923 { Reference each(this, node->each());
1924 if (!each.is_illegal()) {
1925 if (each.size() > 0) {
1926 __ ldr(r0, frame_->ElementAt(each.size()));
1927 frame_->EmitPush(r0);
1928 }
1929 // If the reference was to a slot we rely on the convenient property
1930 // that it doesn't matter whether a value (eg, r3 pushed above) is
1931 // right on top of or right underneath a zero-sized reference.
1932 each.SetValue(NOT_CONST_INIT);
1933 if (each.size() > 0) {
1934 // It's safe to pop the value lying on top of the reference before
1935 // unloading the reference itself (which preserves the top of stack,
1936 // ie, now the topmost value of the non-zero sized reference), since
1937 // we will discard the top of stack after unloading the reference
1938 // anyway.
1939 frame_->EmitPop(r0);
1940 }
1941 }
1942 }
1943 // Discard the i'th entry pushed above or else the remainder of the
1944 // reference, whichever is currently on top of the stack.
1945 frame_->Drop();
1946
1947 // Body.
1948 CheckStack(); // TODO(1222600): ignore if body contains calls.
1949 VisitAndSpill(node->body());
1950
1951 // Next. Reestablish a spilled frame in case we are coming here via
1952 // a continue in the body.
1953 node->continue_target()->Bind();
1954 frame_->SpillAll();
1955 frame_->EmitPop(r0);
1956 __ add(r0, r0, Operand(Smi::FromInt(1)));
1957 frame_->EmitPush(r0);
1958 entry.Jump();
1959
1960 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1961 // any frame.
1962 node->break_target()->Bind();
1963 frame_->Drop(5);
1964
1965 // Exit.
1966 exit.Bind();
1967 node->continue_target()->Unuse();
1968 node->break_target()->Unuse();
1969 ASSERT(frame_->height() == original_height);
1970}
1971
1972
Steve Block3ce2e202009-11-05 08:53:23 +00001973void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001974#ifdef DEBUG
1975 int original_height = frame_->height();
1976#endif
1977 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001978 Comment cmnt(masm_, "[ TryCatchStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001979 CodeForStatementPosition(node);
1980
1981 JumpTarget try_block;
1982 JumpTarget exit;
1983
1984 try_block.Call();
1985 // --- Catch block ---
1986 frame_->EmitPush(r0);
1987
1988 // Store the caught exception in the catch variable.
1989 { Reference ref(this, node->catch_var());
1990 ASSERT(ref.is_slot());
1991 // Here we make use of the convenient property that it doesn't matter
1992 // whether a value is immediately on top of or underneath a zero-sized
1993 // reference.
1994 ref.SetValue(NOT_CONST_INIT);
1995 }
1996
1997 // Remove the exception from the stack.
1998 frame_->Drop();
1999
2000 VisitStatementsAndSpill(node->catch_block()->statements());
2001 if (frame_ != NULL) {
2002 exit.Jump();
2003 }
2004
2005
2006 // --- Try block ---
2007 try_block.Bind();
2008
2009 frame_->PushTryHandler(TRY_CATCH_HANDLER);
2010 int handler_height = frame_->height();
2011
2012 // Shadow the labels for all escapes from the try block, including
2013 // returns. During shadowing, the original label is hidden as the
2014 // LabelShadow and operations on the original actually affect the
2015 // shadowing label.
2016 //
2017 // We should probably try to unify the escaping labels and the return
2018 // label.
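  // (For example, a 'return' inside the try block must not leave the
  // handler on the try chain; the shadow target intercepts the jump so
  // that the unlink code emitted below runs before control reaches the
  // real return label.)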
2019 int nof_escapes = node->escaping_targets()->length();
2020 List<ShadowTarget*> shadows(1 + nof_escapes);
2021
2022 // Add the shadow target for the function return.
2023 static const int kReturnShadowIndex = 0;
2024 shadows.Add(new ShadowTarget(&function_return_));
2025 bool function_return_was_shadowed = function_return_is_shadowed_;
2026 function_return_is_shadowed_ = true;
2027 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2028
2029 // Add the remaining shadow targets.
2030 for (int i = 0; i < nof_escapes; i++) {
2031 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2032 }
2033
2034 // Generate code for the statements in the try block.
2035 VisitStatementsAndSpill(node->try_block()->statements());
2036
2037 // Stop the introduced shadowing and count the number of required unlinks.
2038 // After shadowing stops, the original labels are unshadowed and the
2039 // LabelShadows represent the formerly shadowing labels.
2040 bool has_unlinks = false;
2041 for (int i = 0; i < shadows.length(); i++) {
2042 shadows[i]->StopShadowing();
2043 has_unlinks = has_unlinks || shadows[i]->is_linked();
2044 }
2045 function_return_is_shadowed_ = function_return_was_shadowed;
2046
2047 // Get an external reference to the handler address.
2048 ExternalReference handler_address(Top::k_handler_address);
2049
2050 // If we can fall off the end of the try block, unlink from try chain.
2051 if (has_valid_frame()) {
2052 // The next handler address is on top of the frame. Unlink from
2053 // the handler list and drop the rest of this handler from the
2054 // frame.
2055 ASSERT(StackHandlerConstants::kNextOffset == 0);
2056 frame_->EmitPop(r1);
2057 __ mov(r3, Operand(handler_address));
2058 __ str(r1, MemOperand(r3));
2059 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2060 if (has_unlinks) {
2061 exit.Jump();
2062 }
2063 }
2064
2065 // Generate unlink code for the (formerly) shadowing labels that have been
2066 // jumped to. Deallocate each shadow target.
2067 for (int i = 0; i < shadows.length(); i++) {
2068 if (shadows[i]->is_linked()) {
2069      // Unlink from the try chain.
2070 shadows[i]->Bind();
2071 // Because we can be jumping here (to spilled code) from unspilled
2072 // code, we need to reestablish a spilled frame at this block.
2073 frame_->SpillAll();
2074
2075 // Reload sp from the top handler, because some statements that we
2076 // break from (eg, for...in) may have left stuff on the stack.
2077 __ mov(r3, Operand(handler_address));
2078 __ ldr(sp, MemOperand(r3));
2079 frame_->Forget(frame_->height() - handler_height);
2080
2081 ASSERT(StackHandlerConstants::kNextOffset == 0);
2082 frame_->EmitPop(r1);
2083 __ str(r1, MemOperand(r3));
2084 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2085
2086 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2087 frame_->PrepareForReturn();
2088 }
2089 shadows[i]->other_target()->Jump();
2090 }
2091 }
2092
2093 exit.Bind();
2094 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2095}
2096
2097
Steve Block3ce2e202009-11-05 08:53:23 +00002098void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002099#ifdef DEBUG
2100 int original_height = frame_->height();
2101#endif
2102 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00002103 Comment cmnt(masm_, "[ TryFinallyStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002104 CodeForStatementPosition(node);
2105
2106 // State: Used to keep track of reason for entering the finally
2107 // block. Should probably be extended to hold information for
2108 // break/continue from within the try block.
2109 enum { FALLING, THROWING, JUMPING };
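  // (For example, 'try { return x; } finally { g(); }' enters the finally
  // block with r2 == Smi::FromInt(JUMPING + kReturnShadowIndex) and the
  // return value on top of the stack; after the finally code runs, the
  // dispatch at the end of this function jumps back to the shadowed
  // return.)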
2110
2111 JumpTarget try_block;
2112 JumpTarget finally_block;
2113
2114 try_block.Call();
2115
2116 frame_->EmitPush(r0); // save exception object on the stack
2117 // In case of thrown exceptions, this is where we continue.
2118 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2119 finally_block.Jump();
2120
2121 // --- Try block ---
2122 try_block.Bind();
2123
2124 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2125 int handler_height = frame_->height();
2126
2127 // Shadow the labels for all escapes from the try block, including
2128 // returns. Shadowing hides the original label as the LabelShadow and
2129 // operations on the original actually affect the shadowing label.
2130 //
2131 // We should probably try to unify the escaping labels and the return
2132 // label.
2133 int nof_escapes = node->escaping_targets()->length();
2134 List<ShadowTarget*> shadows(1 + nof_escapes);
2135
2136 // Add the shadow target for the function return.
2137 static const int kReturnShadowIndex = 0;
2138 shadows.Add(new ShadowTarget(&function_return_));
2139 bool function_return_was_shadowed = function_return_is_shadowed_;
2140 function_return_is_shadowed_ = true;
2141 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2142
2143 // Add the remaining shadow targets.
2144 for (int i = 0; i < nof_escapes; i++) {
2145 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2146 }
2147
2148 // Generate code for the statements in the try block.
2149 VisitStatementsAndSpill(node->try_block()->statements());
2150
2151 // Stop the introduced shadowing and count the number of required unlinks.
2152 // After shadowing stops, the original labels are unshadowed and the
2153 // LabelShadows represent the formerly shadowing labels.
2154 int nof_unlinks = 0;
2155 for (int i = 0; i < shadows.length(); i++) {
2156 shadows[i]->StopShadowing();
2157 if (shadows[i]->is_linked()) nof_unlinks++;
2158 }
2159 function_return_is_shadowed_ = function_return_was_shadowed;
2160
2161 // Get an external reference to the handler address.
2162 ExternalReference handler_address(Top::k_handler_address);
2163
2164 // If we can fall off the end of the try block, unlink from the try
2165 // chain and set the state on the frame to FALLING.
2166 if (has_valid_frame()) {
2167 // The next handler address is on top of the frame.
2168 ASSERT(StackHandlerConstants::kNextOffset == 0);
2169 frame_->EmitPop(r1);
2170 __ mov(r3, Operand(handler_address));
2171 __ str(r1, MemOperand(r3));
2172 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2173
2174 // Fake a top of stack value (unneeded when FALLING) and set the
2175 // state in r2, then jump around the unlink blocks if any.
2176 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2177 frame_->EmitPush(r0);
2178 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2179 if (nof_unlinks > 0) {
2180 finally_block.Jump();
2181 }
2182 }
2183
2184 // Generate code to unlink and set the state for the (formerly)
2185 // shadowing targets that have been jumped to.
2186 for (int i = 0; i < shadows.length(); i++) {
2187 if (shadows[i]->is_linked()) {
2188 // If we have come from the shadowed return, the return value is
2189 // in (a non-refcounted reference to) r0. We must preserve it
2190 // until it is pushed.
2191 //
2192 // Because we can be jumping here (to spilled code) from
2193 // unspilled code, we need to reestablish a spilled frame at
2194 // this block.
2195 shadows[i]->Bind();
2196 frame_->SpillAll();
2197
2198 // Reload sp from the top handler, because some statements that
2199 // we break from (eg, for...in) may have left stuff on the
2200 // stack.
2201 __ mov(r3, Operand(handler_address));
2202 __ ldr(sp, MemOperand(r3));
2203 frame_->Forget(frame_->height() - handler_height);
2204
2205 // Unlink this handler and drop it from the frame. The next
2206 // handler address is currently on top of the frame.
2207 ASSERT(StackHandlerConstants::kNextOffset == 0);
2208 frame_->EmitPop(r1);
2209 __ str(r1, MemOperand(r3));
2210 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2211
2212 if (i == kReturnShadowIndex) {
2213 // If this label shadowed the function return, materialize the
2214 // return value on the stack.
2215 frame_->EmitPush(r0);
2216 } else {
2217 // Fake TOS for targets that shadowed breaks and continues.
2218 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2219 frame_->EmitPush(r0);
2220 }
2221 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2222 if (--nof_unlinks > 0) {
2223 // If this is not the last unlink block, jump around the next.
2224 finally_block.Jump();
2225 }
2226 }
2227 }
2228
2229 // --- Finally block ---
2230 finally_block.Bind();
2231
2232 // Push the state on the stack.
2233 frame_->EmitPush(r2);
2234
2235 // We keep two elements on the stack - the (possibly faked) result
2236 // and the state - while evaluating the finally block.
2237 //
2238 // Generate code for the statements in the finally block.
2239 VisitStatementsAndSpill(node->finally_block()->statements());
2240
2241 if (has_valid_frame()) {
2242 // Restore state and return value or faked TOS.
2243 frame_->EmitPop(r2);
2244 frame_->EmitPop(r0);
2245 }
2246
2247 // Generate code to jump to the right destination for all used
2248 // formerly shadowing targets. Deallocate each shadow target.
2249 for (int i = 0; i < shadows.length(); i++) {
2250 if (has_valid_frame() && shadows[i]->is_bound()) {
2251 JumpTarget* original = shadows[i]->other_target();
2252 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2253 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2254 JumpTarget skip;
2255 skip.Branch(ne);
2256 frame_->PrepareForReturn();
2257 original->Jump();
2258 skip.Bind();
2259 } else {
2260 original->Branch(eq);
2261 }
2262 }
2263 }
2264
2265 if (has_valid_frame()) {
2266 // Check if we need to rethrow the exception.
2267 JumpTarget exit;
2268 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2269 exit.Branch(ne);
2270
2271 // Rethrow exception.
2272 frame_->EmitPush(r0);
2273 frame_->CallRuntime(Runtime::kReThrow, 1);
2274
2275 // Done.
2276 exit.Bind();
2277 }
2278 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2279}
2280
2281
2282void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2283#ifdef DEBUG
2284 int original_height = frame_->height();
2285#endif
2286 VirtualFrame::SpilledScope spilled_scope;
2287  Comment cmnt(masm_, "[ DebuggerStatement");
2288 CodeForStatementPosition(node);
2289#ifdef ENABLE_DEBUGGER_SUPPORT
2290 frame_->CallRuntime(Runtime::kDebugBreak, 0);
2291#endif
2292 // Ignore the return value.
2293 ASSERT(frame_->height() == original_height);
2294}
2295
2296
2297void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2298 VirtualFrame::SpilledScope spilled_scope;
2299 ASSERT(boilerplate->IsBoilerplate());
2300
Steve Blocka7e24c12009-10-30 11:49:00 +00002301 // Create a new closure.
2302 frame_->EmitPush(cp);
Steve Block3ce2e202009-11-05 08:53:23 +00002303 __ mov(r0, Operand(boilerplate));
2304 frame_->EmitPush(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002305 frame_->CallRuntime(Runtime::kNewClosure, 2);
2306 frame_->EmitPush(r0);
2307}
2308
2309
2310void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2311#ifdef DEBUG
2312 int original_height = frame_->height();
2313#endif
2314 VirtualFrame::SpilledScope spilled_scope;
2315 Comment cmnt(masm_, "[ FunctionLiteral");
2316
2317 // Build the function boilerplate and instantiate it.
Steve Blockd0582a62009-12-15 09:54:21 +00002318 Handle<JSFunction> boilerplate =
2319 Compiler::BuildBoilerplate(node, script_, this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002320 // Check for stack-overflow exception.
2321 if (HasStackOverflow()) {
2322 ASSERT(frame_->height() == original_height);
2323 return;
2324 }
2325 InstantiateBoilerplate(boilerplate);
2326 ASSERT(frame_->height() == original_height + 1);
2327}
2328
2329
2330void CodeGenerator::VisitFunctionBoilerplateLiteral(
2331 FunctionBoilerplateLiteral* node) {
2332#ifdef DEBUG
2333 int original_height = frame_->height();
2334#endif
2335 VirtualFrame::SpilledScope spilled_scope;
2336 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2337 InstantiateBoilerplate(node->boilerplate());
2338 ASSERT(frame_->height() == original_height + 1);
2339}
2340
2341
2342void CodeGenerator::VisitConditional(Conditional* node) {
2343#ifdef DEBUG
2344 int original_height = frame_->height();
2345#endif
2346 VirtualFrame::SpilledScope spilled_scope;
2347 Comment cmnt(masm_, "[ Conditional");
2348 JumpTarget then;
2349 JumpTarget else_;
Steve Blockd0582a62009-12-15 09:54:21 +00002350 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002351 if (has_valid_frame()) {
2352 Branch(false, &else_);
2353 }
2354 if (has_valid_frame() || then.is_linked()) {
2355 then.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002356 LoadAndSpill(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002357 }
2358 if (else_.is_linked()) {
2359 JumpTarget exit;
2360 if (has_valid_frame()) exit.Jump();
2361 else_.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002362 LoadAndSpill(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002363 if (exit.is_linked()) exit.Bind();
2364 }
2365 ASSERT(frame_->height() == original_height + 1);
2366}
2367
2368
2369void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
2370 VirtualFrame::SpilledScope spilled_scope;
2371 if (slot->type() == Slot::LOOKUP) {
2372 ASSERT(slot->var()->is_dynamic());
2373
2374 JumpTarget slow;
2375 JumpTarget done;
2376
2377 // Generate fast-case code for variables that might be shadowed by
2378 // eval-introduced variables. Eval is used a lot without
2379 // introducing variables. In those cases, we do not want to
2380 // perform a runtime call for all variables in the scope
2381 // containing the eval.
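    // (For example, in 'function f(s) { eval(s); return x; }' the load of
    // 'x' must first check that the eval call did not create a context
    // extension shadowing 'x'; only then can the fast global or local
    // case be used.)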
2382 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
2383 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
2384 // If there was no control flow to slow, we can exit early.
2385 if (!slow.is_linked()) {
2386 frame_->EmitPush(r0);
2387 return;
2388 }
2389
2390 done.Jump();
2391
2392 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
2393 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
2394 // Only generate the fast case for locals that rewrite to slots.
2395 // This rules out argument loads.
2396 if (potential_slot != NULL) {
2397 __ ldr(r0,
2398 ContextSlotOperandCheckExtensions(potential_slot,
2399 r1,
2400 r2,
2401 &slow));
2402 if (potential_slot->var()->mode() == Variable::CONST) {
2403 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2404 __ cmp(r0, ip);
2405 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2406 }
2407 // There is always control flow to slow from
2408 // ContextSlotOperandCheckExtensions so we have to jump around
2409 // it.
2410 done.Jump();
2411 }
2412 }
2413
2414 slow.Bind();
2415 frame_->EmitPush(cp);
2416 __ mov(r0, Operand(slot->var()->name()));
2417 frame_->EmitPush(r0);
2418
2419 if (typeof_state == INSIDE_TYPEOF) {
2420 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2421 } else {
2422 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2423 }
2424
2425 done.Bind();
2426 frame_->EmitPush(r0);
2427
2428 } else {
Steve Blocka7e24c12009-10-30 11:49:00 +00002429 // Special handling for locals allocated in registers.
2430 __ ldr(r0, SlotOperand(slot, r2));
2431 frame_->EmitPush(r0);
2432 if (slot->var()->mode() == Variable::CONST) {
2433 // Const slots may contain 'the hole' value (the constant hasn't been
2434 // initialized yet) which needs to be converted into the 'undefined'
2435 // value.
2436 Comment cmnt(masm_, "[ Unhole const");
2437 frame_->EmitPop(r0);
2438 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2439 __ cmp(r0, ip);
2440 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2441 frame_->EmitPush(r0);
2442 }
2443 }
2444}
2445
2446
2447void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
2448 TypeofState typeof_state,
2449 Register tmp,
2450 Register tmp2,
2451 JumpTarget* slow) {
2452 // Check that no extension objects have been created by calls to
2453 // eval from the current scope to the global scope.
2454 Register context = cp;
2455 Scope* s = scope();
2456 while (s != NULL) {
2457 if (s->num_heap_slots() > 0) {
2458 if (s->calls_eval()) {
2459 // Check that extension is NULL.
2460 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
2461 __ tst(tmp2, tmp2);
2462 slow->Branch(ne);
2463 }
2464 // Load next context in chain.
2465 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
2466 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2467 context = tmp;
2468 }
2469 // If no outer scope calls eval, we do not need to check more
2470 // context extensions.
2471 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2472 s = s->outer_scope();
2473 }
2474
2475 if (s->is_eval_scope()) {
2476 Label next, fast;
2477 if (!context.is(tmp)) {
2478 __ mov(tmp, Operand(context));
2479 }
2480 __ bind(&next);
2481 // Terminate at global context.
2482 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2483 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
2484 __ cmp(tmp2, ip);
2485 __ b(eq, &fast);
2486 // Check that extension is NULL.
2487 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2488 __ tst(tmp2, tmp2);
2489 slow->Branch(ne);
2490 // Load next context in chain.
2491 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
2492 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2493 __ b(&next);
2494 __ bind(&fast);
2495 }
2496
2497 // All extension objects were empty and it is safe to use a global
2498 // load IC call.
2499 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
2500 // Load the global object.
2501 LoadGlobal();
2502 // Setup the name register.
2503 Result name(r2);
2504 __ mov(r2, Operand(slot->var()->name()));
2505 // Call IC stub.
2506 if (typeof_state == INSIDE_TYPEOF) {
2507 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
2508 } else {
2509 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
2510 }
2511
2512 // Drop the global object. The result is in r0.
2513 frame_->Drop();
2514}
2515
2516
2517void CodeGenerator::VisitSlot(Slot* node) {
2518#ifdef DEBUG
2519 int original_height = frame_->height();
2520#endif
2521 VirtualFrame::SpilledScope spilled_scope;
2522 Comment cmnt(masm_, "[ Slot");
Steve Blockd0582a62009-12-15 09:54:21 +00002523 LoadFromSlot(node, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00002524 ASSERT(frame_->height() == original_height + 1);
2525}
2526
2527
2528void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2529#ifdef DEBUG
2530 int original_height = frame_->height();
2531#endif
2532 VirtualFrame::SpilledScope spilled_scope;
2533 Comment cmnt(masm_, "[ VariableProxy");
2534
2535 Variable* var = node->var();
2536 Expression* expr = var->rewrite();
2537 if (expr != NULL) {
2538 Visit(expr);
2539 } else {
2540 ASSERT(var->is_global());
2541 Reference ref(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002542 ref.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002543 }
2544 ASSERT(frame_->height() == original_height + 1);
2545}
2546
2547
2548void CodeGenerator::VisitLiteral(Literal* node) {
2549#ifdef DEBUG
2550 int original_height = frame_->height();
2551#endif
2552 VirtualFrame::SpilledScope spilled_scope;
2553 Comment cmnt(masm_, "[ Literal");
2554 __ mov(r0, Operand(node->handle()));
2555 frame_->EmitPush(r0);
2556 ASSERT(frame_->height() == original_height + 1);
2557}
2558
2559
2560void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2561#ifdef DEBUG
2562 int original_height = frame_->height();
2563#endif
2564 VirtualFrame::SpilledScope spilled_scope;
2565  Comment cmnt(masm_, "[ RegExp Literal");
2566
2567 // Retrieve the literal array and check the allocated entry.
2568
2569 // Load the function of this activation.
2570 __ ldr(r1, frame_->Function());
2571
2572 // Load the literals array of the function.
2573 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2574
2575 // Load the literal at the ast saved index.
2576 int literal_offset =
2577 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2578 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2579
2580 JumpTarget done;
2581 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2582 __ cmp(r2, ip);
2583 done.Branch(ne);
2584
2585  // If the entry is undefined we call the runtime system to compute
2586 // the literal.
2587 frame_->EmitPush(r1); // literal array (0)
2588 __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
2589 frame_->EmitPush(r0); // literal index (1)
2590 __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
2591 frame_->EmitPush(r0);
2592 __ mov(r0, Operand(node->flags())); // RegExp flags (3)
2593 frame_->EmitPush(r0);
2594 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2595 __ mov(r2, Operand(r0));
2596
2597 done.Bind();
2598 // Push the literal.
2599 frame_->EmitPush(r2);
2600 ASSERT(frame_->height() == original_height + 1);
2601}
2602
2603
2604// This deferred code stub will be used for creating the boilerplate
2605// by calling Runtime_CreateObjectLiteralBoilerplate.
2606// Each created boilerplate is stored in the JSFunction and they are
2607// therefore context dependent.
2608class DeferredObjectLiteral: public DeferredCode {
2609 public:
2610 explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
2611 set_comment("[ DeferredObjectLiteral");
2612 }
2613
2614 virtual void Generate();
2615
2616 private:
2617 ObjectLiteral* node_;
2618};
2619
2620
2621void DeferredObjectLiteral::Generate() {
2622 // Argument is passed in r1.
2623
2624 // If the entry is undefined we call the runtime system to compute
2625 // the literal.
2626 // Literal array (0).
2627 __ push(r1);
2628 // Literal index (1).
2629 __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
2630 __ push(r0);
2631 // Constant properties (2).
2632 __ mov(r0, Operand(node_->constant_properties()));
2633 __ push(r0);
2634 __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
2635 __ mov(r2, Operand(r0));
2636 // Result is returned in r2.
2637}
2638
2639
2640void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2641#ifdef DEBUG
2642 int original_height = frame_->height();
2643#endif
2644 VirtualFrame::SpilledScope spilled_scope;
2645 Comment cmnt(masm_, "[ ObjectLiteral");
2646
2647 DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
2648
2649 // Retrieve the literal array and check the allocated entry.
2650
2651 // Load the function of this activation.
2652 __ ldr(r1, frame_->Function());
2653
2654 // Load the literals array of the function.
2655 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2656
2657 // Load the literal at the ast saved index.
2658 int literal_offset =
2659 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2660 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2661
2662 // Check whether we need to materialize the object literal boilerplate.
2663 // If so, jump to the deferred code.
2664 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2665 __ cmp(r2, Operand(ip));
2666 deferred->Branch(eq);
2667 deferred->BindExit();
2668
2669 // Push the object literal boilerplate.
2670 frame_->EmitPush(r2);
2671
2672 // Clone the boilerplate object.
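  // (A depth of 1 means the literal contains no nested object or array
  // literals needing their own boilerplates, so a shallow clone suffices;
  // deeper literals use the general clone path.)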
2673 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2674 if (node->depth() == 1) {
2675 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2676 }
2677 frame_->CallRuntime(clone_function_id, 1);
2678 frame_->EmitPush(r0); // save the result
2679 // r0: cloned object literal
2680
2681 for (int i = 0; i < node->properties()->length(); i++) {
2682 ObjectLiteral::Property* property = node->properties()->at(i);
2683 Literal* key = property->key();
2684 Expression* value = property->value();
2685 switch (property->kind()) {
2686 case ObjectLiteral::Property::CONSTANT:
2687 break;
2688 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2689 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2690 // else fall through
2691 case ObjectLiteral::Property::COMPUTED: // fall through
2692 case ObjectLiteral::Property::PROTOTYPE: {
2693 frame_->EmitPush(r0); // dup the result
2694 LoadAndSpill(key);
2695 LoadAndSpill(value);
2696 frame_->CallRuntime(Runtime::kSetProperty, 3);
2697 // restore r0
2698 __ ldr(r0, frame_->Top());
2699 break;
2700 }
2701 case ObjectLiteral::Property::SETTER: {
2702 frame_->EmitPush(r0);
2703 LoadAndSpill(key);
2704 __ mov(r0, Operand(Smi::FromInt(1)));
2705 frame_->EmitPush(r0);
2706 LoadAndSpill(value);
2707 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2708 __ ldr(r0, frame_->Top());
2709 break;
2710 }
2711 case ObjectLiteral::Property::GETTER: {
2712 frame_->EmitPush(r0);
2713 LoadAndSpill(key);
2714 __ mov(r0, Operand(Smi::FromInt(0)));
2715 frame_->EmitPush(r0);
2716 LoadAndSpill(value);
2717 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2718 __ ldr(r0, frame_->Top());
2719 break;
2720 }
2721 }
2722 }
2723 ASSERT(frame_->height() == original_height + 1);
2724}
2725
2726
2727// This deferred code stub will be used for creating the boilerplate
2728// by calling Runtime_CreateArrayLiteralBoilerplate.
2729// Each created boilerplate is stored in the JSFunction and they are
2730// therefore context dependent.
2731class DeferredArrayLiteral: public DeferredCode {
2732 public:
2733 explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
2734 set_comment("[ DeferredArrayLiteral");
2735 }
2736
2737 virtual void Generate();
2738
2739 private:
2740 ArrayLiteral* node_;
2741};
2742
2743
2744void DeferredArrayLiteral::Generate() {
2745 // Argument is passed in r1.
2746
2747  // If the entry is undefined we call the runtime system to compute
2748 // the literal.
2749 // Literal array (0).
2750 __ push(r1);
2751 // Literal index (1).
2752 __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
2753 __ push(r0);
2754  // Constant elements (2).
2755 __ mov(r0, Operand(node_->literals()));
2756 __ push(r0);
2757 __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
2758 __ mov(r2, Operand(r0));
2759 // Result is returned in r2.
2760}
2761
2762
2763void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2764#ifdef DEBUG
2765 int original_height = frame_->height();
2766#endif
2767 VirtualFrame::SpilledScope spilled_scope;
2768 Comment cmnt(masm_, "[ ArrayLiteral");
2769
2770 DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
2771
2772 // Retrieve the literal array and check the allocated entry.
2773
2774 // Load the function of this activation.
2775 __ ldr(r1, frame_->Function());
2776
2777 // Load the literals array of the function.
2778 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2779
2780 // Load the literal at the ast saved index.
2781 int literal_offset =
2782 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2783 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2784
2785  // Check whether we need to materialize the array literal boilerplate.
2786 // If so, jump to the deferred code.
2787 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2788 __ cmp(r2, Operand(ip));
2789 deferred->Branch(eq);
2790 deferred->BindExit();
2791
2792  // Push the array literal boilerplate.
2793 frame_->EmitPush(r2);
2794
2795 // Clone the boilerplate object.
2796 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
2797 if (node->depth() == 1) {
2798 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
2799 }
2800 frame_->CallRuntime(clone_function_id, 1);
2801 frame_->EmitPush(r0); // save the result
2802  // r0: cloned array literal
2803
2804 // Generate code to set the elements in the array that are not
2805 // literals.
2806 for (int i = 0; i < node->values()->length(); i++) {
2807 Expression* value = node->values()->at(i);
2808
2809 // If value is a literal the property value is already set in the
2810 // boilerplate object.
2811 if (value->AsLiteral() != NULL) continue;
2812 // If value is a materialized literal the property value is already set
2813 // in the boilerplate object if it is simple.
2814 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2815
2816 // The property must be set by generated code.
2817 LoadAndSpill(value);
2818 frame_->EmitPop(r0);
2819
2820    // Fetch the cloned array literal.
2821 __ ldr(r1, frame_->Top());
2822 // Get the elements array.
2823 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
2824
2825 // Write to the indexed properties array.
2826 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2827 __ str(r0, FieldMemOperand(r1, offset));
2828
2829 // Update the write barrier for the array address.
2830 __ mov(r3, Operand(offset));
2831 __ RecordWrite(r1, r3, r2);
2832 }
2833 ASSERT(frame_->height() == original_height + 1);
2834}
2835
2836
2837void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2838#ifdef DEBUG
2839 int original_height = frame_->height();
2840#endif
2841 VirtualFrame::SpilledScope spilled_scope;
2842 // Call runtime routine to allocate the catch extension object and
2843 // assign the exception value to the catch variable.
2844 Comment cmnt(masm_, "[ CatchExtensionObject");
2845 LoadAndSpill(node->key());
2846 LoadAndSpill(node->value());
2847 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2848 frame_->EmitPush(r0);
2849 ASSERT(frame_->height() == original_height + 1);
2850}
2851
2852
2853void CodeGenerator::VisitAssignment(Assignment* node) {
2854#ifdef DEBUG
2855 int original_height = frame_->height();
2856#endif
2857 VirtualFrame::SpilledScope spilled_scope;
2858 Comment cmnt(masm_, "[ Assignment");
2859
2860 { Reference target(this, node->target());
2861 if (target.is_illegal()) {
2862 // Fool the virtual frame into thinking that we left the assignment's
2863 // value on the frame.
2864 __ mov(r0, Operand(Smi::FromInt(0)));
2865 frame_->EmitPush(r0);
2866 ASSERT(frame_->height() == original_height + 1);
2867 return;
2868 }
2869
2870 if (node->op() == Token::ASSIGN ||
2871 node->op() == Token::INIT_VAR ||
2872 node->op() == Token::INIT_CONST) {
2873 LoadAndSpill(node->value());
2874
2875 } else {
2876 // +=, *= and similar binary assignments.
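      // (For example, 'x += 1' takes the SmiOperation fast path below,
      // while 'x += y' loads the right operand and falls back to
      // GenericBinaryOperation.)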
2877 // Get the old value of the lhs.
Steve Blockd0582a62009-12-15 09:54:21 +00002878 target.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002879 Literal* literal = node->value()->AsLiteral();
2880 bool overwrite =
2881 (node->value()->AsBinaryOperation() != NULL &&
2882 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2883 if (literal != NULL && literal->handle()->IsSmi()) {
2884 SmiOperation(node->binary_op(),
2885 literal->handle(),
2886 false,
2887 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2888 frame_->EmitPush(r0);
2889
2890 } else {
2891 LoadAndSpill(node->value());
2892 GenericBinaryOperation(node->binary_op(),
2893 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2894 frame_->EmitPush(r0);
2895 }
2896 }
2897
2898 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2899 if (var != NULL &&
2900 (var->mode() == Variable::CONST) &&
2901 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2902 // Assignment ignored - leave the value on the stack.
2903
2904 } else {
2905 CodeForSourcePosition(node->position());
2906 if (node->op() == Token::INIT_CONST) {
2907 // Dynamic constant initializations must use the function context
2908 // and initialize the actual constant declared. Dynamic variable
2909 // initializations are simply assignments and use SetValue.
2910 target.SetValue(CONST_INIT);
2911 } else {
2912 target.SetValue(NOT_CONST_INIT);
2913 }
2914 }
2915 }
2916 ASSERT(frame_->height() == original_height + 1);
2917}
2918
2919
2920void CodeGenerator::VisitThrow(Throw* node) {
2921#ifdef DEBUG
2922 int original_height = frame_->height();
2923#endif
2924 VirtualFrame::SpilledScope spilled_scope;
2925 Comment cmnt(masm_, "[ Throw");
2926
2927 LoadAndSpill(node->exception());
2928 CodeForSourcePosition(node->position());
2929 frame_->CallRuntime(Runtime::kThrow, 1);
2930 frame_->EmitPush(r0);
2931 ASSERT(frame_->height() == original_height + 1);
2932}
2933
2934
2935void CodeGenerator::VisitProperty(Property* node) {
2936#ifdef DEBUG
2937 int original_height = frame_->height();
2938#endif
2939 VirtualFrame::SpilledScope spilled_scope;
2940 Comment cmnt(masm_, "[ Property");
2941
2942 { Reference property(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002943 property.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002944 }
2945 ASSERT(frame_->height() == original_height + 1);
2946}
2947
2948
2949void CodeGenerator::VisitCall(Call* node) {
2950#ifdef DEBUG
2951 int original_height = frame_->height();
2952#endif
2953 VirtualFrame::SpilledScope spilled_scope;
2954 Comment cmnt(masm_, "[ Call");
2955
2956 Expression* function = node->expression();
2957 ZoneList<Expression*>* args = node->arguments();
2958
2959 // Standard function call.
2960 // Check if the function is a variable or a property.
2961 Variable* var = function->AsVariableProxy()->AsVariable();
2962 Property* property = function->AsProperty();
2963
2964 // ------------------------------------------------------------------------
2965 // Fast-case: Use inline caching.
2966 // ---
2967 // According to ECMA-262, section 11.2.3, page 44, the function to call
2968 // must be resolved after the arguments have been evaluated. The IC code
2969 // automatically handles this by loading the arguments before the function
2970 // is resolved in cache misses (this also holds for megamorphic calls).
2971 // ------------------------------------------------------------------------
2972
2973 if (var != NULL && var->is_possibly_eval()) {
2974 // ----------------------------------
2975 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2976 // ----------------------------------
2977
2978 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2979 // resolve the function we need to call and the receiver of the
2980 // call. Then we call the resolved function using the given
2981 // arguments.
2982 // Prepare stack for call to resolved function.
2983 LoadAndSpill(function);
2984 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2985 frame_->EmitPush(r2); // Slot for receiver
2986 int arg_count = args->length();
2987 for (int i = 0; i < arg_count; i++) {
2988 LoadAndSpill(args->at(i));
2989 }
2990
2991 // Prepare stack for call to ResolvePossiblyDirectEval.
2992 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
2993 frame_->EmitPush(r1);
2994 if (arg_count > 0) {
2995 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
2996 frame_->EmitPush(r1);
2997 } else {
2998 frame_->EmitPush(r2);
2999 }
3000
3001 // Resolve the call.
3002 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
3003
3004 // Touch up stack with the right values for the function and the receiver.
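    // (r0 now holds a fixed array whose first two elements are the
    // resolved function and the receiver; they overwrite the function
    // and receiver slots pushed above.)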
3005 __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
3006 __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
3007 __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
3008 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
3009
3010 // Call the function.
3011 CodeForSourcePosition(node->position());
3012
3013 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3014 CallFunctionStub call_function(arg_count, in_loop);
3015 frame_->CallStub(&call_function, arg_count + 1);
3016
3017 __ ldr(cp, frame_->Context());
3018 // Remove the function from the stack.
3019 frame_->Drop();
3020 frame_->EmitPush(r0);
3021
3022 } else if (var != NULL && !var->is_this() && var->is_global()) {
3023 // ----------------------------------
3024 // JavaScript example: 'foo(1, 2, 3)' // foo is global
3025 // ----------------------------------
3026
3027 // Push the name of the function and the receiver onto the stack.
3028 __ mov(r0, Operand(var->name()));
3029 frame_->EmitPush(r0);
3030
3031 // Pass the global object as the receiver and let the IC stub
3032 // patch the stack to use the global proxy as 'this' in the
3033 // invoked function.
3034 LoadGlobal();
3035
3036 // Load the arguments.
3037 int arg_count = args->length();
3038 for (int i = 0; i < arg_count; i++) {
3039 LoadAndSpill(args->at(i));
3040 }
3041
3042 // Setup the receiver register and call the IC initialization code.
3043 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3044 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3045 CodeForSourcePosition(node->position());
3046 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3047 arg_count + 1);
3048 __ ldr(cp, frame_->Context());
3049 // Remove the function from the stack.
3050 frame_->Drop();
3051 frame_->EmitPush(r0);
3052
3053 } else if (var != NULL && var->slot() != NULL &&
3054 var->slot()->type() == Slot::LOOKUP) {
3055 // ----------------------------------
3056 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
3057 // ----------------------------------
3058
3059 // Load the function
3060 frame_->EmitPush(cp);
3061 __ mov(r0, Operand(var->name()));
3062 frame_->EmitPush(r0);
3063 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3064 // r0: slot value; r1: receiver
3065
3066 // Load the receiver.
3067 frame_->EmitPush(r0); // function
3068 frame_->EmitPush(r1); // receiver
3069
3070 // Call the function.
3071 CallWithArguments(args, node->position());
3072 frame_->EmitPush(r0);
3073
3074 } else if (property != NULL) {
3075 // Check if the key is a literal string.
3076 Literal* literal = property->key()->AsLiteral();
3077
3078 if (literal != NULL && literal->handle()->IsSymbol()) {
3079 // ------------------------------------------------------------------
3080 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
3081 // ------------------------------------------------------------------
3082
3083 // Push the name of the function and the receiver onto the stack.
3084 __ mov(r0, Operand(literal->handle()));
3085 frame_->EmitPush(r0);
3086 LoadAndSpill(property->obj());
3087
3088 // Load the arguments.
3089 int arg_count = args->length();
3090 for (int i = 0; i < arg_count; i++) {
3091 LoadAndSpill(args->at(i));
3092 }
3093
3094 // Set the receiver register and call the IC initialization code.
3095 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3096 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3097 CodeForSourcePosition(node->position());
3098 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3099 __ ldr(cp, frame_->Context());
3100
3101 // Remove the function from the stack.
3102 frame_->Drop();
3103
3104      frame_->EmitPush(r0);  // push the result after removing the function from the stack
3105
3106 } else {
3107 // -------------------------------------------
3108 // JavaScript example: 'array[index](1, 2, 3)'
3109 // -------------------------------------------
3110
3111 // Load the function to call from the property through a reference.
3112 Reference ref(this, property);
Steve Blockd0582a62009-12-15 09:54:21 +00003113 ref.GetValueAndSpill(); // receiver
Steve Blocka7e24c12009-10-30 11:49:00 +00003114
3115 // Pass receiver to called function.
3116 if (property->is_synthetic()) {
3117 LoadGlobalReceiver(r0);
3118 } else {
3119 __ ldr(r0, frame_->ElementAt(ref.size()));
3120 frame_->EmitPush(r0);
3121 }
3122
3123 // Call the function.
3124 CallWithArguments(args, node->position());
3125 frame_->EmitPush(r0);
3126 }
3127
3128 } else {
3129 // ----------------------------------
3130 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
3131 // ----------------------------------
3132
3133 // Load the function.
3134 LoadAndSpill(function);
3135
3136 // Pass the global proxy as the receiver.
3137 LoadGlobalReceiver(r0);
3138
3139 // Call the function.
3140 CallWithArguments(args, node->position());
3141 frame_->EmitPush(r0);
3142 }
3143 ASSERT(frame_->height() == original_height + 1);
3144}
3145
3146
3147void CodeGenerator::VisitCallNew(CallNew* node) {
3148#ifdef DEBUG
3149 int original_height = frame_->height();
3150#endif
3151 VirtualFrame::SpilledScope spilled_scope;
3152 Comment cmnt(masm_, "[ CallNew");
3153
3154 // According to ECMA-262, section 11.2.2, page 44, the function
3155 // expression in new calls must be evaluated before the
3156 // arguments. This is different from ordinary calls, where the
3157 // actual function to call is resolved after the arguments have been
3158 // evaluated.
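  // JavaScript example: in 'new f(g())' the value of 'f' is loaded before
  // g() runs, whereas in the plain call 'f(g())' the function is resolved
  // only after the arguments have been evaluated.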
3159
3160 // Compute function to call and use the global object as the
3161 // receiver. There is no need to use the global proxy here because
3162 // it will always be replaced with a newly allocated object.
3163 LoadAndSpill(node->expression());
3164 LoadGlobal();
3165
3166 // Push the arguments ("left-to-right") on the stack.
3167 ZoneList<Expression*>* args = node->arguments();
3168 int arg_count = args->length();
3169 for (int i = 0; i < arg_count; i++) {
3170 LoadAndSpill(args->at(i));
3171 }
3172
3173 // r0: the number of arguments.
3174 Result num_args(r0);
3175 __ mov(r0, Operand(arg_count));
3176
3177 // Load the function into r1 as per calling convention.
3178 Result function(r1);
3179 __ ldr(r1, frame_->ElementAt(arg_count + 1));
3180
3181 // Call the construct call builtin that handles allocation and
3182 // constructor invocation.
3183 CodeForSourcePosition(node->position());
3184 Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
3185 frame_->CallCodeObject(ic,
3186 RelocInfo::CONSTRUCT_CALL,
3187 &num_args,
3188 &function,
3189 arg_count + 1);
3190
3191 // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
3192 __ str(r0, frame_->Top());
3193 ASSERT(frame_->height() == original_height + 1);
3194}
3195
3196
3197void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
3198 VirtualFrame::SpilledScope spilled_scope;
3199 ASSERT(args->length() == 1);
3200 JumpTarget leave, null, function, non_function_constructor;
3201
3202 // Load the object into r0.
3203 LoadAndSpill(args->at(0));
3204 frame_->EmitPop(r0);
3205
3206 // If the object is a smi, we return null.
3207 __ tst(r0, Operand(kSmiTagMask));
3208 null.Branch(eq);
3209
3210 // Check that the object is a JS object but take special care of JS
3211 // functions to make sure they have 'Function' as their class.
3212 __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
3213 null.Branch(lt);
3214
3215 // As long as JS_FUNCTION_TYPE is the last instance type and it is
3216 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
3217 // LAST_JS_OBJECT_TYPE.
3218 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
3219 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
3220 __ cmp(r1, Operand(JS_FUNCTION_TYPE));
3221 function.Branch(eq);
3222
3223 // Check if the constructor in the map is a function.
3224 __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
3225 __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
3226 non_function_constructor.Branch(ne);
3227
3228 // The r0 register now contains the constructor function. Grab the
3229 // instance class name from there.
3230 __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
3231 __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
3232 frame_->EmitPush(r0);
3233 leave.Jump();
3234
3235 // Functions have class 'Function'.
3236 function.Bind();
3237 __ mov(r0, Operand(Factory::function_class_symbol()));
3238 frame_->EmitPush(r0);
3239 leave.Jump();
3240
3241 // Objects with a non-function constructor have class 'Object'.
3242 non_function_constructor.Bind();
3243 __ mov(r0, Operand(Factory::Object_symbol()));
3244 frame_->EmitPush(r0);
3245 leave.Jump();
3246
3247 // Non-JS objects have class null.
3248 null.Bind();
3249 __ LoadRoot(r0, Heap::kNullValueRootIndex);
3250 frame_->EmitPush(r0);
3251
3252 // All done.
3253 leave.Bind();
3254}
3255
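// Summary of the classification above (a sketch of the emitted checks, not
// additional ones):
//
//   smi                                        -> null
//   instance type below FIRST_JS_OBJECT_TYPE   -> null
//   JS_FUNCTION_TYPE                           -> 'Function'
//   map's constructor is not a JSFunction      -> 'Object'
//   otherwise -> the constructor's shared instance class name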
3256
3257void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
3258 VirtualFrame::SpilledScope spilled_scope;
3259 ASSERT(args->length() == 1);
3260 JumpTarget leave;
3261 LoadAndSpill(args->at(0));
3262 frame_->EmitPop(r0); // r0 contains object.
3263 // if (object->IsSmi()) return the object.
3264 __ tst(r0, Operand(kSmiTagMask));
3265 leave.Branch(eq);
3266 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3267 __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
3268 leave.Branch(ne);
3269 // Load the value.
3270 __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
3271 leave.Bind();
3272 frame_->EmitPush(r0);
3273}
3274
3275
3276void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
3277 VirtualFrame::SpilledScope spilled_scope;
3278 ASSERT(args->length() == 2);
3279 JumpTarget leave;
3280 LoadAndSpill(args->at(0)); // Load the object.
3281 LoadAndSpill(args->at(1)); // Load the value.
3282 frame_->EmitPop(r0); // r0 contains value
3283 frame_->EmitPop(r1); // r1 contains object
3284 // if (object->IsSmi()) return object.
3285 __ tst(r1, Operand(kSmiTagMask));
3286 leave.Branch(eq);
3287 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3288 __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
3289 leave.Branch(ne);
3290 // Store the value.
3291 __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
3292 // Update the write barrier.
3293 __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
3294 __ RecordWrite(r1, r2, r3);
3295 // Leave.
3296 leave.Bind();
3297 frame_->EmitPush(r0);
3298}
3299
3300
3301void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3302 VirtualFrame::SpilledScope spilled_scope;
3303 ASSERT(args->length() == 1);
3304 LoadAndSpill(args->at(0));
3305 frame_->EmitPop(r0);
3306 __ tst(r0, Operand(kSmiTagMask));
3307 cc_reg_ = eq;
3308}
3309
3310
3311void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3312 VirtualFrame::SpilledScope spilled_scope;
3313 // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
3314 ASSERT_EQ(args->length(), 3);
3315#ifdef ENABLE_LOGGING_AND_PROFILING
3316 if (ShouldGenerateLog(args->at(0))) {
3317 LoadAndSpill(args->at(1));
3318 LoadAndSpill(args->at(2));
3319 __ CallRuntime(Runtime::kLog, 2);
3320 }
3321#endif
3322 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3323 frame_->EmitPush(r0);
3324}
3325
3326
3327void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3328 VirtualFrame::SpilledScope spilled_scope;
3329 ASSERT(args->length() == 1);
3330 LoadAndSpill(args->at(0));
3331 frame_->EmitPop(r0);
3332 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3333 cc_reg_ = eq;
3334}
3335
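// A minimal standalone sketch of the smi tests emitted above, assuming the
// usual 32-bit encoding in this port (kSmiTag == 0, kSmiTagSize == 1): a smi
// is the integer value shifted left by one, so the low bit is the tag.
// Kept under #if 0: illustration only.
#if 0
#include <stdint.h>

static inline bool IsSmiWord(uint32_t tagged) {
  return (tagged & 1u) == 0;  // kSmiTagMask == 1.
}

static inline bool IsNonNegativeSmiWord(uint32_t tagged) {
  // One test covers both conditions, exactly like
  // tst(r0, Operand(kSmiTagMask | 0x80000000u)): tag bit and sign bit clear.
  return (tagged & (1u | 0x80000000u)) == 0;
}

static inline int32_t SmiWordValue(uint32_t tagged) {
  return static_cast<int32_t>(tagged) >> 1;  // Undo the tag shift.
}
#endif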
3336
3337// This should generate code that performs a charCodeAt() call or returns
3338// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
3339// It handles flat 8 and 16 bit strings and cons strings with an empty second part; anything else falls back to the slow case.
3340void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3341 VirtualFrame::SpilledScope spilled_scope;
3342 ASSERT(args->length() == 2);
3343 Comment(masm_, "[ GenerateFastCharCodeAt");
3344
3345 LoadAndSpill(args->at(0));
3346 LoadAndSpill(args->at(1));
3347 frame_->EmitPop(r0); // Index.
3348 frame_->EmitPop(r1); // String.
3349
3350 Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
3351
3352 __ tst(r1, Operand(kSmiTagMask));
3353 __ b(eq, &slow); // The 'string' was a Smi.
3354
3355 ASSERT(kSmiTag == 0);
3356 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3357 __ b(ne, &slow); // The index was negative or not a Smi.
3358
3359 __ bind(&try_again_with_new_string);
3360 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
3361 __ b(ge, &slow);
3362
3363 // Now r2 has the string type.
3364 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
3365 // Now r3 has the length of the string. Compare with the index.
3366 __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
3367 __ b(le, &slow);
3368
3369 // Here we know the index is in range. Check that string is sequential.
3370 ASSERT_EQ(0, kSeqStringTag);
3371 __ tst(r2, Operand(kStringRepresentationMask));
3372 __ b(ne, &not_a_flat_string);
3373
3374 // Check whether it is an ASCII string.
3375 ASSERT_EQ(0, kTwoByteStringTag);
3376 __ tst(r2, Operand(kStringEncodingMask));
3377 __ b(ne, &ascii_string);
3378
3379 // 2-byte string. We can add without shifting since the Smi tag size is the
3380 // log2 of the number of bytes in a two-byte character.
3381 ASSERT_EQ(1, kSmiTagSize);
3382 ASSERT_EQ(0, kSmiShiftSize);
3383 __ add(r1, r1, Operand(r0));
3384 __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
3385 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
3386 __ jmp(&end);
3387
3388 __ bind(&ascii_string);
3389 __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
3390 __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
3391 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
3392 __ jmp(&end);
3393
3394 __ bind(&not_a_flat_string);
3395 __ and_(r2, r2, Operand(kStringRepresentationMask));
3396 __ cmp(r2, Operand(kConsStringTag));
3397 __ b(ne, &slow);
3398
3399 // ConsString.
3400 // Check that the right hand side is the empty string (i.e. if this is really a
3401 // flat string in a cons string). If that is not the case we would rather go
3402 // to the runtime system now, to flatten the string.
3403 __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
3404 __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
3405 __ cmp(r2, Operand(r3));
3406 __ b(ne, &slow);
3407
3408 // Get the first of the two strings.
3409 __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
3410 __ jmp(&try_again_with_new_string);
3411
3412 __ bind(&slow);
3413 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3414
3415 __ bind(&end);
3416 frame_->EmitPush(r0);
3417}
3418
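// A toy model of the fast path above (kept under #if 0: illustration only).
// ToyString and its fields are stand-ins, not V8 types; the point is the
// control flow: only sequential strings, and cons strings whose second part
// is empty, are handled inline, and -1 stands in for "undefined", i.e. the
// slow case handled by the runtime.
#if 0
#include <string>

struct ToyString {
  std::string flat;             // Payload when this is a sequential string.
  ToyString* first = nullptr;   // Set (together with second) for a cons.
  ToyString* second = nullptr;
};

static int ToyFastCharCodeAt(const ToyString* s, int index) {
  if (index < 0) return -1;                             // Negative index: slow case.
  while (s->first != nullptr) {                         // Cons string.
    if (s->second->first != nullptr || !s->second->flat.empty()) {
      return -1;                                        // Needs flattening: slow case.
    }
    s = s->first;                                       // Retry with the left branch.
  }
  if (index >= static_cast<int>(s->flat.size())) return -1;  // Out of range: slow case.
  return static_cast<unsigned char>(s->flat[index]);    // The character code.
}
#endif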
3419
3420void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3421 VirtualFrame::SpilledScope spilled_scope;
3422 ASSERT(args->length() == 1);
3423 LoadAndSpill(args->at(0));
3424 JumpTarget answer;
3425 // We need the CC bits to come out as not_equal in the case where the
3426 // object is a smi. This can't be done with the usual test opcode so
3427 // we use XOR to get the right CC bits.
3428 frame_->EmitPop(r0);
3429 __ and_(r1, r0, Operand(kSmiTagMask));
3430 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
3431 answer.Branch(ne);
3432 // It is a heap object - get the map. Check if the object is a JS array.
3433 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
3434 answer.Bind();
3435 cc_reg_ = eq;
3436}
3437
3438
3439void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
3440 // This generates a fast version of:
3441 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
3442 VirtualFrame::SpilledScope spilled_scope;
3443 ASSERT(args->length() == 1);
3444 LoadAndSpill(args->at(0));
3445 frame_->EmitPop(r1);
3446 __ tst(r1, Operand(kSmiTagMask));
3447 false_target()->Branch(eq);
3448
3449 __ LoadRoot(ip, Heap::kNullValueRootIndex);
3450 __ cmp(r1, ip);
3451 true_target()->Branch(eq);
3452
3453 Register map_reg = r2;
3454 __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
3455 // Undetectable objects behave like undefined when tested with typeof.
3456 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
3457 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
3458 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
3459 false_target()->Branch(eq);
3460
3461 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
3462 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
3463 false_target()->Branch(lt);
3464 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
3465 cc_reg_ = le;
3466}
3467
3468
3469void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
3470 // This generates a fast version of:
3471 // (%_ClassOf(arg) === 'Function')
3472 VirtualFrame::SpilledScope spilled_scope;
3473 ASSERT(args->length() == 1);
3474 LoadAndSpill(args->at(0));
3475 frame_->EmitPop(r0);
3476 __ tst(r0, Operand(kSmiTagMask));
3477 false_target()->Branch(eq);
3478 Register map_reg = r2;
3479 __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
3480 cc_reg_ = eq;
3481}
3482
3483
3484void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3485 VirtualFrame::SpilledScope spilled_scope;
3486 ASSERT(args->length() == 0);
3487
3488 // Get the frame pointer for the calling frame.
3489 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3490
3491 // Skip the arguments adaptor frame if it exists.
3492 Label check_frame_marker;
3493 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
3494 __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3495 __ b(ne, &check_frame_marker);
3496 __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
3497
3498 // Check the marker in the calling frame.
3499 __ bind(&check_frame_marker);
3500 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
3501 __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3502 cc_reg_ = eq;
3503}
3504
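// The frame walk above, as a pseudo-code sketch (the offsets are the
// StandardFrameConstants used by the generated code):
//
//   caller = Memory[fp + kCallerFPOffset]
//   if Memory[caller + kContextOffset] == Smi(ARGUMENTS_ADAPTOR):
//     caller = Memory[caller + kCallerFPOffset]
//   return Memory[caller + kMarkerOffset] == Smi(CONSTRUCT)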
3505
3506void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3507 VirtualFrame::SpilledScope spilled_scope;
3508 ASSERT(args->length() == 0);
3509
3510 // Seed the result with the formal parameters count, which will be used
3511 // in case no arguments adaptor frame is found below the current frame.
3512 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
3513
3514 // Call the shared stub to get to the arguments.length.
3515 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3516 frame_->CallStub(&stub, 0);
3517 frame_->EmitPush(r0);
3518}
3519
3520
3521void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3522 VirtualFrame::SpilledScope spilled_scope;
3523 ASSERT(args->length() == 1);
3524
3525 // Satisfy contract with ArgumentsAccessStub:
3526 // Load the key into r1 and the formal parameters count into r0.
3527 LoadAndSpill(args->at(0));
3528 frame_->EmitPop(r1);
3529 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
3530
3531 // Call the shared stub to get to arguments[key].
3532 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3533 frame_->CallStub(&stub, 0);
3534 frame_->EmitPush(r0);
3535}
3536
3537
3538void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3539 VirtualFrame::SpilledScope spilled_scope;
3540 ASSERT(args->length() == 0);
3541 __ Call(ExternalReference::random_positive_smi_function().address(),
3542 RelocInfo::RUNTIME_ENTRY);
3543 frame_->EmitPush(r0);
3544}
3545
3546
3547void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
3548 VirtualFrame::SpilledScope spilled_scope;
3549 LoadAndSpill(args->at(0));
3550 switch (op) {
3551 case SIN:
3552 frame_->CallRuntime(Runtime::kMath_sin, 1);
3553 break;
3554 case COS:
3555 frame_->CallRuntime(Runtime::kMath_cos, 1);
3556 break;
3557 }
3558 frame_->EmitPush(r0);
3559}
3560
3561
3562void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
3563 ASSERT_EQ(2, args->length());
3564
3565 Load(args->at(0));
3566 Load(args->at(1));
3567
3568 frame_->CallRuntime(Runtime::kStringAdd, 2);
3569 frame_->EmitPush(r0);
3570}
3571
3572
3573void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3574 VirtualFrame::SpilledScope spilled_scope;
3575 ASSERT(args->length() == 2);
3576
3577 // Load the two objects into registers and perform the comparison.
3578 LoadAndSpill(args->at(0));
3579 LoadAndSpill(args->at(1));
3580 frame_->EmitPop(r0);
3581 frame_->EmitPop(r1);
3582 __ cmp(r0, Operand(r1));
3583 cc_reg_ = eq;
3584}
3585
3586
3587void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
3588#ifdef DEBUG
3589 int original_height = frame_->height();
3590#endif
3591 VirtualFrame::SpilledScope spilled_scope;
3592 if (CheckForInlineRuntimeCall(node)) {
3593 ASSERT((has_cc() && frame_->height() == original_height) ||
3594 (!has_cc() && frame_->height() == original_height + 1));
3595 return;
3596 }
3597
3598 ZoneList<Expression*>* args = node->arguments();
3599 Comment cmnt(masm_, "[ CallRuntime");
3600 Runtime::Function* function = node->function();
3601
3602 if (function == NULL) {
3603 // Prepare stack for calling JS runtime function.
3604 __ mov(r0, Operand(node->name()));
3605 frame_->EmitPush(r0);
3606 // Push the builtins object found in the current global object.
3607 __ ldr(r1, GlobalObject());
3608 __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
3609 frame_->EmitPush(r0);
3610 }
3611
3612 // Push the arguments ("left-to-right").
3613 int arg_count = args->length();
3614 for (int i = 0; i < arg_count; i++) {
3615 LoadAndSpill(args->at(i));
3616 }
3617
3618 if (function == NULL) {
3619 // Call the JS runtime function.
3620 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3621 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3622 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3623 __ ldr(cp, frame_->Context());
3624 frame_->Drop();
3625 frame_->EmitPush(r0);
3626 } else {
3627 // Call the C runtime function.
3628 frame_->CallRuntime(function, arg_count);
3629 frame_->EmitPush(r0);
3630 }
3631 ASSERT(frame_->height() == original_height + 1);
3632}
3633
3634
3635void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
3636#ifdef DEBUG
3637 int original_height = frame_->height();
3638#endif
3639 VirtualFrame::SpilledScope spilled_scope;
3640 Comment cmnt(masm_, "[ UnaryOperation");
3641
3642 Token::Value op = node->op();
3643
3644 if (op == Token::NOT) {
3645 LoadConditionAndSpill(node->expression(),
3646 false_target(),
3647 true_target(),
3648 true);
3649 // LoadCondition may (and usually does) leave a test and branch to
3650 // be emitted by the caller. In that case, negate the condition.
3651 if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
3652
3653 } else if (op == Token::DELETE) {
3654 Property* property = node->expression()->AsProperty();
3655 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3656 if (property != NULL) {
3657 LoadAndSpill(property->obj());
3658 LoadAndSpill(property->key());
3659 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
3660
3661 } else if (variable != NULL) {
3662 Slot* slot = variable->slot();
3663 if (variable->is_global()) {
3664 LoadGlobal();
3665 __ mov(r0, Operand(variable->name()));
3666 frame_->EmitPush(r0);
3667 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
3668
3669 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3670 // Lookup the context holding the named variable.
3671 frame_->EmitPush(cp);
3672 __ mov(r0, Operand(variable->name()));
3673 frame_->EmitPush(r0);
3674 frame_->CallRuntime(Runtime::kLookupContext, 2);
3675 // r0: context
3676 frame_->EmitPush(r0);
3677 __ mov(r0, Operand(variable->name()));
3678 frame_->EmitPush(r0);
3679 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
3680
3681 } else {
3682 // Default: Result of deleting non-global, not dynamically
3683 // introduced variables is false.
3684 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
3685 }
3686
3687 } else {
3688 // Default: Result of deleting expressions is true.
3689 LoadAndSpill(node->expression()); // may have side-effects
3690 frame_->Drop();
3691 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
3692 }
3693 frame_->EmitPush(r0);
3694
3695 } else if (op == Token::TYPEOF) {
3696 // Special case for loading the typeof expression; see comment on
3697 // LoadTypeofExpression().
3698 LoadTypeofExpression(node->expression());
3699 frame_->CallRuntime(Runtime::kTypeof, 1);
3700 frame_->EmitPush(r0); // r0 has result
3701
3702 } else {
3703 LoadAndSpill(node->expression());
3704 frame_->EmitPop(r0);
3705 switch (op) {
3706 case Token::NOT:
3707 case Token::DELETE:
3708 case Token::TYPEOF:
3709 UNREACHABLE(); // handled above
3710 break;
3711
3712 case Token::SUB: {
3713 bool overwrite =
3714 (node->expression()->AsBinaryOperation() != NULL &&
3715 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
3716 UnarySubStub stub(overwrite);
3717 frame_->CallStub(&stub, 0);
3718 break;
3719 }
3720
3721 case Token::BIT_NOT: {
3722 // smi check
3723 JumpTarget smi_label;
3724 JumpTarget continue_label;
3725 __ tst(r0, Operand(kSmiTagMask));
3726 smi_label.Branch(eq);
3727
3728 frame_->EmitPush(r0);
3729 frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
3730
3731 continue_label.Jump();
3732 smi_label.Bind();
3733 __ mvn(r0, Operand(r0));
3734 __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
3735 continue_label.Bind();
3736 break;
3737 }
3738
3739 case Token::VOID:
3740 // since the stack top is cached in r0, popping and then
3741 // pushing a value can be done by just writing to r0.
3742 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3743 break;
3744
3745 case Token::ADD: {
3746 // Smi check.
3747 JumpTarget continue_label;
3748 __ tst(r0, Operand(kSmiTagMask));
3749 continue_label.Branch(eq);
3750 frame_->EmitPush(r0);
3751 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
3752 continue_label.Bind();
3753 break;
3754 }
3755 default:
3756 UNREACHABLE();
3757 }
3758 frame_->EmitPush(r0); // r0 has result
3759 }
3760 ASSERT(!has_valid_frame() ||
3761 (has_cc() && frame_->height() == original_height) ||
3762 (!has_cc() && frame_->height() == original_height + 1));
3763}
3764
3765
3766void CodeGenerator::VisitCountOperation(CountOperation* node) {
3767#ifdef DEBUG
3768 int original_height = frame_->height();
3769#endif
3770 VirtualFrame::SpilledScope spilled_scope;
3771 Comment cmnt(masm_, "[ CountOperation");
3772
3773 bool is_postfix = node->is_postfix();
3774 bool is_increment = node->op() == Token::INC;
3775
3776 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3777 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3778
3779 // Postfix: Make room for the result.
3780 if (is_postfix) {
3781 __ mov(r0, Operand(0));
3782 frame_->EmitPush(r0);
3783 }
3784
3785 { Reference target(this, node->expression());
3786 if (target.is_illegal()) {
3787 // Spoof the virtual frame to have the expected height (one higher
3788 // than on entry).
3789 if (!is_postfix) {
3790 __ mov(r0, Operand(Smi::FromInt(0)));
3791 frame_->EmitPush(r0);
3792 }
3793 ASSERT(frame_->height() == original_height + 1);
3794 return;
3795 }
3796 target.GetValueAndSpill();
3797 frame_->EmitPop(r0);
3798
3799 JumpTarget slow;
3800 JumpTarget exit;
3801
3802 // Load the value (1) into register r1.
3803 __ mov(r1, Operand(Smi::FromInt(1)));
3804
3805 // Check for smi operand.
3806 __ tst(r0, Operand(kSmiTagMask));
3807 slow.Branch(ne);
3808
3809 // Postfix: Store the old value as the result.
3810 if (is_postfix) {
3811 __ str(r0, frame_->ElementAt(target.size()));
3812 }
3813
3814 // Perform optimistic increment/decrement.
3815 if (is_increment) {
3816 __ add(r0, r0, Operand(r1), SetCC);
3817 } else {
3818 __ sub(r0, r0, Operand(r1), SetCC);
3819 }
3820
3821 // If the increment/decrement didn't overflow, we're done.
3822 exit.Branch(vc);
3823
3824 // Revert optimistic increment/decrement.
3825 if (is_increment) {
3826 __ sub(r0, r0, Operand(r1));
3827 } else {
3828 __ add(r0, r0, Operand(r1));
3829 }
3830
3831 // Slow case: Convert to number.
3832 slow.Bind();
3833 {
3834 // Convert the operand to a number.
3835 frame_->EmitPush(r0);
3836 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
3837 }
3838 if (is_postfix) {
3839 // Postfix: store to result (on the stack).
3840 __ str(r0, frame_->ElementAt(target.size()));
3841 }
3842
3843 // Compute the new value.
3844 __ mov(r1, Operand(Smi::FromInt(1)));
3845 frame_->EmitPush(r0);
3846 frame_->EmitPush(r1);
3847 if (is_increment) {
3848 frame_->CallRuntime(Runtime::kNumberAdd, 2);
3849 } else {
3850 frame_->CallRuntime(Runtime::kNumberSub, 2);
3851 }
3852
3853 // Store the new value in the target if not const.
3854 exit.Bind();
3855 frame_->EmitPush(r0);
3856 if (!is_const) target.SetValue(NOT_CONST_INIT);
3857 }
3858
3859 // Postfix: Discard the new value and use the old.
3860 if (is_postfix) frame_->EmitPop(r0);
3861 ASSERT(frame_->height() == original_height + 1);
3862}
3863
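// A minimal standalone sketch of the optimistic smi increment above (kept
// under #if 0: illustration only). Adding Smi::FromInt(1) to a tagged smi is
// a plain 32-bit add of 2; signed overflow of that add is exactly what the
// generated code detects via the V flag (the vc branch) before reverting and
// taking the generic number path.
#if 0
#include <stdint.h>

static bool OptimisticSmiIncrement(int32_t tagged_smi, int32_t* result) {
  int64_t wide = static_cast<int64_t>(tagged_smi) + 2;  // Smi::FromInt(1) == 2.
  if (wide > 0x7fffffffLL || wide < -0x80000000LL) {
    return false;  // Overflow: take the slow (generic number) path.
  }
  *result = static_cast<int32_t>(wide);  // Still a validly tagged smi.
  return true;
}
#endif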
3864
3865void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3866#ifdef DEBUG
3867 int original_height = frame_->height();
3868#endif
3869 VirtualFrame::SpilledScope spilled_scope;
3870 Comment cmnt(masm_, "[ BinaryOperation");
3871 Token::Value op = node->op();
3872
3873 // According to ECMA-262 section 11.11, page 58, the binary logical
3874 // operators must yield the result of one of the two expressions
3875 // before any ToBoolean() conversions. This means that the value
3876 // produced by a && or || operator is not necessarily a boolean.
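  // For example, ("" || "default") evaluates to "default", and (0 && f())
  // evaluates to 0 without ever calling f.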
3877
3878 // NOTE: If the left hand side produces a materialized value (not in
3879 // the CC register), we force the right hand side to do the
3880 // same. This is necessary because we may have to branch to the exit
3881 // after evaluating the left hand side (due to the shortcut
3882 // semantics), but the compiler must (statically) know if the result
3883 // of compiling the binary operation is materialized or not.
3884
3885 if (op == Token::AND) {
3886 JumpTarget is_true;
3887 LoadConditionAndSpill(node->left(),
3888 &is_true,
3889 false_target(),
3890 false);
3891 if (has_valid_frame() && !has_cc()) {
3892 // The left-hand side result is on top of the virtual frame.
3893 JumpTarget pop_and_continue;
3894 JumpTarget exit;
3895
3896 __ ldr(r0, frame_->Top()); // Duplicate the stack top.
3897 frame_->EmitPush(r0);
3898 // Avoid popping the result if it converts to 'false' using the
3899 // standard ToBoolean() conversion as described in ECMA-262,
3900 // section 9.2, page 30.
3901 ToBoolean(&pop_and_continue, &exit);
3902 Branch(false, &exit);
3903
3904 // Pop the result of evaluating the first part.
3905 pop_and_continue.Bind();
3906 frame_->EmitPop(r0);
3907
3908 // Evaluate right side expression.
3909 is_true.Bind();
3910 LoadAndSpill(node->right());
3911
3912 // Exit (always with a materialized value).
3913 exit.Bind();
3914 } else if (has_cc() || is_true.is_linked()) {
3915 // The left-hand side is either (a) partially compiled to
3916 // control flow with a final branch left to emit or (b) fully
3917 // compiled to control flow and possibly true.
3918 if (has_cc()) {
3919 Branch(false, false_target());
3920 }
3921 is_true.Bind();
3922 LoadConditionAndSpill(node->right(),
3923 true_target(),
3924 false_target(),
3925 false);
3926 } else {
3927 // Nothing to do.
3928 ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
3929 }
3930
3931 } else if (op == Token::OR) {
3932 JumpTarget is_false;
3933 LoadConditionAndSpill(node->left(),
3934 true_target(),
3935 &is_false,
3936 false);
3937 if (has_valid_frame() && !has_cc()) {
3938 // The left-hand side result is on top of the virtual frame.
3939 JumpTarget pop_and_continue;
3940 JumpTarget exit;
3941
3942 __ ldr(r0, frame_->Top());
3943 frame_->EmitPush(r0);
3944 // Avoid popping the result if it converts to 'true' using the
3945 // standard ToBoolean() conversion as described in ECMA-262,
3946 // section 9.2, page 30.
3947 ToBoolean(&exit, &pop_and_continue);
3948 Branch(true, &exit);
3949
3950 // Pop the result of evaluating the first part.
3951 pop_and_continue.Bind();
3952 frame_->EmitPop(r0);
3953
3954 // Evaluate right side expression.
3955 is_false.Bind();
3956 LoadAndSpill(node->right());
3957
3958 // Exit (always with a materialized value).
3959 exit.Bind();
3960 } else if (has_cc() || is_false.is_linked()) {
3961 // The left-hand side is either (a) partially compiled to
3962 // control flow with a final branch left to emit or (b) fully
3963 // compiled to control flow and possibly false.
3964 if (has_cc()) {
3965 Branch(true, true_target());
3966 }
3967 is_false.Bind();
3968 LoadConditionAndSpill(node->right(),
3969 true_target(),
3970 false_target(),
3971 false);
3972 } else {
3973 // Nothing to do.
3974 ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
3975 }
3976
3977 } else {
3978 // Optimize for the case where (at least) one of the expressions
3979 // is a literal small integer.
3980 Literal* lliteral = node->left()->AsLiteral();
3981 Literal* rliteral = node->right()->AsLiteral();
3982 // NOTE: The code below assumes that the slow cases (calls to runtime)
3983 // never return a constant/immutable object.
3984 bool overwrite_left =
3985 (node->left()->AsBinaryOperation() != NULL &&
3986 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
3987 bool overwrite_right =
3988 (node->right()->AsBinaryOperation() != NULL &&
3989 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
3990
3991 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
3992 LoadAndSpill(node->left());
3993 SmiOperation(node->op(),
3994 rliteral->handle(),
3995 false,
3996 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
3997
3998 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
3999 LoadAndSpill(node->right());
4000 SmiOperation(node->op(),
4001 lliteral->handle(),
4002 true,
4003 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
4004
4005 } else {
4006 OverwriteMode overwrite_mode = NO_OVERWRITE;
4007 if (overwrite_left) {
4008 overwrite_mode = OVERWRITE_LEFT;
4009 } else if (overwrite_right) {
4010 overwrite_mode = OVERWRITE_RIGHT;
4011 }
4012 LoadAndSpill(node->left());
4013 LoadAndSpill(node->right());
4014 GenericBinaryOperation(node->op(), overwrite_mode);
4015 }
4016 frame_->EmitPush(r0);
4017 }
4018 ASSERT(!has_valid_frame() ||
4019 (has_cc() && frame_->height() == original_height) ||
4020 (!has_cc() && frame_->height() == original_height + 1));
4021}
4022
4023
4024void CodeGenerator::VisitThisFunction(ThisFunction* node) {
4025#ifdef DEBUG
4026 int original_height = frame_->height();
4027#endif
4028 VirtualFrame::SpilledScope spilled_scope;
4029 __ ldr(r0, frame_->Function());
4030 frame_->EmitPush(r0);
4031 ASSERT(frame_->height() == original_height + 1);
4032}
4033
4034
4035void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
4036#ifdef DEBUG
4037 int original_height = frame_->height();
4038#endif
4039 VirtualFrame::SpilledScope spilled_scope;
4040 Comment cmnt(masm_, "[ CompareOperation");
4041
4042 // Get the expressions from the node.
4043 Expression* left = node->left();
4044 Expression* right = node->right();
4045 Token::Value op = node->op();
4046
4047 // To make null checks efficient, we check if either left or right is the
4048 // literal 'null'. If so, we optimize the code by inlining a null check
4049 // instead of calling the (very) general runtime routine for checking
4050 // equality.
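  // For example, (x == null) holds when x is null or undefined, while
  // (x === null) holds only for null itself.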
4051 if (op == Token::EQ || op == Token::EQ_STRICT) {
4052 bool left_is_null =
4053 left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
4054 bool right_is_null =
4055 right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
4056 // The 'null' value can only be equal to 'null' or 'undefined'.
4057 if (left_is_null || right_is_null) {
4058 LoadAndSpill(left_is_null ? right : left);
4059 frame_->EmitPop(r0);
4060 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4061 __ cmp(r0, ip);
4062
4063 // The 'null' value is only equal to 'undefined' if using non-strict
4064 // comparisons.
4065 if (op != Token::EQ_STRICT) {
4066 true_target()->Branch(eq);
4067
4068 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4069 __ cmp(r0, Operand(ip));
4070 true_target()->Branch(eq);
4071
4072 __ tst(r0, Operand(kSmiTagMask));
4073 false_target()->Branch(eq);
4074
4075 // It can be an undetectable object.
4076 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
4077 __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
4078 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
4079 __ cmp(r0, Operand(1 << Map::kIsUndetectable));
4080 }
4081
4082 cc_reg_ = eq;
4083 ASSERT(has_cc() && frame_->height() == original_height);
4084 return;
4085 }
4086 }
4087
4088 // To make typeof testing for natives implemented in JavaScript really
4089 // efficient, we generate special code for expressions of the form:
4090 // 'typeof <expression> == <string>'.
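  // For example, typeof x == 'number' compiles to a smi check plus a heap
  // number map check; the typeof result string is never materialized.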
4091 UnaryOperation* operation = left->AsUnaryOperation();
4092 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
4093 (operation != NULL && operation->op() == Token::TYPEOF) &&
4094 (right->AsLiteral() != NULL &&
4095 right->AsLiteral()->handle()->IsString())) {
4096 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
4097
4098 // Load the operand, move it to register r1.
4099 LoadTypeofExpression(operation->expression());
4100 frame_->EmitPop(r1);
4101
4102 if (check->Equals(Heap::number_symbol())) {
4103 __ tst(r1, Operand(kSmiTagMask));
4104 true_target()->Branch(eq);
4105 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4106 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4107 __ cmp(r1, ip);
4108 cc_reg_ = eq;
4109
4110 } else if (check->Equals(Heap::string_symbol())) {
4111 __ tst(r1, Operand(kSmiTagMask));
4112 false_target()->Branch(eq);
4113
4114 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4115
4116 // It can be an undetectable string object.
4117 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
4118 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
4119 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
4120 false_target()->Branch(eq);
4121
4122 __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
4123 __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
4124 cc_reg_ = lt;
4125
4126 } else if (check->Equals(Heap::boolean_symbol())) {
4127 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4128 __ cmp(r1, ip);
4129 true_target()->Branch(eq);
4130 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4131 __ cmp(r1, ip);
4132 cc_reg_ = eq;
4133
4134 } else if (check->Equals(Heap::undefined_symbol())) {
4135 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4136 __ cmp(r1, ip);
4137 true_target()->Branch(eq);
4138
4139 __ tst(r1, Operand(kSmiTagMask));
4140 false_target()->Branch(eq);
4141
4142 // It can be an undetectable object.
4143 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4144 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
4145 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
4146 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
4147
4148 cc_reg_ = eq;
4149
4150 } else if (check->Equals(Heap::function_symbol())) {
4151 __ tst(r1, Operand(kSmiTagMask));
4152 false_target()->Branch(eq);
4153 Register map_reg = r2;
4154 __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
4155 true_target()->Branch(eq);
4156 // Regular expressions are callable so typeof == 'function'.
4157 __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
4158 cc_reg_ = eq;
4159
4160 } else if (check->Equals(Heap::object_symbol())) {
4161 __ tst(r1, Operand(kSmiTagMask));
4162 false_target()->Branch(eq);
4163
4164 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4165 __ cmp(r1, ip);
4166 true_target()->Branch(eq);
4167
4168 Register map_reg = r2;
4169 __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
4170 false_target()->Branch(eq);
4171
4172 // It can be an undetectable object.
4173 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
4174 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
4175 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
4176 false_target()->Branch(eq);
4177
4178 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
4179 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
4180 false_target()->Branch(lt);
4181 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
4182 cc_reg_ = le;
4183
4184 } else {
4185 // Uncommon case: typeof testing against a string literal that is
4186 // never returned from the typeof operator.
4187 false_target()->Jump();
4188 }
4189 ASSERT(!has_valid_frame() ||
4190 (has_cc() && frame_->height() == original_height));
4191 return;
4192 }
4193
4194 switch (op) {
4195 case Token::EQ:
4196 Comparison(eq, left, right, false);
4197 break;
4198
4199 case Token::LT:
4200 Comparison(lt, left, right);
4201 break;
4202
4203 case Token::GT:
4204 Comparison(gt, left, right);
4205 break;
4206
4207 case Token::LTE:
4208 Comparison(le, left, right);
4209 break;
4210
4211 case Token::GTE:
4212 Comparison(ge, left, right);
4213 break;
4214
4215 case Token::EQ_STRICT:
4216 Comparison(eq, left, right, true);
4217 break;
4218
4219 case Token::IN: {
4220 LoadAndSpill(left);
4221 LoadAndSpill(right);
4222 frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
4223 frame_->EmitPush(r0);
4224 break;
4225 }
4226
4227 case Token::INSTANCEOF: {
4228 LoadAndSpill(left);
4229 LoadAndSpill(right);
4230 InstanceofStub stub;
4231 frame_->CallStub(&stub, 2);
4232 // At this point if instanceof succeeded then r0 == 0.
4233 __ tst(r0, Operand(r0));
4234 cc_reg_ = eq;
4235 break;
4236 }
4237
4238 default:
4239 UNREACHABLE();
4240 }
4241 ASSERT((has_cc() && frame_->height() == original_height) ||
4242 (!has_cc() && frame_->height() == original_height + 1));
4243}
4244
4245
4246#ifdef DEBUG
4247bool CodeGenerator::HasValidEntryRegisters() { return true; }
4248#endif
4249
4250
4251#undef __
4252#define __ ACCESS_MASM(masm)
4253
4254
4255Handle<String> Reference::GetName() {
4256 ASSERT(type_ == NAMED);
4257 Property* property = expression_->AsProperty();
4258 if (property == NULL) {
4259 // Global variable reference treated as a named property reference.
4260 VariableProxy* proxy = expression_->AsVariableProxy();
4261 ASSERT(proxy->AsVariable() != NULL);
4262 ASSERT(proxy->AsVariable()->is_global());
4263 return proxy->name();
4264 } else {
4265 Literal* raw_name = property->key()->AsLiteral();
4266 ASSERT(raw_name != NULL);
4267 return Handle<String>(String::cast(*raw_name->handle()));
4268 }
4269}
4270
4271
4272void Reference::GetValue() {
4273 ASSERT(cgen_->HasValidEntryRegisters());
4274 ASSERT(!is_illegal());
4275 ASSERT(!cgen_->has_cc());
4276 MacroAssembler* masm = cgen_->masm();
4277 Property* property = expression_->AsProperty();
4278 if (property != NULL) {
4279 cgen_->CodeForSourcePosition(property->position());
4280 }
4281
4282 switch (type_) {
4283 case SLOT: {
4284 Comment cmnt(masm, "[ Load from Slot");
4285 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
4286 ASSERT(slot != NULL);
4287 cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
4288 break;
4289 }
4290
4291 case NAMED: {
4292 VirtualFrame* frame = cgen_->frame();
4293 Comment cmnt(masm, "[ Load from named Property");
4294 Handle<String> name(GetName());
4295 Variable* var = expression_->AsVariableProxy()->AsVariable();
4296 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
4297 // Setup the name register.
4298 Result name_reg(r2);
4299 __ mov(r2, Operand(name));
4300 ASSERT(var == NULL || var->is_global());
4301 RelocInfo::Mode rmode = (var == NULL)
4302 ? RelocInfo::CODE_TARGET
4303 : RelocInfo::CODE_TARGET_CONTEXT;
4304 frame->CallCodeObject(ic, rmode, &name_reg, 0);
4305 frame->EmitPush(r0);
4306 break;
4307 }
4308
4309 case KEYED: {
4310 // TODO(181): Implement inlined version of array indexing once
4311 // loop nesting is properly tracked on ARM.
4312 VirtualFrame* frame = cgen_->frame();
4313 Comment cmnt(masm, "[ Load from keyed Property");
4314 ASSERT(property != NULL);
4315 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
4316 Variable* var = expression_->AsVariableProxy()->AsVariable();
4317 ASSERT(var == NULL || var->is_global());
4318 RelocInfo::Mode rmode = (var == NULL)
4319 ? RelocInfo::CODE_TARGET
4320 : RelocInfo::CODE_TARGET_CONTEXT;
4321 frame->CallCodeObject(ic, rmode, 0);
4322 frame->EmitPush(r0);
4323 break;
4324 }
4325
4326 default:
4327 UNREACHABLE();
4328 }
4329}
4330
4331
4332void Reference::SetValue(InitState init_state) {
4333 ASSERT(!is_illegal());
4334 ASSERT(!cgen_->has_cc());
4335 MacroAssembler* masm = cgen_->masm();
4336 VirtualFrame* frame = cgen_->frame();
4337 Property* property = expression_->AsProperty();
4338 if (property != NULL) {
4339 cgen_->CodeForSourcePosition(property->position());
4340 }
4341
4342 switch (type_) {
4343 case SLOT: {
4344 Comment cmnt(masm, "[ Store to Slot");
4345 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
4346 ASSERT(slot != NULL);
4347 if (slot->type() == Slot::LOOKUP) {
4348 ASSERT(slot->var()->is_dynamic());
4349
4350 // For now, just do a runtime call.
4351 frame->EmitPush(cp);
4352 __ mov(r0, Operand(slot->var()->name()));
4353 frame->EmitPush(r0);
4354
4355 if (init_state == CONST_INIT) {
4356 // Same as the case for a normal store, but ignores attribute
4357 // (e.g. READ_ONLY) of context slot so that we can initialize
4358 // const properties (introduced via eval("const foo = (some
4359 // expr);")). Also, uses the current function context instead of
4360 // the top context.
4361 //
4362 // Note that we must declare the foo upon entry of eval(), via a
4363 // context slot declaration, but we cannot initialize it at the
4364 // same time, because the const declaration may be at the end of
4365 // the eval code (sigh...) and the const variable may have been
4366 // used before (where its value is 'undefined'). Thus, we can only
4367 // do the initialization when we actually encounter the expression
4368 // and when the expression operands are defined and valid, and
4369 // thus we need the split into 2 operations: declaration of the
4370 // context slot followed by initialization.
4371 frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4372 } else {
4373 frame->CallRuntime(Runtime::kStoreContextSlot, 3);
4374 }
4375 // Storing a variable must keep the (new) value on the expression
4376 // stack. This is necessary for compiling assignment expressions.
4377 frame->EmitPush(r0);
4378
4379 } else {
4380 ASSERT(!slot->var()->is_dynamic());
4381
4382 JumpTarget exit;
4383 if (init_state == CONST_INIT) {
4384 ASSERT(slot->var()->mode() == Variable::CONST);
4385 // Only the first const initialization must be executed (the slot
4386 // still contains 'the hole' value). When the assignment is
4387 // executed, the code is identical to a normal store (see below).
4388 Comment cmnt(masm, "[ Init const");
4389 __ ldr(r2, cgen_->SlotOperand(slot, r2));
4390 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
4391 __ cmp(r2, ip);
4392 exit.Branch(ne);
4393 }
4394
4395 // We must execute the store. Storing a variable must keep the
4396 // (new) value on the stack. This is necessary for compiling
4397 // assignment expressions.
4398 //
4399 // Note: We will reach here even with slot->var()->mode() ==
4400 // Variable::CONST because of const declarations which will
4401 // initialize consts to 'the hole' value and by doing so, end up
4402 // calling this code. r2 may be loaded with context; used below in
4403 // RecordWrite.
4404 frame->EmitPop(r0);
4405 __ str(r0, cgen_->SlotOperand(slot, r2));
4406 frame->EmitPush(r0);
4407 if (slot->type() == Slot::CONTEXT) {
4408 // Skip write barrier if the written value is a smi.
4409 __ tst(r0, Operand(kSmiTagMask));
4410 exit.Branch(eq);
4411 // r2 is loaded with context when calling SlotOperand above.
4412 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4413 __ mov(r3, Operand(offset));
4414 __ RecordWrite(r2, r3, r1);
4415 }
4416 // If we definitely did not jump over the assignment, we do not need
4417 // to bind the exit label. Doing so can defeat peephole
4418 // optimization.
4419 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
4420 exit.Bind();
4421 }
4422 }
4423 break;
4424 }
4425
4426 case NAMED: {
4427 Comment cmnt(masm, "[ Store to named Property");
4428 // Call the appropriate IC code.
4429 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
4430 Handle<String> name(GetName());
4431
4432 Result value(r0);
4433 frame->EmitPop(r0);
4434
4435 // Setup the name register.
4436 Result property_name(r2);
4437 __ mov(r2, Operand(name));
4438 frame->CallCodeObject(ic,
4439 RelocInfo::CODE_TARGET,
4440 &value,
4441 &property_name,
4442 0);
4443 frame->EmitPush(r0);
4444 break;
4445 }
4446
4447 case KEYED: {
4448 Comment cmnt(masm, "[ Store to keyed Property");
4449 Property* property = expression_->AsProperty();
4450 ASSERT(property != NULL);
4451 cgen_->CodeForSourcePosition(property->position());
4452
4453 // Call IC code.
4454 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
4455 // TODO(1222589): Make the IC grab the values from the stack.
4456 Result value(r0);
4457 frame->EmitPop(r0); // value
4458 frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
4459 frame->EmitPush(r0);
4460 break;
4461 }
4462
4463 default:
4464 UNREACHABLE();
4465 }
4466}
4467
4468
4469// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
4470// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
4471// (31 instead of 32).
4472static void CountLeadingZeros(
4473 MacroAssembler* masm,
4474 Register source,
4475 Register scratch,
4476 Register zeros) {
4477#ifdef CAN_USE_ARMV5_INSTRUCTIONS
4478 __ clz(zeros, source); // This instruction is only supported after ARM5.
4479#else
4480 __ mov(zeros, Operand(0));
4481 __ mov(scratch, source);
4482 // Top 16.
4483 __ tst(scratch, Operand(0xffff0000));
4484 __ add(zeros, zeros, Operand(16), LeaveCC, eq);
4485 __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
4486 // Top 8.
4487 __ tst(scratch, Operand(0xff000000));
4488 __ add(zeros, zeros, Operand(8), LeaveCC, eq);
4489 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
4490 // Top 4.
4491 __ tst(scratch, Operand(0xf0000000));
4492 __ add(zeros, zeros, Operand(4), LeaveCC, eq);
4493 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
4494 // Top 2.
4495 __ tst(scratch, Operand(0xc0000000));
4496 __ add(zeros, zeros, Operand(2), LeaveCC, eq);
4497 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
4498 // Top bit.
4499 __ tst(scratch, Operand(0x80000000u));
4500 __ add(zeros, zeros, Operand(1), LeaveCC, eq);
4501#endif
4502}
4503
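// A minimal standalone sketch of the pre-ARMv5 fallback above (kept under
// #if 0: illustration only). Like the assembly, it returns 31 rather than 32
// for an input of zero, which is why the caller excludes that case.
#if 0
#include <stdint.h>

static int CountLeadingZeros32(uint32_t x) {
  int zeros = 0;
  if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }  // Top 16.
  if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8;  }  // Top 8.
  if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4;  }  // Top 4.
  if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2;  }  // Top 2.
  if ((x & 0x80000000u) == 0) { zeros += 1; }             // Top bit.
  return zeros;  // Zero maps to 31 here, matching the comment above.
}
#endif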
4504
4505// Takes a Smi and converts to an IEEE 64 bit floating point value in two
4506// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
4507// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
4508// scratch register. Destroys the source register. No GC occurs during this
4509// stub so you don't have to set up the frame.
4510class ConvertToDoubleStub : public CodeStub {
4511 public:
4512 ConvertToDoubleStub(Register result_reg_1,
4513 Register result_reg_2,
4514 Register source_reg,
4515 Register scratch_reg)
4516 : result1_(result_reg_1),
4517 result2_(result_reg_2),
4518 source_(source_reg),
4519 zeros_(scratch_reg) { }
4520
4521 private:
4522 Register result1_;
4523 Register result2_;
4524 Register source_;
4525 Register zeros_;
4526
4527 // Minor key encoding in 16 bits.
4528 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4529 class OpBits: public BitField<Token::Value, 2, 14> {};
4530
4531 Major MajorKey() { return ConvertToDouble; }
4532 int MinorKey() {
4533 // Encode the parameters in a unique 16 bit value.
4534 return result1_.code() +
4535 (result2_.code() << 4) +
4536 (source_.code() << 8) +
4537 (zeros_.code() << 12);
4538 }
4539
4540 void Generate(MacroAssembler* masm);
4541
4542 const char* GetName() { return "ConvertToDoubleStub"; }
4543
4544#ifdef DEBUG
4545 void Print() { PrintF("ConvertToDoubleStub\n"); }
4546#endif
4547};
4548
4549
4550void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
4551#ifndef BIG_ENDIAN_FLOATING_POINT
4552 Register exponent = result1_;
4553 Register mantissa = result2_;
4554#else
4555 Register exponent = result2_;
4556 Register mantissa = result1_;
4557#endif
4558 Label not_special;
4559 // Convert from Smi to integer.
4560 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
4561 // Move sign bit from source to destination. This works because the sign bit
4562 // in the exponent word of the double has the same position and polarity as
4563 // the 2's complement sign bit in a Smi.
4564 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4565 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
4566 // Subtract from 0 if source was negative.
4567 __ rsb(source_, source_, Operand(0), LeaveCC, ne);
4568 __ cmp(source_, Operand(1));
4569 __ b(gt, &not_special);
4570
4571 // We have -1, 0 or 1, which we treat specially.
4572 __ cmp(source_, Operand(0));
4573 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
4574 static const uint32_t exponent_word_for_1 =
4575 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
4576 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
4577 // 1, 0 and -1 all have 0 for the second word.
4578 __ mov(mantissa, Operand(0));
4579 __ Ret();
4580
4581 __ bind(&not_special);
4582 // Count leading zeros. Uses result2 for a scratch register on pre-ARM5.
4583 // Gets the wrong answer for 0, but we already checked for that case above.
4584 CountLeadingZeros(masm, source_, mantissa, zeros_);
4585 // Compute exponent and or it into the exponent register.
4586 // We use result2 as a scratch register here.
4587 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
4588 __ orr(exponent,
4589 exponent,
4590 Operand(mantissa, LSL, HeapNumber::kExponentShift));
4591 // Shift up the source chopping the top bit off.
4592 __ add(zeros_, zeros_, Operand(1));
4593 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
4594 __ mov(source_, Operand(source_, LSL, zeros_));
4595 // Compute lower part of fraction (last 12 bits).
4596 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
4597 // And the top (top 20 bits).
4598 __ orr(exponent,
4599 exponent,
4600 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
4601 __ Ret();
4602}
4603
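// A standalone cross-check of what the stub produces (kept under #if 0:
// illustration only), assuming IEEE 754 doubles. For example the smi 5 ends
// up as high word 0x40140000 and low word 0x00000000: sign 0, biased
// exponent 1023 + 2 = 0x401, and 0x40000 as the top 20 fraction bits of
// 1.25 with the implicit leading 1 dropped.
#if 0
#include <stdint.h>
#include <string.h>

static void DoubleWords(double d, uint32_t* hi, uint32_t* lo) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));          // Reinterpret the IEEE 754 bits.
  *hi = static_cast<uint32_t>(bits >> 32);  // Sign, exponent, top 20 fraction bits.
  *lo = static_cast<uint32_t>(bits);        // Low 32 fraction bits.
}
#endif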
4604
4605// This stub can convert a signed int32 to a heap number (double). It does
4606// not work for int32s that are in Smi range! No GC occurs during this stub
4607// so you don't have to set up the frame.
4608class WriteInt32ToHeapNumberStub : public CodeStub {
4609 public:
4610 WriteInt32ToHeapNumberStub(Register the_int,
4611 Register the_heap_number,
4612 Register scratch)
4613 : the_int_(the_int),
4614 the_heap_number_(the_heap_number),
4615 scratch_(scratch) { }
4616
4617 private:
4618 Register the_int_;
4619 Register the_heap_number_;
4620 Register scratch_;
4621
4622 // Minor key encoding in 16 bits.
4623 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4624 class OpBits: public BitField<Token::Value, 2, 14> {};
4625
4626 Major MajorKey() { return WriteInt32ToHeapNumber; }
4627 int MinorKey() {
4628 // Encode the parameters in a unique 16 bit value.
4629 return the_int_.code() +
4630 (the_heap_number_.code() << 4) +
4631 (scratch_.code() << 8);
4632 }
4633
4634 void Generate(MacroAssembler* masm);
4635
4636 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
4637
4638#ifdef DEBUG
4639 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
4640#endif
4641};
4642
4643
4644// See comment for class.
4645void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
4646 Label max_negative_int;
4647 // the_int_ has the answer which is a signed int32 but not a Smi.
4648 // We test for the special value that has a different exponent. This test
4649 // has the neat side effect of setting the flags according to the sign.
4650 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4651 __ cmp(the_int_, Operand(0x80000000u));
4652 __ b(eq, &max_negative_int);
4653 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
4654 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
4655 uint32_t non_smi_exponent =
4656 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
4657 __ mov(scratch_, Operand(non_smi_exponent));
4658 // Set the sign bit in scratch_ if the value was negative.
4659 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
4660 // Subtract from 0 if the value was negative.
4661 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
4662 // We should be masking the implicit first digit of the mantissa away here,
4663 // but it just ends up combining harmlessly with the last digit of the
4664 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
4665 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
4666 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
4667 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
4668 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
4669 __ str(scratch_, FieldMemOperand(the_heap_number_,
4670 HeapNumber::kExponentOffset));
4671 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
4672 __ str(scratch_, FieldMemOperand(the_heap_number_,
4673 HeapNumber::kMantissaOffset));
4674 __ Ret();
4675
4676 __ bind(&max_negative_int);
4677 // The max negative int32 is stored as a positive number in the mantissa of
4678 // a double because it uses a sign bit instead of using two's complement.
4679 // The actual mantissa bits stored are all 0 because the implicit most
4680 // significant 1 bit is not stored.
4681 non_smi_exponent += 1 << HeapNumber::kExponentShift;
4682 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
4683 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
4684 __ mov(ip, Operand(0));
4685 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
4686 __ Ret();
4687}
4688
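// Worked constants for the stub above, assuming the usual field layout
// (11 exponent bits starting at bit 20 of the high word, bias 1023). A
// non-smi int32 has magnitude in [2^30, 2^31), so it is written as
// 1.x * 2^30: biased exponent 1023 + 30 = 1053 = 0x41D, i.e. a high word of
// 0x41D00000 before the sign and mantissa bits are or'ed in. The one
// exception, 0x80000000, is exactly -2^31, so its exponent field is bumped
// by one (to 1054) and its stored mantissa is all zeros.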
4689
4690// Handle the case where the lhs and rhs are the same object.
4691// Equality is almost reflexive (everything but NaN), so this is a test
4692// for "identity and not NaN".
4693static void EmitIdenticalObjectComparison(MacroAssembler* masm,
4694 Label* slow,
4695 Condition cc) {
4696 Label not_identical;
4697 __ cmp(r0, Operand(r1));
4698 __ b(ne, &not_identical);
4699
4700 Register exp_mask_reg = r5;
4701 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4702
4703 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
4704 // so we do the second best thing - test it ourselves.
4705 Label heap_number, return_equal;
4706 // They are both equal and they are not both Smis so both of them are not
4707 // Smis. If it's not a heap number, then return equal.
4708 if (cc == lt || cc == gt) {
4709 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
4710 __ b(ge, slow);
4711 } else {
4712 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4713 __ b(eq, &heap_number);
4714 // Comparing JS objects with <=, >= is complicated.
4715 if (cc != eq) {
4716 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
4717 __ b(ge, slow);
4718 // Normally here we fall through to return_equal, but undefined is
4719 // special: (undefined == undefined) == true, but (undefined <= undefined)
4720 // == false! See ECMAScript 11.8.5.
4721 if (cc == le || cc == ge) {
4722 __ cmp(r4, Operand(ODDBALL_TYPE));
4723 __ b(ne, &return_equal);
4724 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4725 __ cmp(r0, Operand(r2));
4726 __ b(ne, &return_equal);
4727 if (cc == le) {
4728 __ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
4729 } else {
4730 __ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
4731 }
4732 __ mov(pc, Operand(lr)); // Return.
4733 }
4734 }
4735 }
4736 __ bind(&return_equal);
4737 if (cc == lt) {
4738 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
4739 } else if (cc == gt) {
4740 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
4741 } else {
4742 __ mov(r0, Operand(0)); // Things are <=, >=, ==, === themselves.
4743 }
4744 __ mov(pc, Operand(lr)); // Return.
4745
4746 // For less and greater we don't have to check for NaN since the result of
4747 // x < x is false regardless. For the others here is some code to check
4748 // for NaN.
4749 if (cc != lt && cc != gt) {
4750 __ bind(&heap_number);
4751 // It is a heap number, so return non-equal if it's NaN and equal if it's
4752 // not NaN.
4753 // The representation of NaN values has all exponent bits (52..62) set,
4754 // and not all mantissa bits (0..51) clear.
4755 // Read top bits of double representation (second word of value).
4756 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
4757 // Test that exponent bits are all set.
4758 __ and_(r3, r2, Operand(exp_mask_reg));
4759 __ cmp(r3, Operand(exp_mask_reg));
4760 __ b(ne, &return_equal);
4761
4762 // Shift out flag and all exponent bits, retaining only mantissa.
4763 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
4764 // Or with all low-bits of mantissa.
4765 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
4766 __ orr(r0, r3, Operand(r2), SetCC);
4767 // For equal we already have the right value in r0: Return zero (equal)
4768 // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
4769 // (it's a NaN). For <= and >= we need to load r0 with the failing value
4770 // if it's a NaN.
4771 if (cc != eq) {
4772 // All-zero means Infinity means equal.
4773 __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
4774 if (cc == le) {
4775 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
4776 } else {
4777 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
4778 }
4779 }
4780 __ mov(pc, Operand(lr)); // Return.
4781 }
4782 // No fall through here.
4783
4784 __ bind(&not_identical);
4785}
4786
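// A minimal standalone sketch of the NaN test above (kept under #if 0:
// illustration only): a double is NaN iff all eleven exponent bits are set
// and its 52 mantissa bits are not all zero (all zero would be an Infinity).
#if 0
#include <stdint.h>

static bool IsNaNWords(uint32_t hi, uint32_t lo) {
  const uint32_t kExpMask = 0x7ff00000u;          // HeapNumber::kExponentMask.
  if ((hi & kExpMask) != kExpMask) return false;  // Exponent not all ones.
  // Shift out the sign and exponent (12 bits); any remaining mantissa bit,
  // in either word, means NaN rather than Infinity.
  return ((hi << 12) | lo) != 0;
}
#endif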
4787
4788// See comment at call site.
4789static void EmitSmiNonsmiComparison(MacroAssembler* masm,
4790 Label* rhs_not_nan,
4791 Label* slow,
4792 bool strict) {
4793 Label lhs_is_smi;
4794 __ tst(r0, Operand(kSmiTagMask));
4795 __ b(eq, &lhs_is_smi);
4796
4797 // Rhs is a Smi. Check whether the non-smi is a heap number.
4798 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4799 if (strict) {
4800 // If lhs was not a number and rhs was a Smi then strict equality cannot
4801 // succeed. Return non-equal (r0 is already not zero)
4802 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4803 } else {
4804 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4805 // the runtime.
4806 __ b(ne, slow);
4807 }
4808
4809 // Rhs is a smi, lhs is a number.
4810 __ push(lr);
4811
4812 if (CpuFeatures::IsSupported(VFP3)) {
4813 CpuFeatures::Scope scope(VFP3);
4814 __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
4815 } else {
4816 __ mov(r7, Operand(r1));
4817 ConvertToDoubleStub stub1(r3, r2, r7, r6);
4818 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
4819 }
4820
4821
4822 // r3 and r2 are rhs as double.
4823 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4824 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
4825 // We now have both loaded as doubles but we can skip the lhs nan check
4826 // since it's a Smi.
4827 __ pop(lr);
4828 __ jmp(rhs_not_nan);
4829
4830 __ bind(&lhs_is_smi);
4831 // Lhs is a Smi. Check whether the non-smi is a heap number.
4832 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
4833 if (strict) {
4834 // If rhs was not a number and lhs was a Smi then strict equality cannot
4835 // succeed. Return non-equal.
4836 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
4837 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4838 } else {
4839 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4840 // the runtime.
4841 __ b(ne, slow);
4842 }
4843
4844 // Lhs is a smi, rhs is a number.
4845 // r0 is Smi and r1 is heap number.
4846 __ push(lr);
4847 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4848 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4849
4850 if (CpuFeatures::IsSupported(VFP3)) {
4851 CpuFeatures::Scope scope(VFP3);
4852 __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
4853 } else {
4854 __ mov(r7, Operand(r0));
4855 ConvertToDoubleStub stub2(r1, r0, r7, r6);
4856 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
4857 }
4858
4859 __ pop(lr);
4860 // Fall through to both_loaded_as_doubles.
4861}
4862
4863
4864void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
4865 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
4866 Register lhs_exponent = exp_first ? r0 : r1;
4867 Register rhs_exponent = exp_first ? r2 : r3;
4868 Register lhs_mantissa = exp_first ? r1 : r0;
4869 Register rhs_mantissa = exp_first ? r3 : r2;
4870 Label one_is_nan, neither_is_nan;
4871
4872 Register exp_mask_reg = r5;
4873
4874 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4875 __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
4876 __ cmp(r4, Operand(exp_mask_reg));
4877 __ b(ne, rhs_not_nan);
4878 __ mov(r4,
4879 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4880 SetCC);
4881 __ b(ne, &one_is_nan);
4882 __ cmp(rhs_mantissa, Operand(0));
4883 __ b(ne, &one_is_nan);
4884
4885 __ bind(rhs_not_nan);
4886 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4887 __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
4888 __ cmp(r4, Operand(exp_mask_reg));
4889 __ b(ne, &neither_is_nan);
4890 __ mov(r4,
4891 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4892 SetCC);
4893 __ b(ne, &one_is_nan);
4894 __ cmp(lhs_mantissa, Operand(0));
4895 __ b(eq, &neither_is_nan);
4896
4897 __ bind(&one_is_nan);
4898 // NaN comparisons always fail.
4899 // Load whatever we need in r0 to make the comparison fail.
4900 if (cc == lt || cc == le) {
4901 __ mov(r0, Operand(GREATER));
4902 } else {
4903 __ mov(r0, Operand(LESS));
4904 }
4905 __ mov(pc, Operand(lr)); // Return.
4906
4907 __ bind(&neither_is_nan);
4908}
4909
4910
4911// See comment at call site.
4912static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
4913 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
4914 Register lhs_exponent = exp_first ? r0 : r1;
4915 Register rhs_exponent = exp_first ? r2 : r3;
4916 Register lhs_mantissa = exp_first ? r1 : r0;
4917 Register rhs_mantissa = exp_first ? r3 : r2;
4918
4919 // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
4920 if (cc == eq) {
4921 // Doubles are not equal unless they have the same bit pattern.
4922 // Exception: 0 and -0.
4923 __ cmp(lhs_mantissa, Operand(rhs_mantissa));
4924 __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne);
4925 // Return non-zero if the numbers are unequal.
4926 __ mov(pc, Operand(lr), LeaveCC, ne);
4927
4928 __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC);
4929 // If exponents are equal then return 0.
4930 __ mov(pc, Operand(lr), LeaveCC, eq);
4931
4932 // Exponents are unequal. The only way we can return that the numbers
4933 // are equal is if one is -0 and the other is 0. We already dealt
4934 // with the case where both are -0 or both are 0.
4935 // We start by seeing if the mantissas (that are equal) or the bottom
4936 // 31 bits of the rhs exponent are non-zero. If so we return not
4937 // equal.
4938 __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC);
4939 __ mov(r0, Operand(r4), LeaveCC, ne);
4940 __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
4941 // Now they are equal if and only if the lhs exponent is zero in its
4942 // low 31 bits.
4943 __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize));
4944 __ mov(pc, Operand(lr));
4945 } else {
4946 // Call a native function to do a comparison between two non-NaNs.
4947 // Call C routine that may not cause GC or other trouble.
4948 __ mov(r5, Operand(ExternalReference::compare_doubles()));
4949 __ Jump(r5); // Tail call.
4950 }
4951}
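
// A host-side sketch of the eq path above, assuming <cstdint>/<cstring>: two
// non-NaN doubles are equal exactly when their bit patterns match, with the
// single exception that +0.0 and -0.0 (which differ only in the sign bit)
// must compare equal.  The helper name BitwiseDoubleEquals is hypothetical.
static bool BitwiseDoubleEquals(double lhs, double rhs) {
  uint64_t lhs_bits, rhs_bits;
  memcpy(&lhs_bits, &lhs, sizeof(lhs_bits));
  memcpy(&rhs_bits, &rhs, sizeof(rhs_bits));
  if (lhs_bits == rhs_bits) return true;          // Same bit pattern.
  // Differing patterns can still be equal only if both values are zeros,
  // i.e. every bit apart from the sign bit (bit 63) is clear in both.
  const uint64_t kNonSignMask = ~(1ULL << 63);
  return (lhs_bits & kNonSignMask) == 0 && (rhs_bits & kNonSignMask) == 0;
}
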
4952
4953
4954// See comment at call site.
4955static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
4956 // If either operand is a JSObject or an oddball value, then they are
4957 // not equal since their pointers are different.
4958 // There is no test for undetectability in strict equality.
4959 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4960 Label first_non_object;
4961 // Get the type of the first operand into r2 and compare it with
4962 // FIRST_JS_OBJECT_TYPE.
4963 __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
4964 __ b(lt, &first_non_object);
4965
4966 // Return non-zero (r0 is not zero)
4967 Label return_not_equal;
4968 __ bind(&return_not_equal);
4969 __ mov(pc, Operand(lr)); // Return.
4970
4971 __ bind(&first_non_object);
4972 // Check for oddballs: true, false, null, undefined.
4973 __ cmp(r2, Operand(ODDBALL_TYPE));
4974 __ b(eq, &return_not_equal);
4975
4976 __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
4977 __ b(ge, &return_not_equal);
4978
4979 // Check for oddballs: true, false, null, undefined.
4980 __ cmp(r3, Operand(ODDBALL_TYPE));
4981 __ b(eq, &return_not_equal);
4982}
4983
4984
4985// See comment at call site.
4986static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
4987 Label* both_loaded_as_doubles,
4988 Label* not_heap_numbers,
4989 Label* slow) {
4990 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
4991 __ b(ne, not_heap_numbers);
4992 __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
4993 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
4994
4995 // Both are heap numbers. Load them up then jump to the code we have
4996 // for that.
4997 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4998 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4999 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
5000 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
5001 __ jmp(both_loaded_as_doubles);
5002}
5003
5004
5005// Fast negative check for symbol-to-symbol equality.
5006static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
5007 // r2 is object type of r0.
5008 __ tst(r2, Operand(kIsNotStringMask));
5009 __ b(ne, slow);
5010 __ tst(r2, Operand(kIsSymbolMask));
5011 __ b(eq, slow);
5012 __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
5013 __ b(ge, slow);
5014 __ tst(r3, Operand(kIsSymbolMask));
5015 __ b(eq, slow);
5016
5017 // Both are symbols. We already checked they weren't the same pointer
5018 // so they are not equal.
5019 __ mov(r0, Operand(1)); // Non-zero indicates not equal.
5020 __ mov(pc, Operand(lr)); // Return.
5021}
5022
5023
5024// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
5025// positive or negative to indicate the result of the comparison.
5026void CompareStub::Generate(MacroAssembler* masm) {
5027 Label slow; // Call builtin.
5028 Label not_smis, both_loaded_as_doubles, rhs_not_nan;
5029
5030 // NOTICE! This code is only reached after a smi-fast-case check, so
5031 // it is certain that at least one operand isn't a smi.
5032
5033 // Handle the case where the objects are identical. Either returns the answer
5034 // or goes to slow. Only falls through if the objects were not identical.
5035 EmitIdenticalObjectComparison(masm, &slow, cc_);
5036
5037 // If either is a Smi (we know that not both are), then they can only
5038 // be strictly equal if the other is a HeapNumber.
5039 ASSERT_EQ(0, kSmiTag);
5040 ASSERT_EQ(0, Smi::FromInt(0));
5041 __ and_(r2, r0, Operand(r1));
5042 __ tst(r2, Operand(kSmiTagMask));
5043 __ b(ne, &not_smis);
5044 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
5045 // 1) Return the answer.
5046 // 2) Go to slow.
5047 // 3) Fall through to both_loaded_as_doubles.
5048 // 4) Jump to rhs_not_nan.
5049 // In cases 3 and 4 we have found out we were dealing with a number-number
5050 // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
5051 EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_);
5052
5053 __ bind(&both_loaded_as_doubles);
5054 // r0, r1, r2, r3 are the double representations of the left hand side
5055 // and the right hand side.
5056
5057 // Checks for NaN in the doubles we have loaded. Can return the answer or
5058 // fall through if neither is a NaN. Also binds rhs_not_nan.
5059 EmitNanCheck(masm, &rhs_not_nan, cc_);
5060
5061 if (CpuFeatures::IsSupported(VFP3)) {
5062 CpuFeatures::Scope scope(VFP3);
5063 // ARMv7 VFP3 instructions to implement double precision comparison.
5064 __ fmdrr(d6, r0, r1);
5065 __ fmdrr(d7, r2, r3);
5066
5067 __ fcmp(d6, d7);
5068 __ vmrs(pc);
5069 __ mov(r0, Operand(0), LeaveCC, eq);
5070 __ mov(r0, Operand(1), LeaveCC, lt);
5071 __ mvn(r0, Operand(0), LeaveCC, gt);
5072 __ mov(pc, Operand(lr));
5073 } else {
5074 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
5075 // answer. Never falls through.
5076 EmitTwoNonNanDoubleComparison(masm, cc_);
5077 }
5078
5079 __ bind(&not_smis);
5080 // At this point we know we are dealing with two different objects,
5081 // and neither of them is a Smi. The objects are in r0 and r1.
5082 if (strict_) {
5083 // This returns non-equal for some object types, or falls through if it
5084 // was not lucky.
5085 EmitStrictTwoHeapObjectCompare(masm);
5086 }
5087
5088 Label check_for_symbols;
5089 // Check for heap-number-heap-number comparison. Can jump to slow case,
5090 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
5091 // that case. If the inputs are not doubles then jumps to check_for_symbols.
5092 // In this case r2 will contain the type of r0.
5093 EmitCheckForTwoHeapNumbers(masm,
5094 &both_loaded_as_doubles,
5095 &check_for_symbols,
5096 &slow);
5097
5098 __ bind(&check_for_symbols);
5099 if (cc_ == eq) {
5100 // Either jumps to slow or returns the answer. Assumes that r2 is the type
5101 // of r0 on entry.
5102 EmitCheckForSymbols(masm, &slow);
5103 }
5104
5105 __ bind(&slow);
5106 __ push(lr);
5107 __ push(r1);
5108 __ push(r0);
5109 // Figure out which native to call and setup the arguments.
5110 Builtins::JavaScript native;
5111 int arg_count = 1; // Not counting receiver.
5112 if (cc_ == eq) {
5113 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
5114 } else {
5115 native = Builtins::COMPARE;
5116 int ncr; // NaN compare result
5117 if (cc_ == lt || cc_ == le) {
5118 ncr = GREATER;
5119 } else {
5120 ASSERT(cc_ == gt || cc_ == ge); // remaining cases
5121 ncr = LESS;
5122 }
5123 arg_count++;
5124 __ mov(r0, Operand(Smi::FromInt(ncr)));
5125 __ push(r0);
5126 }
5127
5128 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
5129 // tagged as a small integer.
5130 __ InvokeBuiltin(native, CALL_JS);
5131 __ cmp(r0, Operand(0));
5132 __ pop(pc);
5133}
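
// A sketch of the result convention the stub relies on, assuming (as the
// comments above state) that the comparison outcome is negative for "less",
// zero for "equal" and positive for "greater", with LESS/GREATER being -1/+1.
// Loading GREATER as the NaN default for cc == lt/le (and LESS for gt/ge)
// makes every relational comparison involving NaN come out false, as
// ECMAScript 11.8.5 requires.  The helper name CompareOutcome is hypothetical.
static int CompareOutcome(double lhs, double rhs, bool cc_is_lt_or_le) {
  if (lhs != lhs || rhs != rhs) {                 // Either side is NaN.
    return cc_is_lt_or_le ? 1 /* GREATER */ : -1 /* LESS */;
  }
  if (lhs < rhs) return -1;                       // LESS
  if (lhs > rhs) return 1;                        // GREATER
  return 0;                                       // EQUAL
}
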
5134
5135
5136// Allocates a heap number or jumps to the label if the young space is full and
5137// a scavenge is needed.
5138static void AllocateHeapNumber(
5139 MacroAssembler* masm,
5140 Label* need_gc, // Jump here if young space is full.
5141 Register result, // The tagged address of the new heap number.
5142 Register scratch1, // A scratch register.
5143 Register scratch2) { // Another scratch register.
5144 // Allocate an object in the heap for the heap number and tag it as a heap
5145 // object.
5146 __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
5147 result,
5148 scratch1,
5149 scratch2,
5150 need_gc,
5151 TAG_OBJECT);
5152
5153 // Get heap number map and store it in the allocated object.
5154 __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
5155 __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
5156}
5157
5158
5159// We fall into this code if the operands were Smis, but the result was
5160// not (eg. overflow). We branch into this code (to the not_smi label) if
5161// the operands were not both Smi. The operands are in r0 and r1. In order
5162// to call the C-implemented binary fp operation routines we need to end up
5163// with the double precision floating point operands in r0 and r1 (for the
5164// value in r1) and r2 and r3 (for the value in r0).
5165static void HandleBinaryOpSlowCases(MacroAssembler* masm,
5166 Label* not_smi,
5167 const Builtins::JavaScript& builtin,
5168 Token::Value operation,
5169 OverwriteMode mode) {
5170 Label slow, slow_pop_2_first, do_the_call;
5171 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
5172 // Smi-smi case (overflow).
5173 // Since both are Smis there is no heap number to overwrite, so allocate.
5174 // The new heap number is in r5. r6 and r7 are scratch.
5175 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5176
5177 if (CpuFeatures::IsSupported(VFP3)) {
5178 CpuFeatures::Scope scope(VFP3);
5179 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
5180 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
5181 } else {
5182 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
5183 __ mov(r7, Operand(r0));
5184 ConvertToDoubleStub stub1(r3, r2, r7, r6);
5185 __ push(lr);
5186 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
5187 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
5188 __ mov(r7, Operand(r1));
5189 ConvertToDoubleStub stub2(r1, r0, r7, r6);
5190 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
5191 __ pop(lr);
5192 }
5193
5194 __ jmp(&do_the_call); // Tail call. No return.
5195
5196 // We jump to here if something goes wrong (one param is not a number of any
5197 // sort or new-space allocation fails).
5198 __ bind(&slow);
5199
5200 // Push arguments to the stack
5201 __ push(r1);
5202 __ push(r0);
5203
5204 if (Token::ADD == operation) {
5205 // Test for string arguments before calling runtime.
5206 // r1 : first argument
5207 // r0 : second argument
5208 // sp[0] : second argument
5209 // sp[1] : first argument
5210
5211 Label not_strings, not_string1, string1;
5212 __ tst(r1, Operand(kSmiTagMask));
5213 __ b(eq, &not_string1);
5214 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
5215 __ b(ge, &not_string1);
5216
5217 // First argument is a string, test second.
5218 __ tst(r0, Operand(kSmiTagMask));
5219 __ b(eq, &string1);
5220 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
5221 __ b(ge, &string1);
5222
5223 // First and second argument are strings.
5224 __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
5225
5226 // Only first argument is a string.
5227 __ bind(&string1);
5228 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
5229
5230 // First argument was not a string, test second.
5231 __ bind(&not_string1);
5232 __ tst(r0, Operand(kSmiTagMask));
5233 __ b(eq, &not_strings);
5234 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
5235 __ b(ge, &not_strings);
5236
5237 // Only second argument is a string.
5238
5239 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
5240
5241 __ bind(&not_strings);
5242 }
5243
5244 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
5245
5246 // We branch here if at least one of r0 and r1 is not a Smi.
5247 __ bind(not_smi);
5248 if (mode == NO_OVERWRITE) {
5249 // In the case where there is no chance of an overwritable float we may as
5250 // well do the allocation immediately while r0 and r1 are untouched.
5251 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5252 }
5253
5254 // Move r0 to a double in r2-r3.
5255 __ tst(r0, Operand(kSmiTagMask));
5256 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5257 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5258 __ b(ne, &slow);
5259 if (mode == OVERWRITE_RIGHT) {
5260 __ mov(r5, Operand(r0)); // Overwrite this heap number.
5261 }
5262 // Calling convention says that second double is in r2 and r3.
5263 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
5264 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
5265 __ jmp(&finished_loading_r0);
5266 __ bind(&r0_is_smi);
5267 if (mode == OVERWRITE_RIGHT) {
5268 // We can't overwrite a Smi so get address of new heap number into r5.
5269 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5270 }
5271
5272
5273 if (CpuFeatures::IsSupported(VFP3)) {
5274 CpuFeatures::Scope scope(VFP3);
5275 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
5276 } else {
5277 // Write Smi from r0 to r3 and r2 in double format.
5278 __ mov(r7, Operand(r0));
5279 ConvertToDoubleStub stub3(r3, r2, r7, r6);
5280 __ push(lr);
5281 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
5282 __ pop(lr);
5283 }
5284
5285 __ bind(&finished_loading_r0);
5286
5287 // Move r1 to a double in r0-r1.
5288 __ tst(r1, Operand(kSmiTagMask));
5289 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5290 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5291 __ b(ne, &slow);
5292 if (mode == OVERWRITE_LEFT) {
5293 __ mov(r5, Operand(r1)); // Overwrite this heap number.
5294 }
5295 // Calling convention says that first double is in r0 and r1.
5296 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
5297 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
5298 __ jmp(&finished_loading_r1);
5299 __ bind(&r1_is_smi);
5300 if (mode == OVERWRITE_LEFT) {
5301 // We can't overwrite a Smi so get address of new heap number into r5.
5302 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5303 }
5304
5305 if (CpuFeatures::IsSupported(VFP3)) {
5306 CpuFeatures::Scope scope(VFP3);
5307 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
5308 } else {
5309 // Write Smi from r1 to r1 and r0 in double format.
5310 __ mov(r7, Operand(r1));
5311 ConvertToDoubleStub stub4(r1, r0, r7, r6);
5312 __ push(lr);
5313 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
5314 __ pop(lr);
5315 }
5316
5317 __ bind(&finished_loading_r1);
5318
5319 __ bind(&do_the_call);
5320 // r0: Left value (least significant part of mantissa).
5321 // r1: Left value (sign, exponent, top of mantissa).
5322 // r2: Right value (least significant part of mantissa).
5323 // r3: Right value (sign, exponent, top of mantissa).
5324 // r5: Address of heap number for result.
5325
5326 if (CpuFeatures::IsSupported(VFP3) &&
5327 ((Token::MUL == operation) ||
5328 (Token::DIV == operation) ||
5329 (Token::ADD == operation) ||
5330 (Token::SUB == operation))) {
5331 CpuFeatures::Scope scope(VFP3);
5332 // ARMv7 VFP3 instructions to implement
5333 // double precision, add, subtract, multiply, divide.
5334 __ fmdrr(d6, r0, r1);
5335 __ fmdrr(d7, r2, r3);
5336
5337 if (Token::MUL == operation) {
5338 __ fmuld(d5, d6, d7);
5339 } else if (Token::DIV == operation) {
5340 __ fdivd(d5, d6, d7);
5341 } else if (Token::ADD == operation) {
5342 __ faddd(d5, d6, d7);
5343 } else if (Token::SUB == operation) {
5344 __ fsubd(d5, d6, d7);
5345 } else {
5346 UNREACHABLE();
5347 }
5348
5349 __ fmrrd(r0, r1, d5);
5350
5351 __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
5352 __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
5353 __ mov(r0, Operand(r5));
5354 __ mov(pc, lr);
5355 return;
5356 }
5357 __ push(lr); // For later.
5358 __ push(r5); // Address of heap number that is answer.
5359 __ AlignStack(0);
5360 // Call C routine that may not cause GC or other trouble.
5361 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
5362 __ Call(r5);
5363 __ pop(r4); // Address of heap number.
5364 __ cmp(r4, Operand(Smi::FromInt(0)));
5365 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
5366 // Store answer in the overwritable heap number.
5367#if !defined(USE_ARM_EABI)
5368 // Double returned in fp coprocessor register 0 and 1, encoded as register
5369 // cr8. Offsets must be divisible by 4 for coprocessor so we need to
5370 // substract the tag from r4.
5371 __ sub(r5, r4, Operand(kHeapObjectTag));
5372 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
5373#else
5374 // Double returned in registers 0 and 1.
5375 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
5376 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
5377#endif
5378 __ mov(r0, Operand(r4));
5379 // And we are done.
5380 __ pop(pc);
5381}
5382
5383
5384// Tries to get a signed int32 out of a double precision floating point heap
5385// number. Rounds towards 0. Fastest for doubles that are in the ranges
5386// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
5387// almost to the range of signed int32 values that are not Smis. Jumps to the
5388// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
5389// (excluding the endpoints).
5390static void GetInt32(MacroAssembler* masm,
5391 Register source,
5392 Register dest,
5393 Register scratch,
5394 Register scratch2,
5395 Label* slow) {
5396 Label right_exponent, done;
5397 // Get exponent word.
5398 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
5399 // Get exponent alone in scratch2.
5400 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
5401 // Load dest with zero. We use this either for the final shift or
5402 // for the answer.
5403 __ mov(dest, Operand(0));
5404 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
5405 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
5406 // the exponent that we are fastest at and also the highest exponent we can
5407 // handle here.
5408 const uint32_t non_smi_exponent =
5409 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
5410 __ cmp(scratch2, Operand(non_smi_exponent));
5411 // If we have a match of the int32-but-not-Smi exponent then skip some logic.
5412 __ b(eq, &right_exponent);
5413 // If the exponent is higher than that then go to slow case. This catches
5414 // numbers that don't fit in a signed int32, infinities and NaNs.
5415 __ b(gt, slow);
5416
5417 // We know the exponent is smaller than 30 (biased). If it is less than
5418 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
5419 // it rounds to zero.
5420 const uint32_t zero_exponent =
5421 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
5422 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
5423 // Dest already has a Smi zero.
5424 __ b(lt, &done);
5425 if (!CpuFeatures::IsSupported(VFP3)) {
5426 // We have a shifted exponent between 0 and 30 in scratch2.
5427 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
5428 // We now have the exponent in dest. Subtract from 30 to get
5429 // how much to shift down.
5430 __ rsb(dest, dest, Operand(30));
5431 }
5432 __ bind(&right_exponent);
5433 if (CpuFeatures::IsSupported(VFP3)) {
5434 CpuFeatures::Scope scope(VFP3);
5435 // ARMv7 VFP3 instructions implementing double precision to integer
5436 // conversion using round to zero.
5437 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
5438 __ fmdrr(d7, scratch2, scratch);
5439 __ ftosid(s15, d7);
5440 __ fmrs(dest, s15);
5441 } else {
5442 // Get the top bits of the mantissa.
5443 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
5444 // Put back the implicit 1.
5445 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
5446 // Shift up the mantissa bits to take up the space the exponent used to
5447 // take. We just orred in the implicit bit so that took care of one and
5448 // we want to leave the sign bit 0 so we subtract 2 bits from the shift
5449 // distance.
5450 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
5451 __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
5452 // Put sign in zero flag.
5453 __ tst(scratch, Operand(HeapNumber::kSignMask));
5454 // Get the second half of the double. For some exponents we don't
5455 // actually need this because the bits get shifted out again, but
5456 // it's probably slower to test than just to do it.
5457 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
5458 // Shift down 22 bits to get the last 10 bits.
5459 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
5460 // Move down according to the exponent.
5461 __ mov(dest, Operand(scratch, LSR, dest));
5462 // Fix sign if sign bit was set.
5463 __ rsb(dest, dest, Operand(0), LeaveCC, ne);
5464 }
5465 __ bind(&done);
5466}
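
// A host-side sketch of the non-VFP path above, assuming <cstdint>/<cstring>.
// It truncates a double towards zero by extracting the unbiased exponent,
// re-inserting the implicit mantissa bit and shifting, in the spirit of the
// emitted code.  The helper name TruncateToInt32 is hypothetical, and the
// sketch only claims to cover the same exponent range the fast path accepts
// (biased exponent at most bias + 30, i.e. |value| < 2^31).
static int32_t TruncateToInt32(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;                     // |value| < 1 rounds to zero.
  uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
  // Bit 52 of the mantissa carries weight 2^exponent, so shifting right by
  // (52 - exponent) leaves the integer part; for exponent <= 30 it fits in
  // a 32-bit signed value.
  int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
  return (bits >> 63) ? -magnitude : magnitude;   // Apply the sign.
}
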
5467
5468// For bitwise ops where the inputs are not both Smis, we try to determine
5469// whether both inputs are either Smis or at least heap numbers that can be
5470// represented by a 32 bit signed value. We truncate towards zero as required
5471// by the ES spec. If this is the case we do the bitwise op and see if the
5472// result is a Smi. If so, great, otherwise we try to find a heap number to
5473// write the answer into (either by allocating or by overwriting).
5474// On entry the operands are in r0 and r1. On exit the answer is in r0.
5475void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
5476 Label slow, result_not_a_smi;
5477 Label r0_is_smi, r1_is_smi;
5478 Label done_checking_r0, done_checking_r1;
5479
5480 __ tst(r1, Operand(kSmiTagMask));
5481 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5482 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5483 __ b(ne, &slow);
5484 GetInt32(masm, r1, r3, r5, r4, &slow);
5485 __ jmp(&done_checking_r1);
5486 __ bind(&r1_is_smi);
5487 __ mov(r3, Operand(r1, ASR, 1));
5488 __ bind(&done_checking_r1);
5489
5490 __ tst(r0, Operand(kSmiTagMask));
5491 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5492 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5493 __ b(ne, &slow);
5494 GetInt32(masm, r0, r2, r5, r4, &slow);
5495 __ jmp(&done_checking_r0);
5496 __ bind(&r0_is_smi);
5497 __ mov(r2, Operand(r0, ASR, 1));
5498 __ bind(&done_checking_r0);
5499
5500 // r0 and r1: Original operands (Smi or heap numbers).
5501 // r2 and r3: Signed int32 operands.
5502 switch (op_) {
5503 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
5504 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
5505 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
5506 case Token::SAR:
5507 // Use only the 5 least significant bits of the shift count.
5508 __ and_(r2, r2, Operand(0x1f));
5509 __ mov(r2, Operand(r3, ASR, r2));
5510 break;
5511 case Token::SHR:
5512 // Use only the 5 least significant bits of the shift count.
5513 __ and_(r2, r2, Operand(0x1f));
5514 __ mov(r2, Operand(r3, LSR, r2), SetCC);
5515 // SHR is special because it is required to produce a positive answer.
5516 // The code below for writing into heap numbers isn't capable of writing
5517 // the register as an unsigned int so we go to slow case if we hit this
5518 // case.
5519 __ b(mi, &slow);
5520 break;
5521 case Token::SHL:
5522 // Use only the 5 least significant bits of the shift count.
5523 __ and_(r2, r2, Operand(0x1f));
5524 __ mov(r2, Operand(r3, LSL, r2));
5525 break;
5526 default: UNREACHABLE();
5527 }
5528 // check that the *signed* result fits in a smi
5529 __ add(r3, r2, Operand(0x40000000), SetCC);
5530 __ b(mi, &result_not_a_smi);
5531 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5532 __ Ret();
5533
5534 Label have_to_allocate, got_a_heap_number;
5535 __ bind(&result_not_a_smi);
5536 switch (mode_) {
5537 case OVERWRITE_RIGHT: {
5538 __ tst(r0, Operand(kSmiTagMask));
5539 __ b(eq, &have_to_allocate);
5540 __ mov(r5, Operand(r0));
5541 break;
5542 }
5543 case OVERWRITE_LEFT: {
5544 __ tst(r1, Operand(kSmiTagMask));
5545 __ b(eq, &have_to_allocate);
5546 __ mov(r5, Operand(r1));
5547 break;
5548 }
5549 case NO_OVERWRITE: {
5550 // Get a new heap number in r5. r6 and r7 are scratch.
5551 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5552 }
5553 default: break;
5554 }
5555 __ bind(&got_a_heap_number);
5556 // r2: Answer as signed int32.
5557 // r5: Heap number to write answer into.
5558
5559 // Nothing can go wrong now, so move the heap number to r0, which is the
5560 // result.
5561 __ mov(r0, Operand(r5));
5562
5563 // Tail call that writes the int32 in r2 to the heap number in r0, using
5564 // r3 as scratch. r0 is preserved and returned.
5565 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
5566 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
5567
5568 if (mode_ != NO_OVERWRITE) {
5569 __ bind(&have_to_allocate);
5570 // Get a new heap number in r5. r6 and r7 are scratch.
5571 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5572 __ jmp(&got_a_heap_number);
5573 }
5574
5575 // If all else failed then we go to the runtime system.
5576 __ bind(&slow);
5577 __ push(r1); // restore stack
5578 __ push(r0);
5579 switch (op_) {
5580 case Token::BIT_OR:
5581 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
5582 break;
5583 case Token::BIT_AND:
5584 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
5585 break;
5586 case Token::BIT_XOR:
5587 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
5588 break;
5589 case Token::SAR:
5590 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
5591 break;
5592 case Token::SHR:
5593 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
5594 break;
5595 case Token::SHL:
5596 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
5597 break;
5598 default:
5599 UNREACHABLE();
5600 }
5601}
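
// A sketch of the Smi-range test used above ("add 0x40000000 and branch on
// minus"), assuming 32-bit two's complement arithmetic.  A value fits in a
// Smi exactly when it lies in [-2^30, 2^30), i.e. when its top two bits are
// equal; adding 2^30 maps that range onto [0, 2^31), so the sign bit of the
// sum is set exactly for the out-of-range values.  The helper name FitsInSmi
// is hypothetical.
static bool FitsInSmi(int32_t value) {
  uint32_t shifted = static_cast<uint32_t>(value) + 0x40000000u;
  return (shifted & 0x80000000u) == 0;            // Sign bit clear => fits.
}
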
5602
5603
5604// Can we multiply by x with max two shifts and an add.
5605// This answers yes to all integers from 2 to 10.
5606static bool IsEasyToMultiplyBy(int x) {
5607 if (x < 2) return false; // Avoid special cases.
5608 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
5609 if (IsPowerOf2(x)) return true; // Simple shift.
5610 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
5611 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
5612 return false;
5613}
5614
5615
5616// Can multiply by anything that IsEasyToMultiplyBy returns true for.
5617// Source and destination may be the same register. This routine does
5618// not set carry and overflow the way a mul instruction would.
5619static void MultiplyByKnownInt(MacroAssembler* masm,
5620 Register source,
5621 Register destination,
5622 int known_int) {
5623 if (IsPowerOf2(known_int)) {
5624 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
5625 } else if (PopCountLessThanEqual2(known_int)) {
5626 int first_bit = BitPosition(known_int);
5627 int second_bit = BitPosition(known_int ^ (1 << first_bit));
5628 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
5629 if (first_bit != 0) {
5630 __ mov(destination, Operand(destination, LSL, first_bit));
5631 }
5632 } else {
5633 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
5634 int the_bit = BitPosition(known_int + 1);
5635 __ rsb(destination, source, Operand(source, LSL, the_bit));
5636 }
5637}
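
// A sketch of the three identities MultiplyByKnownInt relies on, assuming the
// multiplier is one of the shapes accepted by IsEasyToMultiplyBy above so no
// intermediate shift overflows.  The helper name MultiplyBySmallConstant is
// hypothetical.
static int32_t MultiplyBySmallConstant(int32_t x, int known_int) {
  // Power of two: a single shift, e.g. x * 8 == x << 3.
  if ((known_int & (known_int - 1)) == 0) {
    int shift = 0;
    while ((1 << shift) < known_int) shift++;
    return x << shift;
  }
  // Exactly two bits set, e.g. 10 == 0b1010: x * 10 == (x + (x << 2)) << 1,
  // i.e. one add plus at most two shifts.
  int first = 0;
  while (((known_int >> first) & 1) == 0) first++;
  int rest = known_int & ~(1 << first);
  if ((rest & (rest - 1)) == 0) {
    int second = 0;
    while (((rest >> second) & 1) == 0) second++;
    return (x + (x << (second - first))) << first;
  }
  // Pattern 2^k - 1, e.g. 7: x * 7 == (x << 3) - x (the rsb instruction above).
  int shift = 0;
  while ((1 << shift) < known_int + 1) shift++;
  return (x << shift) - x;
}
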
5638
5639
5640// This function (as opposed to MultiplyByKnownInt) takes the known int in
5641// a register for the cases where it doesn't know a good trick, and may deliver
5642// a result that needs shifting.
5643static void MultiplyByKnownInt2(
5644 MacroAssembler* masm,
5645 Register result,
5646 Register source,
5647 Register known_int_register, // Smi tagged.
5648 int known_int,
5649 int* required_shift) { // Including Smi tag shift
5650 switch (known_int) {
5651 case 3:
5652 __ add(result, source, Operand(source, LSL, 1));
5653 *required_shift = 1;
5654 break;
5655 case 5:
5656 __ add(result, source, Operand(source, LSL, 2));
5657 *required_shift = 1;
5658 break;
5659 case 6:
5660 __ add(result, source, Operand(source, LSL, 1));
5661 *required_shift = 2;
5662 break;
5663 case 7:
5664 __ rsb(result, source, Operand(source, LSL, 3));
5665 *required_shift = 1;
5666 break;
5667 case 9:
5668 __ add(result, source, Operand(source, LSL, 3));
5669 *required_shift = 1;
5670 break;
5671 case 10:
5672 __ add(result, source, Operand(source, LSL, 2));
5673 *required_shift = 2;
5674 break;
5675 default:
5676 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
5677 __ mul(result, source, known_int_register);
5678 *required_shift = 0;
5679 }
5680}
5681
5682
5683void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
5684 // r1 : x
5685 // r0 : y
5686 // result : r0
5687
5688 // All ops need to know whether we are dealing with two Smis. Set up r2 to
5689 // tell us that.
5690 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
5691
5692 switch (op_) {
5693 case Token::ADD: {
5694 Label not_smi;
5695 // Fast path.
5696 ASSERT(kSmiTag == 0); // Adjust code below.
5697 __ tst(r2, Operand(kSmiTagMask));
5698 __ b(ne, &not_smi);
5699 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
5700 // Return if no overflow.
5701 __ Ret(vc);
5702 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
5703
5704 HandleBinaryOpSlowCases(masm,
5705 &not_smi,
5706 Builtins::ADD,
5707 Token::ADD,
5708 mode_);
5709 break;
5710 }
5711
5712 case Token::SUB: {
5713 Label not_smi;
5714 // Fast path.
5715 ASSERT(kSmiTag == 0); // Adjust code below.
5716 __ tst(r2, Operand(kSmiTagMask));
5717 __ b(ne, &not_smi);
5718 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
5719 // Return if no overflow.
5720 __ Ret(vc);
5721 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
5722
5723 HandleBinaryOpSlowCases(masm,
5724 &not_smi,
5725 Builtins::SUB,
5726 Token::SUB,
5727 mode_);
5728 break;
5729 }
5730
5731 case Token::MUL: {
5732 Label not_smi, slow;
5733 ASSERT(kSmiTag == 0); // adjust code below
5734 __ tst(r2, Operand(kSmiTagMask));
5735 __ b(ne, &not_smi);
5736 // Remove tag from one operand (but keep sign), so that result is Smi.
5737 __ mov(ip, Operand(r0, ASR, kSmiTagSize));
5738 // Do multiplication
5739 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
5740 // Go slow on overflows (overflow bit is not set).
5741 __ mov(ip, Operand(r3, ASR, 31));
5742 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
5743 __ b(ne, &slow);
5744 // Go slow on zero result to handle -0.
5745 __ tst(r3, Operand(r3));
5746 __ mov(r0, Operand(r3), LeaveCC, ne);
5747 __ Ret(ne);
5748 // We need -0 if we were multiplying a negative number with 0 to get 0.
5749 // We know one of them was zero.
5750 __ add(r2, r0, Operand(r1), SetCC);
5751 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
5752 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
5753 // Slow case. We fall through here if we multiplied a negative number
5754 // with 0, because that would mean we should produce -0.
5755 __ bind(&slow);
5756
5757 HandleBinaryOpSlowCases(masm,
5758 &not_smi,
5759 Builtins::MUL,
5760 Token::MUL,
5761 mode_);
5762 break;
5763 }
5764
5765 case Token::DIV:
5766 case Token::MOD: {
5767 Label not_smi;
5768 if (specialized_on_rhs_) {
5769 Label smi_is_unsuitable;
5770 __ BranchOnNotSmi(r1, &not_smi);
5771 if (IsPowerOf2(constant_rhs_)) {
5772 if (op_ == Token::MOD) {
5773 __ and_(r0,
5774 r1,
5775 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
5776 SetCC);
5777 // We now have the answer, but if the input was negative we also
5778 // have the sign bit. Our work is done if the result is
5779 // positive or zero:
5780 __ Ret(pl);
5781 // A mod of a negative left hand side must return a negative number.
5782 // Unfortunately if the answer is 0 then we must return -0. And we
5783 // already optimistically trashed r0 so we may need to restore it.
5784 __ eor(r0, r0, Operand(0x80000000u), SetCC);
5785 // Next two instructions are conditional on the answer being -0.
5786 __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
5787 __ b(eq, &smi_is_unsuitable);
5788 // We need to subtract the dividend. Eg. -3 % 4 == -3.
5789 __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
5790 } else {
5791 ASSERT(op_ == Token::DIV);
5792 __ tst(r1,
5793 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
5794 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
5795 int shift = 0;
5796 int d = constant_rhs_;
5797 while ((d & 1) == 0) {
5798 d >>= 1;
5799 shift++;
5800 }
5801 __ mov(r0, Operand(r1, LSR, shift));
5802 __ bic(r0, r0, Operand(kSmiTagMask));
5803 }
5804 } else {
5805 // Not a power of 2.
5806 __ tst(r1, Operand(0x80000000u));
5807 __ b(ne, &smi_is_unsuitable);
5808 // Find a fixed point reciprocal of the divisor so we can divide by
5809 // multiplying.
5810 double divisor = 1.0 / constant_rhs_;
5811 int shift = 32;
5812 double scale = 4294967296.0; // 1 << 32.
5813 uint32_t mul;
5814 // Maximise the precision of the fixed point reciprocal.
5815 while (true) {
5816 mul = static_cast<uint32_t>(scale * divisor);
5817 if (mul >= 0x7fffffff) break;
5818 scale *= 2.0;
5819 shift++;
5820 }
5821 mul++;
5822 __ mov(r2, Operand(mul));
5823 __ umull(r3, r2, r2, r1);
5824 __ mov(r2, Operand(r2, LSR, shift - 31));
5825 // r2 is r1 / rhs. r2 is not Smi tagged.
5826 // r0 is still the known rhs. r0 is Smi tagged.
5827 // r1 is still the unknown lhs. r1 is Smi tagged.
5828 int required_r4_shift = 0; // Including the Smi tag shift of 1.
5829 // r4 = r2 * r0.
5830 MultiplyByKnownInt2(masm,
5831 r4,
5832 r2,
5833 r0,
5834 constant_rhs_,
5835 &required_r4_shift);
5836 // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
5837 if (op_ == Token::DIV) {
5838 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
5839 __ b(ne, &smi_is_unsuitable); // There was a remainder.
5840 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5841 } else {
5842 ASSERT(op_ == Token::MOD);
5843 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
5844 }
5845 }
5846 __ Ret();
5847 __ bind(&smi_is_unsuitable);
5848 } else {
5849 __ jmp(&not_smi);
5850 }
5851 HandleBinaryOpSlowCases(masm,
5852 &not_smi,
5853 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
5854 op_,
5855 mode_);
5856 break;
5857 }
5858
5859 case Token::BIT_OR:
5860 case Token::BIT_AND:
5861 case Token::BIT_XOR:
5862 case Token::SAR:
5863 case Token::SHR:
5864 case Token::SHL: {
5865 Label slow;
5866 ASSERT(kSmiTag == 0); // adjust code below
5867 __ tst(r2, Operand(kSmiTagMask));
5868 __ b(ne, &slow);
5869 switch (op_) {
5870 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
5871 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
5872 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
5873 case Token::SAR:
5874 // Remove tags from right operand.
5875 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5876 // Use only the 5 least significant bits of the shift count.
5877 __ and_(r2, r2, Operand(0x1f));
5878 __ mov(r0, Operand(r1, ASR, r2));
5879 // Smi tag result.
5880 __ bic(r0, r0, Operand(kSmiTagMask));
5881 break;
5882 case Token::SHR:
5883 // Remove tags from operands. We can't do this on a 31 bit number
5884 // because then the 0s get shifted into bit 30 instead of bit 31.
5885 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
5886 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5887 // Use only the 5 least significant bits of the shift count.
5888 __ and_(r2, r2, Operand(0x1f));
5889 __ mov(r3, Operand(r3, LSR, r2));
5890 // Unsigned shift is not allowed to produce a negative number, so
5891 // check the sign bit and the sign bit after Smi tagging.
5892 __ tst(r3, Operand(0xc0000000));
5893 __ b(ne, &slow);
5894 // Smi tag result.
5895 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
5896 break;
5897 case Token::SHL:
5898 // Remove tags from operands.
5899 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
5900 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5901 // Use only the 5 least significant bits of the shift count.
5902 __ and_(r2, r2, Operand(0x1f));
5903 __ mov(r3, Operand(r3, LSL, r2));
5904 // Check that the signed result fits in a Smi.
5905 __ add(r2, r3, Operand(0x40000000), SetCC);
5906 __ b(mi, &slow);
5907 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
5908 break;
5909 default: UNREACHABLE();
5910 }
5911 __ Ret();
5912 __ bind(&slow);
5913 HandleNonSmiBitwiseOp(masm);
5914 break;
5915 }
5916
5917 default: UNREACHABLE();
5918 }
5919 // This code should be unreachable.
5920 __ stop("Unreachable");
5921}
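
// A host-side sketch of the reciprocal-multiplication trick used by the
// DIV/MOD-by-known-constant fast path above, assuming <cstdint> and a small
// positive divisor that is not a power of two, as in the stub.  The mul/shift
// computation mirrors the loop above; the helper name
// DivideByConstantViaReciprocal is hypothetical.  For DIV the stub also
// multiplies the quotient back and bails out to the slow case when a
// remainder is left, so the sketch only needs to be exact for inputs the
// fast path keeps.
static uint32_t DivideByConstantViaReciprocal(uint32_t dividend, int divisor) {
  // Maximise the precision of the fixed point reciprocal, as in the stub.
  double reciprocal = 1.0 / divisor;
  int shift = 32;
  double scale = 4294967296.0;  // 1 << 32.
  uint32_t mul;
  while (true) {
    mul = static_cast<uint32_t>(scale * reciprocal);
    if (mul >= 0x7fffffff) break;
    scale *= 2.0;
    shift++;
  }
  mul++;
  // dividend / divisor ~= (dividend * mul) >> shift, using the 64-bit product
  // that the umull instruction produces in the generated code.
  return static_cast<uint32_t>(
      (static_cast<uint64_t>(dividend) * mul) >> shift);
}
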
5922
5923
5924void StackCheckStub::Generate(MacroAssembler* masm) {
5925 // Do tail-call to runtime routine. Runtime routines expect at least one
5926 // argument, so give it a Smi.
5927 __ mov(r0, Operand(Smi::FromInt(0)));
5928 __ push(r0);
5929 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
5930
5931 __ StubReturn(1);
5932}
5933
5934
5935void UnarySubStub::Generate(MacroAssembler* masm) {
5936 Label undo;
5937 Label slow;
5938 Label not_smi;
5939
5940 // Enter runtime system if the value is not a smi.
5941 __ tst(r0, Operand(kSmiTagMask));
5942 __ b(ne, &not_smi);
5943
5944 // Enter runtime system if the value of the expression is zero
5945 // to make sure that we switch between 0 and -0.
5946 __ cmp(r0, Operand(0));
5947 __ b(eq, &slow);
5948
5949 // The value of the expression is a smi that is not zero. Try
5950 // optimistic subtraction '0 - value'.
5951 __ rsb(r1, r0, Operand(0), SetCC);
5952 __ b(vs, &slow);
5953
5954 __ mov(r0, Operand(r1)); // Set r0 to result.
5955 __ StubReturn(1);
5956
5957 // Enter runtime system.
5958 __ bind(&slow);
5959 __ push(r0);
5960 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
5961
5962 __ bind(&not_smi);
5963 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
5964 __ b(ne, &slow);
5965 // r0 is a heap number. Get a new heap number in r1.
5966 if (overwrite_) {
5967 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
5968 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
5969 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
5970 } else {
5971 AllocateHeapNumber(masm, &slow, r1, r2, r3);
5972 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
5973 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
5974 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
5975 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
5976 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
5977 __ mov(r0, Operand(r1));
5978 }
5979 __ StubReturn(1);
5980}
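
// A host-side sketch of the heap-number path above, assuming <cstdint> and
// <cstring>: IEEE-754 negation is exactly a flip of the sign bit, which is
// why the stub only needs to exclusive-or the exponent word with
// HeapNumber::kSignMask.  The helper name NegateByFlippingSignBit is
// hypothetical.
static double NegateByFlippingSignBit(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  bits ^= (1ULL << 63);                           // Flip the sign bit.
  double result;
  memcpy(&result, &bits, sizeof(result));
  return result;                                  // Also handles +-0 and +-inf.
}
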
5981
5982
5983int CEntryStub::MinorKey() {
5984 ASSERT(result_size_ <= 2);
5985 // Result returned in r0 or r0+r1 by default.
5986 return 0;
5987}
5988
5989
5990void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
5991 // r0 holds the exception.
5992
5993 // Adjust this code if not the case.
5994 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
5995
5996 // Drop the sp to the top of the handler.
5997 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
5998 __ ldr(sp, MemOperand(r3));
5999
6000 // Restore the next handler and frame pointer, discard handler state.
6001 ASSERT(StackHandlerConstants::kNextOffset == 0);
6002 __ pop(r2);
6003 __ str(r2, MemOperand(r3));
6004 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6005 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
6006
6007 // Before returning we restore the context from the frame pointer if
6008 // not NULL. The frame pointer is NULL in the exception handler of a
6009 // JS entry frame.
6010 __ cmp(fp, Operand(0));
6011 // Set cp to NULL if fp is NULL.
6012 __ mov(cp, Operand(0), LeaveCC, eq);
6013 // Restore cp otherwise.
6014 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6015#ifdef DEBUG
6016 if (FLAG_debug_code) {
6017 __ mov(lr, Operand(pc));
6018 }
6019#endif
6020 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6021 __ pop(pc);
6022}
6023
6024
6025void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
6026 UncatchableExceptionType type) {
6027 // Adjust this code if not the case.
6028 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6029
6030 // Drop sp to the top stack handler.
6031 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6032 __ ldr(sp, MemOperand(r3));
6033
6034 // Unwind the handlers until the ENTRY handler is found.
6035 Label loop, done;
6036 __ bind(&loop);
6037 // Load the type of the current stack handler.
6038 const int kStateOffset = StackHandlerConstants::kStateOffset;
6039 __ ldr(r2, MemOperand(sp, kStateOffset));
6040 __ cmp(r2, Operand(StackHandler::ENTRY));
6041 __ b(eq, &done);
6042 // Fetch the next handler in the list.
6043 const int kNextOffset = StackHandlerConstants::kNextOffset;
6044 __ ldr(sp, MemOperand(sp, kNextOffset));
6045 __ jmp(&loop);
6046 __ bind(&done);
6047
6048 // Set the top handler address to the next handler past the current ENTRY handler.
6049 ASSERT(StackHandlerConstants::kNextOffset == 0);
6050 __ pop(r2);
6051 __ str(r2, MemOperand(r3));
6052
6053 if (type == OUT_OF_MEMORY) {
6054 // Set external caught exception to false.
6055 ExternalReference external_caught(Top::k_external_caught_exception_address);
6056 __ mov(r0, Operand(false));
6057 __ mov(r2, Operand(external_caught));
6058 __ str(r0, MemOperand(r2));
6059
6060 // Set pending exception and r0 to out of memory exception.
6061 Failure* out_of_memory = Failure::OutOfMemoryException();
6062 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
6063 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
6064 __ str(r0, MemOperand(r2));
6065 }
6066
6067 // Stack layout at this point. See also StackHandlerConstants.
6068 // sp -> state (ENTRY)
6069 // fp
6070 // lr
6071
6072 // Discard handler state (r2 is not used) and restore frame pointer.
6073 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6074 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
6075 // Before returning we restore the context from the frame pointer if
6076 // not NULL. The frame pointer is NULL in the exception handler of a
6077 // JS entry frame.
6078 __ cmp(fp, Operand(0));
6079 // Set cp to NULL if fp is NULL.
6080 __ mov(cp, Operand(0), LeaveCC, eq);
6081 // Restore cp otherwise.
6082 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6083#ifdef DEBUG
6084 if (FLAG_debug_code) {
6085 __ mov(lr, Operand(pc));
6086 }
6087#endif
6088 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6089 __ pop(pc);
6090}
6091
6092
6093void CEntryStub::GenerateCore(MacroAssembler* masm,
6094 Label* throw_normal_exception,
6095 Label* throw_termination_exception,
6096 Label* throw_out_of_memory_exception,
6097 ExitFrame::Mode mode,
6098 bool do_gc,
6099 bool always_allocate) {
6100 // r0: result parameter for PerformGC, if any
6101 // r4: number of arguments including receiver (C callee-saved)
6102 // r5: pointer to builtin function (C callee-saved)
6103 // r6: pointer to the first argument (C callee-saved)
6104
6105 if (do_gc) {
6106 // Passing r0.
6107 ExternalReference gc_reference = ExternalReference::perform_gc_function();
6108 __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
6109 }
6110
6111 ExternalReference scope_depth =
6112 ExternalReference::heap_always_allocate_scope_depth();
6113 if (always_allocate) {
6114 __ mov(r0, Operand(scope_depth));
6115 __ ldr(r1, MemOperand(r0));
6116 __ add(r1, r1, Operand(1));
6117 __ str(r1, MemOperand(r0));
6118 }
6119
6120 // Call C built-in.
6121 // r0 = argc, r1 = argv
6122 __ mov(r0, Operand(r4));
6123 __ mov(r1, Operand(r6));
6124
6125 // TODO(1242173): To let the GC traverse the return address of the exit
6126 // frames, we need to know where the return address is. Right now,
6127 // we push it on the stack to be able to find it again, but we never
6128 // restore from it in case of changes, which makes it impossible to
6129 // support moving the C entry code stub. This should be fixed, but currently
6130 // this is OK because the CEntryStub gets generated so early in the V8 boot
6131// sequence that it is never moved.
6132 masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
6133 masm->push(lr);
6134 masm->Jump(r5);
6135
6136 if (always_allocate) {
6137 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
6138 // though (contain the result).
6139 __ mov(r2, Operand(scope_depth));
6140 __ ldr(r3, MemOperand(r2));
6141 __ sub(r3, r3, Operand(1));
6142 __ str(r3, MemOperand(r2));
6143 }
6144
6145 // check for failure result
6146 Label failure_returned;
6147 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
6148 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
6149 __ add(r2, r0, Operand(1));
6150 __ tst(r2, Operand(kFailureTagMask));
6151 __ b(eq, &failure_returned);
6152
6153 // Exit C frame and return.
6154 // r0:r1: result
6155 // sp: stack pointer
6156 // fp: frame pointer
6157 __ LeaveExitFrame(mode);
6158
6159 // check if we should retry or throw exception
6160 Label retry;
6161 __ bind(&failure_returned);
6162 ASSERT(Failure::RETRY_AFTER_GC == 0);
6163 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
6164 __ b(eq, &retry);
6165
6166 // Special handling of out of memory exceptions.
6167 Failure* out_of_memory = Failure::OutOfMemoryException();
6168 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
6169 __ b(eq, throw_out_of_memory_exception);
6170
6171 // Retrieve the pending exception and clear the variable.
6172 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
6173 __ ldr(r3, MemOperand(ip));
6174 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6175 __ ldr(r0, MemOperand(ip));
6176 __ str(r3, MemOperand(ip));
6177
6178 // Special handling of termination exceptions which are uncatchable
6179 // by javascript code.
6180 __ cmp(r0, Operand(Factory::termination_exception()));
6181 __ b(eq, throw_termination_exception);
6182
6183 // Handle normal exception.
6184 __ jmp(throw_normal_exception);
6185
6186 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
6187}
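
// A sketch of the failure-tag test used above, assuming (consistently with
// the ASSERT and the "lower 2 bits" comment) a two-bit tag mask and a failure
// tag of 0b11, with smis ending in 0 and heap objects in 01.  Adding 1 to a
// failure-tagged word clears both low bits, while smis and heap objects keep
// at least one of them set.  The helper name LooksLikeFailure is hypothetical.
static bool LooksLikeFailure(uint32_t raw_result) {
  const uint32_t kAssumedFailureTagMask = 3;      // Two low tag bits.
  return ((raw_result + 1) & kAssumedFailureTagMask) == 0;
}
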
6188
6189
6190void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
6191 // Called from JavaScript; parameters are on stack as if calling JS function
6192 // r0: number of arguments including receiver
6193 // r1: pointer to builtin function
6194 // fp: frame pointer (restored after C call)
6195 // sp: stack pointer (restored as callee's sp after C call)
6196 // cp: current context (C callee-saved)
6197
6198 // NOTE: Invocations of builtins may return failure objects
6199 // instead of a proper result. The builtin entry handles
6200 // this by performing a garbage collection and retrying the
6201 // builtin once.
6202
6203 ExitFrame::Mode mode = is_debug_break
6204 ? ExitFrame::MODE_DEBUG
6205 : ExitFrame::MODE_NORMAL;
6206
6207 // Enter the exit frame that transitions from JavaScript to C++.
6208 __ EnterExitFrame(mode);
6209
6210 // r4: number of arguments (C callee-saved)
6211 // r5: pointer to builtin function (C callee-saved)
6212 // r6: pointer to first argument (C callee-saved)
6213
6214 Label throw_normal_exception;
6215 Label throw_termination_exception;
6216 Label throw_out_of_memory_exception;
6217
6218 // Call into the runtime system.
6219 GenerateCore(masm,
6220 &throw_normal_exception,
6221 &throw_termination_exception,
6222 &throw_out_of_memory_exception,
6223 mode,
6224 false,
6225 false);
6226
6227 // Do space-specific GC and retry runtime call.
6228 GenerateCore(masm,
6229 &throw_normal_exception,
6230 &throw_termination_exception,
6231 &throw_out_of_memory_exception,
6232 mode,
6233 true,
6234 false);
6235
6236 // Do full GC and retry runtime call one final time.
6237 Failure* failure = Failure::InternalError();
6238 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
6239 GenerateCore(masm,
6240 &throw_normal_exception,
6241 &throw_termination_exception,
6242 &throw_out_of_memory_exception,
6243 mode,
6244 true,
6245 true);
6246
6247 __ bind(&throw_out_of_memory_exception);
6248 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
6249
6250 __ bind(&throw_termination_exception);
6251 GenerateThrowUncatchable(masm, TERMINATION);
6252
6253 __ bind(&throw_normal_exception);
6254 GenerateThrowTOS(masm);
6255}
6256
6257
6258void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
6259 // r0: code entry
6260 // r1: function
6261 // r2: receiver
6262 // r3: argc
6263 // [sp+0]: argv
6264
6265 Label invoke, exit;
6266
6267 // Called from C, so do not pop argc and args on exit (preserve sp)
6268 // No need to save register-passed args
6269 // Save callee-saved registers (incl. cp and fp), sp, and lr
6270 __ stm(db_w, sp, kCalleeSaved | lr.bit());
6271
6272 // Get address of argv, see stm above.
6273 // r0: code entry
6274 // r1: function
6275 // r2: receiver
6276 // r3: argc
6277 __ add(r4, sp, Operand((kNumCalleeSaved + 1)*kPointerSize));
6278 __ ldr(r4, MemOperand(r4)); // argv
6279
6280 // Push a frame with special values setup to mark it as an entry frame.
6281 // r0: code entry
6282 // r1: function
6283 // r2: receiver
6284 // r3: argc
6285 // r4: argv
6286 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
6287 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
6288 __ mov(r7, Operand(Smi::FromInt(marker)));
6289 __ mov(r6, Operand(Smi::FromInt(marker)));
6290 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
6291 __ ldr(r5, MemOperand(r5));
6292 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());

  // Set up the frame pointer for the frame to be pushed.
  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Call a faked try-block that does the invoke.
  __ bl(&invoke);

  // Caught exception: Store result (exception) in the pending
  // exception field in the JSEnv and return a failure sentinel.
  // Coming in here the fp will be invalid because the PushTryHandler below
  // sets it to 0 to signal the existence of the JSEntry frame.
  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
  __ str(r0, MemOperand(ip));
  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
  __ b(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r0-r4; r5-r7 are available.
  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bl(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
  __ ldr(r5, MemOperand(ip));
  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
  __ str(r5, MemOperand(ip));

  // Invoke the function by calling through the JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly
  // in this stub, because runtime stubs are not traversed when doing GC.
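  // Instead, the stub goes through an ExternalReference to a location that
  // always holds the current trampoline code object; the extra ldr below
  // ("deref address") performs that indirection.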

  // Expected registers by Builtins::JSEntryTrampoline:
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  if (is_construct) {
    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::JSEntryTrampoline);
    __ mov(ip, Operand(entry));
  }
  __ ldr(ip, MemOperand(ip));  // deref address

  // Branch and link to JSEntryTrampoline. We don't use the double underscore
  // macro for the add instruction because we don't want the coverage tool
  // inserting instructions here after we read the pc.
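  // Reading pc on ARM yields the address of the current instruction plus 8,
  // so the mov below leaves lr pointing at the instruction just after the
  // add, which is the return address for this hand-made call.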
  __ mov(lr, Operand(pc));
  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Unlink this frame from the handler chain. When reading the
  // address of the next handler, there is no need to use the address
  // displacement since the current stack pointer (sp) points directly
  // to the stack handler.
  __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
  __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
  __ str(r3, MemOperand(ip));
  // No need to restore registers.
  __ add(sp, sp, Operand(StackHandlerConstants::kSize));

  __ bind(&exit);  // r0 holds result
  // Restore the top frame descriptors from the stack.
  __ pop(r3);
  __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  __ str(r3, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved registers and return.
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif
  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}


// This stub performs an instanceof, calling the builtin function if
// necessary. Uses r0 for the object and r1 for the function that it may
// be an instance of (these are fetched from the stack).
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Get the object - slow case for smis (we may need to throw an exception
  // depending on the rhs).
  Label slow, loop, is_instance, is_not_instance;
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
  __ BranchOnSmi(r0, &slow);

  // Check that the left hand side is a JS object and put its map in r3.
  __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &slow);
  __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
  __ b(gt, &slow);

  // Get the prototype of the function (r4 is result, r2 is scratch).
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
  __ TryGetFunctionPrototype(r1, r4, r2, &slow);

  // Check that the function prototype is a JS object.
  __ BranchOnSmi(r4, &slow);
  __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &slow);
  __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
  __ b(gt, &slow);

  // Register mapping: r3 is the object map and r4 is the function prototype.
  // Get the prototype of the object into r2.
  __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));

  // Loop through the prototype chain looking for the function prototype.
  __ bind(&loop);
  __ cmp(r2, Operand(r4));
  __ b(eq, &is_instance);
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(r2, ip);
  __ b(eq, &is_not_instance);
  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
  __ jmp(&loop);

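  // The stub returns Smi 0 when the object is an instance and Smi 1 when it
  // is not, so callers only need to compare the result against zero.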
  __ bind(&is_instance);
  __ mov(r0, Operand(Smi::FromInt(0)));
  __ pop();
  __ pop();
  __ mov(pc, Operand(lr));  // Return.

  __ bind(&is_not_instance);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ pop();
  __ pop();
  __ mov(pc, Operand(lr));  // Return.

  // Slow-case. Tail call builtin.
  __ bind(&slow);
  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}


void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor);

  // Nothing to do: the formal number of parameters has already been
  // passed in register r0 by the calling function. Just return it.
  __ Jump(lr);

  // Arguments adaptor case: read the arguments length from the
  // adaptor frame and return it.
  __ bind(&adaptor);
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ Jump(lr);
}


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The displacement is the offset of the last parameter (if any)
  // relative to the frame pointer.
  static const int kDisplacement =
      StandardFrameConstants::kCallerSPOffset - kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ BranchOnNotSmi(r1, &slow);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor);

  // Check the index against the formal parameter count limit passed in
  // through register r0. Use an unsigned comparison to get the negative
  // check for free.
  __ cmp(r1, r0);
  __ b(cs, &slow);

  // Read the argument from the stack and return it.
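  // r0 and r1 hold smis (the value shifted left by kSmiTagSize), so shifting
  // their difference left by kPointerSizeLog2 - kSmiTagSize converts it
  // directly into a byte offset from the frame pointer.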
  __ sub(r3, r0, r1);
  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmp(r1, r0);
  __ b(cs, &slow);

  // Read the argument from the adaptor frame and return it.
  __ sub(r3, r0, r1);
  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ push(r1);
  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
}


void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &runtime);

  // Patch the arguments.length and the parameters pointer.
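  // sp[0] holds the length and sp[1] the pointer to the first parameter as
  // pushed by the caller; both are overwritten below with the values taken
  // from the adaptor frame.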
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ str(r0, MemOperand(sp, 0 * kPointerSize));
  __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ str(r3, MemOperand(sp, 1 * kPointerSize));

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
}


void CallFunctionStub::Generate(MacroAssembler* masm) {
  Label slow;
  // Get the function to call from the stack.
  // function, receiver [, arguments]
  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));

  // Check that the function is really a JavaScript function.
  // r1: pushed function (to be verified)
  __ BranchOnSmi(r1, &slow);
  // Get the map of the function object.
  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  // Fast-case: invoke the function now.
  // r1: pushed function
  ParameterCount actual(argc_);
  __ InvokeFunction(r1, actual, JUMP_FUNCTION);

  // Slow-case: a non-function was called.
  __ bind(&slow);
  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
  __ mov(r2, Operand(0));
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
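  // The adaptor trampoline reads the actual argument count from r0, the
  // expected count from r2 and the code entry to call from r3 (its usual
  // register convention), which is how the CALL_NON_FUNCTION builtin ends
  // up being invoked here.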
  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
          RelocInfo::CODE_TARGET);
}


int CompareStub::MinorKey() {
  // Encode the two parameters in a unique 16-bit value.
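  // ARM condition codes occupy bits 28-31 of an instruction word, so cc_ is
  // a multiple of 1 << 28; shifting it right by 27 keeps bit 0 clear for the
  // strict flag.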
  ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
  return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
}


#undef __

} }  // namespace v8::internal