1// Copyright 2006-2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "bootstrapper.h"
31#include "codegen-inl.h"
32#include "compiler.h"
33#include "debug.h"
34#include "parser.h"
35#include "register-allocator-inl.h"
36#include "runtime.h"
37#include "scopes.h"
38
39
40namespace v8 {
41namespace internal {
42
43#define __ ACCESS_MASM(masm_)
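// A note on the "__" shorthand used throughout this file: ACCESS_MASM(masm_)
// normally expands to plain "masm_->", so the two forms below emit the same
// instruction (illustrative sketch; the exact expansion depends on how
// ACCESS_MASM is configured, e.g. for code-coverage instrumentation):
//
//   __ push(r0);        // via the macro
//   masm_->push(r0);    // direct call, bypassing any instrumentation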
44
45static void EmitIdenticalObjectComparison(MacroAssembler* masm,
46 Label* slow,
47 Condition cc,
48 bool never_nan_nan);
49static void EmitSmiNonsmiComparison(MacroAssembler* masm,
50 Label* lhs_not_nan,
51 Label* slow,
52 bool strict);
53static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
54static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
55static void MultiplyByKnownInt(MacroAssembler* masm,
56 Register source,
57 Register destination,
58 int known_int);
59static bool IsEasyToMultiplyBy(int x);
60
61
62
63// -------------------------------------------------------------------------
64// Platform-specific DeferredCode functions.
65
66void DeferredCode::SaveRegisters() {
67 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
68 int action = registers_[i];
69 if (action == kPush) {
70 __ push(RegisterAllocator::ToRegister(i));
71 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
72 __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
73 }
74 }
75}
76
77
78void DeferredCode::RestoreRegisters() {
79 // Restore registers in reverse order due to the stack.
80 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
81 int action = registers_[i];
82 if (action == kPush) {
83 __ pop(RegisterAllocator::ToRegister(i));
84 } else if (action != kIgnore) {
85 action &= ~kSyncedFlag;
86 __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
87 }
88 }
89}
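// In both loops above, registers_[i] selects one action per register:
// kPush means the value travels via the stack (pushed on entry, popped on
// exit, in reverse order), kIgnore means the register is not live across
// the deferred code, and any other value is read as an fp-relative offset
// of the virtual frame slot to store to / load from, with the kSyncedFlag
// bit marking slots that are already up to date and need no store on entry.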
90
91
92// -------------------------------------------------------------------------
93// CodeGenState implementation.
94
95CodeGenState::CodeGenState(CodeGenerator* owner)
96 : owner_(owner),
97 true_target_(NULL),
98 false_target_(NULL),
99 previous_(NULL) {
100 owner_->set_state(this);
101}
102
103
104CodeGenState::CodeGenState(CodeGenerator* owner,
105 JumpTarget* true_target,
106 JumpTarget* false_target)
107 : owner_(owner),
108 true_target_(true_target),
109 false_target_(false_target),
110 previous_(owner->state()) {
111 owner_->set_state(this);
112}
113
114
115CodeGenState::~CodeGenState() {
116 ASSERT(owner_->state() == this);
117 owner_->set_state(previous_);
118}
119
120
121// -------------------------------------------------------------------------
122// CodeGenerator implementation
123
124CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
125 bool is_eval)
126 : is_eval_(is_eval),
127 script_(script),
128 deferred_(8),
129 masm_(new MacroAssembler(NULL, buffer_size)),
130 scope_(NULL),
131 frame_(NULL),
132 allocator_(NULL),
133 cc_reg_(al),
134 state_(NULL),
135 function_return_is_shadowed_(false) {
136}
137
138
139// Calling conventions:
140// fp: caller's frame pointer
141// sp: stack pointer
142// r1: called JS function
143// cp: callee's context
144
145void CodeGenerator::GenCode(FunctionLiteral* fun) {
146 // Record the position for debugging purposes.
147 CodeForFunctionPosition(fun);
148
149 ZoneList<Statement*>* body = fun->body();
150
151 // Initialize state.
152 ASSERT(scope_ == NULL);
153 scope_ = fun->scope();
154 ASSERT(allocator_ == NULL);
155 RegisterAllocator register_allocator(this);
156 allocator_ = &register_allocator;
157 ASSERT(frame_ == NULL);
158 frame_ = new VirtualFrame();
159 cc_reg_ = al;
160 {
161 CodeGenState state(this);
162
163 // Entry:
164 // Stack: receiver, arguments
165 // lr: return address
166 // fp: caller's frame pointer
167 // sp: stack pointer
168 // r1: called JS function
169 // cp: callee's context
170 allocator_->Initialize();
171 frame_->Enter();
172 // tos: code slot
173#ifdef DEBUG
174 if (strlen(FLAG_stop_at) > 0 &&
175 fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
176 frame_->SpillAll();
177 __ stop("stop-at");
178 }
179#endif
180
181 // Allocate space for locals and initialize them. This also checks
182 // for stack overflow.
183 frame_->AllocateStackSlots();
184 // Initialize the function return target after the locals are set
185 // up, because it needs the expected frame height from the frame.
186 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
187 function_return_is_shadowed_ = false;
188
189 VirtualFrame::SpilledScope spilled_scope;
190 int heap_slots = scope_->num_heap_slots();
 191 if (heap_slots > 0) {
192 // Allocate local context.
193 // Get outer context and create a new context based on it.
194 __ ldr(r0, frame_->Function());
195 frame_->EmitPush(r0);
196 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
197 FastNewContextStub stub(heap_slots);
198 frame_->CallStub(&stub, 1);
199 } else {
200 frame_->CallRuntime(Runtime::kNewContext, 1);
201 }
202
203#ifdef DEBUG
204 JumpTarget verified_true;
205 __ cmp(r0, Operand(cp));
206 verified_true.Branch(eq);
207 __ stop("NewContext: r0 is expected to be the same as cp");
208 verified_true.Bind();
209#endif
210 // Update context local.
211 __ str(cp, frame_->Context());
212 }
213
214 // TODO(1241774): Improve this code:
215 // 1) only needed if we have a context
216 // 2) no need to recompute context ptr every single time
217 // 3) don't copy parameter operand code from SlotOperand!
218 {
219 Comment cmnt2(masm_, "[ copy context parameters into .context");
220
221 // Note that iteration order is relevant here! If we have the same
222 // parameter twice (e.g., function (x, y, x)), and that parameter
223 // needs to be copied into the context, it must be the last argument
224 // passed to the parameter that needs to be copied. This is a rare
225 // case so we don't check for it, instead we rely on the copying
226 // order: such a parameter is copied repeatedly into the same
227 // context location and thus the last value is what is seen inside
228 // the function.
229 for (int i = 0; i < scope_->num_parameters(); i++) {
230 Variable* par = scope_->parameter(i);
231 Slot* slot = par->slot();
232 if (slot != NULL && slot->type() == Slot::CONTEXT) {
233 ASSERT(!scope_->is_global_scope()); // no parameters in global scope
234 __ ldr(r1, frame_->ParameterAt(i));
235 // Loads r2 with context; used below in RecordWrite.
236 __ str(r1, SlotOperand(slot, r2));
237 // Load the offset into r3.
238 int slot_offset =
239 FixedArray::kHeaderSize + slot->index() * kPointerSize;
240 __ mov(r3, Operand(slot_offset));
241 __ RecordWrite(r2, r3, r1);
242 }
243 }
244 }
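 // RecordWrite above is the write barrier: storing the parameter value (r1)
 // into the heap-allocated context (object in r2, byte offset in r3) must be
 // reported to the garbage collector so that pointers into younger
 // generations are tracked correctly, even though the store itself has
 // already been emitted via SlotOperand.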
245
246 // Store the arguments object. This must happen after context
247 // initialization because the arguments object may be stored in the
248 // context.
249 if (scope_->arguments() != NULL) {
250 Comment cmnt(masm_, "[ allocate arguments object");
251 ASSERT(scope_->arguments_shadow() != NULL);
252 Variable* arguments = scope_->arguments()->var();
253 Variable* shadow = scope_->arguments_shadow()->var();
254 ASSERT(arguments != NULL && arguments->slot() != NULL);
255 ASSERT(shadow != NULL && shadow->slot() != NULL);
256 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
257 __ ldr(r2, frame_->Function());
258 // The receiver is below the arguments, the return address, and the
259 // frame pointer on the stack.
260 const int kReceiverDisplacement = 2 + scope_->num_parameters();
261 __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
262 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
263 frame_->Adjust(3);
264 __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
265 frame_->CallStub(&stub, 3);
266 frame_->EmitPush(r0);
267 StoreToSlot(arguments->slot(), NOT_CONST_INIT);
268 StoreToSlot(shadow->slot(), NOT_CONST_INIT);
269 frame_->Drop(); // Value is no longer needed.
270 }
271
272 // Initialize ThisFunction reference if present.
273 if (scope_->is_function_scope() && scope_->function() != NULL) {
274 __ mov(ip, Operand(Factory::the_hole_value()));
275 frame_->EmitPush(ip);
276 StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
277 }
278
279 // Generate code to 'execute' declarations and initialize functions
280 // (source elements). In case of an illegal redeclaration we need to
281 // handle that instead of processing the declarations.
282 if (scope_->HasIllegalRedeclaration()) {
283 Comment cmnt(masm_, "[ illegal redeclarations");
284 scope_->VisitIllegalRedeclaration(this);
285 } else {
286 Comment cmnt(masm_, "[ declarations");
287 ProcessDeclarations(scope_->declarations());
288 // Bail out if a stack-overflow exception occurred when processing
289 // declarations.
290 if (HasStackOverflow()) return;
291 }
292
293 if (FLAG_trace) {
294 frame_->CallRuntime(Runtime::kTraceEnter, 0);
295 // Ignore the return value.
296 }
297
298 // Compile the body of the function in a vanilla state. Don't
299 // bother compiling all the code if the scope has an illegal
300 // redeclaration.
301 if (!scope_->HasIllegalRedeclaration()) {
302 Comment cmnt(masm_, "[ function body");
303#ifdef DEBUG
304 bool is_builtin = Bootstrapper::IsActive();
305 bool should_trace =
306 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
307 if (should_trace) {
308 frame_->CallRuntime(Runtime::kDebugTrace, 0);
309 // Ignore the return value.
310 }
311#endif
312 VisitStatementsAndSpill(body);
313 }
314 }
315
316 // Generate the return sequence if necessary.
317 if (has_valid_frame() || function_return_.is_linked()) {
318 if (!function_return_.is_linked()) {
319 CodeForReturnPosition(fun);
320 }
321 // exit
322 // r0: result
323 // sp: stack pointer
324 // fp: frame pointer
325 // cp: callee's context
326 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
327
328 function_return_.Bind();
329 if (FLAG_trace) {
330 // Push the return value on the stack as the parameter.
331 // Runtime::TraceExit returns the parameter as it is.
332 frame_->EmitPush(r0);
333 frame_->CallRuntime(Runtime::kTraceExit, 1);
334 }
335
336 // Add a label for checking the size of the code used for returning.
337 Label check_exit_codesize;
338 masm_->bind(&check_exit_codesize);
339
340 // Calculate the exact length of the return sequence and make sure that
341 // the constant pool is not emitted inside of the return sequence.
342 int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
343 int return_sequence_length = Assembler::kJSReturnSequenceLength;
344 if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
345 // Additional mov instruction generated.
346 return_sequence_length++;
347 }
348 masm_->BlockConstPoolFor(return_sequence_length);
349
350 // Tear down the frame which will restore the caller's frame pointer and
351 // the link register.
352 frame_->Exit();
353
 354 // Here we use masm_-> instead of the __ macro to prevent the code coverage
 355 // tool from instrumenting this code, as we rely on the exact code size here.
356 masm_->add(sp, sp, Operand(sp_delta));
357 masm_->Jump(lr);
 358
 359 // Check that the size of the code used for returning matches what is
360 // expected by the debugger. The add instruction above is an addressing
 361 // mode 1 instruction where there are restrictions on which immediate values
 362 // can be encoded in the instruction and which immediate values require
 363 // use of an additional instruction for moving the immediate to a temporary
 364 // register.
 365 ASSERT_EQ(return_sequence_length,
366 masm_->InstructionsGeneratedSince(&check_exit_codesize));
367 }
368
369 // Code generation state must be reset.
370 ASSERT(!has_cc());
371 ASSERT(state_ == NULL);
372 ASSERT(!function_return_is_shadowed_);
373 function_return_.Unuse();
374 DeleteFrame();
375
376 // Process any deferred code using the register allocator.
377 if (!HasStackOverflow()) {
378 ProcessDeferred();
379 }
380
381 allocator_ = NULL;
382 scope_ = NULL;
383}
384
385
386MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
387 // Currently, this assertion will fail if we try to assign to
388 // a constant variable that is constant because it is read-only
389 // (such as the variable referring to a named function expression).
390 // We need to implement assignments to read-only variables.
391 // Ideally, we should do this during AST generation (by converting
392 // such assignments into expression statements); however, in general
393 // we may not be able to make the decision until past AST generation,
394 // that is when the entire program is known.
395 ASSERT(slot != NULL);
396 int index = slot->index();
397 switch (slot->type()) {
398 case Slot::PARAMETER:
399 return frame_->ParameterAt(index);
400
401 case Slot::LOCAL:
402 return frame_->LocalAt(index);
403
404 case Slot::CONTEXT: {
405 // Follow the context chain if necessary.
406 ASSERT(!tmp.is(cp)); // do not overwrite context register
407 Register context = cp;
408 int chain_length = scope()->ContextChainLength(slot->var()->scope());
409 for (int i = 0; i < chain_length; i++) {
410 // Load the closure.
411 // (All contexts, even 'with' contexts, have a closure,
412 // and it is the same for all contexts inside a function.
413 // There is no need to go to the function context first.)
414 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
415 // Load the function context (which is the incoming, outer context).
416 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
417 context = tmp;
418 }
419 // We may have a 'with' context now. Get the function context.
 420 // (In fact this mov may never be needed, since the scope analysis
421 // may not permit a direct context access in this case and thus we are
422 // always at a function context. However it is safe to dereference be-
423 // cause the function context of a function context is itself. Before
424 // deleting this mov we should try to create a counter-example first,
425 // though...)
426 __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
427 return ContextOperand(tmp, index);
428 }
429
430 default:
431 UNREACHABLE();
432 return MemOperand(r0, 0);
433 }
434}
435
436
437MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
438 Slot* slot,
439 Register tmp,
440 Register tmp2,
441 JumpTarget* slow) {
442 ASSERT(slot->type() == Slot::CONTEXT);
443 Register context = cp;
444
445 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
446 if (s->num_heap_slots() > 0) {
447 if (s->calls_eval()) {
448 // Check that extension is NULL.
449 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
450 __ tst(tmp2, tmp2);
451 slow->Branch(ne);
452 }
453 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
454 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
455 context = tmp;
456 }
457 }
458 // Check that last extension is NULL.
459 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
460 __ tst(tmp2, tmp2);
461 slow->Branch(ne);
462 __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
463 return ContextOperand(tmp, slot->index());
464}
465
466
467// Loads a value on TOS. If it is a boolean value, the result may have been
468// (partially) translated into branches, or it may have set the condition
469// code register. If force_cc is set, the value is forced to set the
470// condition code register and no value is pushed. If the condition code
471// register was set, has_cc() is true and cc_reg_ contains the condition to
472// test for 'true'.
473void CodeGenerator::LoadCondition(Expression* x,
474 JumpTarget* true_target,
475 JumpTarget* false_target,
476 bool force_cc) {
477 ASSERT(!has_cc());
478 int original_height = frame_->height();
479
480 { CodeGenState new_state(this, true_target, false_target);
481 Visit(x);
482
483 // If we hit a stack overflow, we may not have actually visited
484 // the expression. In that case, we ensure that we have a
485 // valid-looking frame state because we will continue to generate
486 // code as we unwind the C++ stack.
487 //
488 // It's possible to have both a stack overflow and a valid frame
489 // state (eg, a subexpression overflowed, visiting it returned
490 // with a dummied frame state, and visiting this expression
491 // returned with a normal-looking state).
492 if (HasStackOverflow() &&
493 has_valid_frame() &&
494 !has_cc() &&
495 frame_->height() == original_height) {
496 true_target->Jump();
497 }
498 }
499 if (force_cc && frame_ != NULL && !has_cc()) {
500 // Convert the TOS value to a boolean in the condition code register.
501 ToBoolean(true_target, false_target);
502 }
503 ASSERT(!force_cc || !has_valid_frame() || has_cc());
504 ASSERT(!has_valid_frame() ||
505 (has_cc() && frame_->height() == original_height) ||
506 (!has_cc() && frame_->height() == original_height + 1));
507}
508
509
510void CodeGenerator::Load(Expression* expr) {
511#ifdef DEBUG
512 int original_height = frame_->height();
513#endif
514 JumpTarget true_target;
515 JumpTarget false_target;
516 LoadCondition(expr, &true_target, &false_target, false);
517
518 if (has_cc()) {
519 // Convert cc_reg_ into a boolean value.
520 JumpTarget loaded;
521 JumpTarget materialize_true;
522 materialize_true.Branch(cc_reg_);
523 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
524 frame_->EmitPush(r0);
525 loaded.Jump();
526 materialize_true.Bind();
527 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
528 frame_->EmitPush(r0);
529 loaded.Bind();
530 cc_reg_ = al;
531 }
532
533 if (true_target.is_linked() || false_target.is_linked()) {
534 // We have at least one condition value that has been "translated"
535 // into a branch, thus it needs to be loaded explicitly.
536 JumpTarget loaded;
537 if (frame_ != NULL) {
538 loaded.Jump(); // Don't lose the current TOS.
539 }
540 bool both = true_target.is_linked() && false_target.is_linked();
541 // Load "true" if necessary.
542 if (true_target.is_linked()) {
543 true_target.Bind();
544 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
545 frame_->EmitPush(r0);
546 }
547 // If both "true" and "false" need to be loaded jump across the code for
548 // "false".
549 if (both) {
550 loaded.Jump();
551 }
552 // Load "false" if necessary.
553 if (false_target.is_linked()) {
554 false_target.Bind();
555 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
556 frame_->EmitPush(r0);
557 }
558 // A value is loaded on all paths reaching this point.
559 loaded.Bind();
560 }
561 ASSERT(has_valid_frame());
562 ASSERT(!has_cc());
563 ASSERT(frame_->height() == original_height + 1);
564}
565
566
567void CodeGenerator::LoadGlobal() {
568 VirtualFrame::SpilledScope spilled_scope;
569 __ ldr(r0, GlobalObject());
570 frame_->EmitPush(r0);
571}
572
573
574void CodeGenerator::LoadGlobalReceiver(Register scratch) {
575 VirtualFrame::SpilledScope spilled_scope;
576 __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
577 __ ldr(scratch,
578 FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
579 frame_->EmitPush(scratch);
580}
581
582
583void CodeGenerator::LoadTypeofExpression(Expression* expr) {
 584 // Special handling of identifiers as subexpressions of typeof.
585 VirtualFrame::SpilledScope spilled_scope;
586 Variable* variable = expr->AsVariableProxy()->AsVariable();
587 if (variable != NULL && !variable->is_this() && variable->is_global()) {
588 // For a global variable we build the property reference
589 // <global>.<variable> and perform a (regular non-contextual) property
590 // load to make sure we do not get reference errors.
591 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
592 Literal key(variable->name());
593 Property property(&global, &key, RelocInfo::kNoPosition);
594 Reference ref(this, &property);
595 ref.GetValueAndSpill();
596 } else if (variable != NULL && variable->slot() != NULL) {
597 // For a variable that rewrites to a slot, we signal it is the immediate
598 // subexpression of a typeof.
599 LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
600 frame_->SpillAll();
601 } else {
602 // Anything else can be handled normally.
 603 LoadAndSpill(expr);
604 }
605}
606
607
608Reference::Reference(CodeGenerator* cgen, Expression* expression)
609 : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
610 cgen->LoadReference(this);
611}
612
613
614Reference::~Reference() {
615 cgen_->UnloadReference(this);
616}
617
618
619void CodeGenerator::LoadReference(Reference* ref) {
620 VirtualFrame::SpilledScope spilled_scope;
621 Comment cmnt(masm_, "[ LoadReference");
622 Expression* e = ref->expression();
623 Property* property = e->AsProperty();
624 Variable* var = e->AsVariableProxy()->AsVariable();
625
626 if (property != NULL) {
627 // The expression is either a property or a variable proxy that rewrites
628 // to a property.
629 LoadAndSpill(property->obj());
630 if (property->key()->IsPropertyName()) {
631 ref->set_type(Reference::NAMED);
632 } else {
633 LoadAndSpill(property->key());
634 ref->set_type(Reference::KEYED);
635 }
636 } else if (var != NULL) {
637 // The expression is a variable proxy that does not rewrite to a
638 // property. Global variables are treated as named property references.
639 if (var->is_global()) {
640 LoadGlobal();
641 ref->set_type(Reference::NAMED);
642 } else {
643 ASSERT(var->slot() != NULL);
644 ref->set_type(Reference::SLOT);
645 }
646 } else {
647 // Anything else is a runtime error.
648 LoadAndSpill(e);
649 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
650 }
651}
652
653
654void CodeGenerator::UnloadReference(Reference* ref) {
655 VirtualFrame::SpilledScope spilled_scope;
656 // Pop a reference from the stack while preserving TOS.
657 Comment cmnt(masm_, "[ UnloadReference");
658 int size = ref->size();
659 if (size > 0) {
660 frame_->EmitPop(r0);
661 frame_->Drop(size);
662 frame_->EmitPush(r0);
663 }
664}
665
666
667// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
668// register to a boolean in the condition code register. The code
669// may jump to 'false_target' in case the register converts to 'false'.
670void CodeGenerator::ToBoolean(JumpTarget* true_target,
671 JumpTarget* false_target) {
672 VirtualFrame::SpilledScope spilled_scope;
673 // Note: The generated code snippet does not change stack variables.
674 // Only the condition code should be set.
675 frame_->EmitPop(r0);
676
677 // Fast case checks
678
679 // Check if the value is 'false'.
680 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
681 __ cmp(r0, ip);
682 false_target->Branch(eq);
683
684 // Check if the value is 'true'.
685 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
686 __ cmp(r0, ip);
687 true_target->Branch(eq);
688
689 // Check if the value is 'undefined'.
690 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
691 __ cmp(r0, ip);
692 false_target->Branch(eq);
693
694 // Check if the value is a smi.
695 __ cmp(r0, Operand(Smi::FromInt(0)));
696 false_target->Branch(eq);
697 __ tst(r0, Operand(kSmiTagMask));
698 true_target->Branch(eq);
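 // The two checks above exploit smi tagging: a small integer is encoded as
 // (value << kSmiTagSize) with a zero tag bit, so Smi::FromInt(0) catches the
 // only falsy smi, and "tst r0, #kSmiTagMask" sets the Z flag exactly when r0
 // holds a smi, in which case any remaining (non-zero) smi is truthy.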
699
700 // Slow case: call the runtime.
701 frame_->EmitPush(r0);
702 frame_->CallRuntime(Runtime::kToBool, 1);
703 // Convert the result (r0) to a condition code.
704 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
705 __ cmp(r0, ip);
706
707 cc_reg_ = ne;
708}
709
710
711void CodeGenerator::GenericBinaryOperation(Token::Value op,
712 OverwriteMode overwrite_mode,
713 int constant_rhs) {
714 VirtualFrame::SpilledScope spilled_scope;
715 // sp[0] : y
716 // sp[1] : x
717 // result : r0
718
719 // Stub is entered with a call: 'return address' is in lr.
720 switch (op) {
721 case Token::ADD: // fall through.
722 case Token::SUB: // fall through.
723 case Token::MUL:
724 case Token::DIV:
725 case Token::MOD:
726 case Token::BIT_OR:
727 case Token::BIT_AND:
728 case Token::BIT_XOR:
729 case Token::SHL:
730 case Token::SHR:
731 case Token::SAR: {
732 frame_->EmitPop(r0); // r0 : y
733 frame_->EmitPop(r1); // r1 : x
734 GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
735 frame_->CallStub(&stub, 0);
736 break;
737 }
738
739 case Token::COMMA:
740 frame_->EmitPop(r0);
741 // simply discard left value
742 frame_->Drop();
743 break;
744
745 default:
746 // Other cases should have been handled before this point.
747 UNREACHABLE();
748 break;
749 }
750}
751
752
753class DeferredInlineSmiOperation: public DeferredCode {
754 public:
755 DeferredInlineSmiOperation(Token::Value op,
756 int value,
757 bool reversed,
758 OverwriteMode overwrite_mode)
759 : op_(op),
760 value_(value),
761 reversed_(reversed),
762 overwrite_mode_(overwrite_mode) {
763 set_comment("[ DeferredInlinedSmiOperation");
764 }
765
766 virtual void Generate();
767
768 private:
769 Token::Value op_;
770 int value_;
771 bool reversed_;
772 OverwriteMode overwrite_mode_;
773};
774
775
776void DeferredInlineSmiOperation::Generate() {
777 switch (op_) {
778 case Token::ADD: {
779 // Revert optimistic add.
780 if (reversed_) {
781 __ sub(r0, r0, Operand(Smi::FromInt(value_)));
782 __ mov(r1, Operand(Smi::FromInt(value_)));
783 } else {
784 __ sub(r1, r0, Operand(Smi::FromInt(value_)));
785 __ mov(r0, Operand(Smi::FromInt(value_)));
786 }
787 break;
788 }
789
790 case Token::SUB: {
791 // Revert optimistic sub.
792 if (reversed_) {
793 __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
794 __ mov(r1, Operand(Smi::FromInt(value_)));
795 } else {
796 __ add(r1, r0, Operand(Smi::FromInt(value_)));
797 __ mov(r0, Operand(Smi::FromInt(value_)));
798 }
799 break;
800 }
801
802 // For these operations there is no optimistic operation that needs to be
803 // reverted.
804 case Token::MUL:
805 case Token::MOD:
806 case Token::BIT_OR:
807 case Token::BIT_XOR:
808 case Token::BIT_AND: {
809 if (reversed_) {
810 __ mov(r1, Operand(Smi::FromInt(value_)));
811 } else {
812 __ mov(r1, Operand(r0));
813 __ mov(r0, Operand(Smi::FromInt(value_)));
814 }
815 break;
816 }
817
818 case Token::SHL:
819 case Token::SHR:
820 case Token::SAR: {
821 if (!reversed_) {
822 __ mov(r1, Operand(r0));
823 __ mov(r0, Operand(Smi::FromInt(value_)));
824 } else {
825 UNREACHABLE(); // Should have been handled in SmiOperation.
826 }
827 break;
828 }
829
830 default:
831 // Other cases should have been handled before this point.
832 UNREACHABLE();
833 break;
834 }
835
836 GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
837 __ CallStub(&stub);
838}
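// The ADD/SUB cases above undo an "optimistic" inline operation: the fast
// path in SmiOperation() has already applied the literal to the (possibly
// non-smi, possibly overflowed) value in r0, so the deferred path first
// reconstructs the original operands before handing them to
// GenericBinaryOpStub in (r1, r0). A rough sketch for "x + 5" with
// reversed_ == false (illustrative only, not emitted verbatim):
//
//   add r0, r0, #Smi(5)    ; optimistic add, sets the condition flags
//   bvs deferred           ; overflow -> revert and call the generic stub
//   tst r0, #kSmiTagMask   ; non-smi input -> revert and call the stub
//   bne deferred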
839
840
841static bool PopCountLessThanEqual2(unsigned int x) {
842 x &= x - 1;
843 return (x & (x - 1)) == 0;
844}
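// "x &= x - 1" clears the lowest set bit, so clearing it once and testing the
// same expression again returns true exactly when x has at most two bits set.
// For example, 0x28 (two bits set) passes, while 0x2c (three bits) does not.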
845
846
847// Returns the index of the lowest bit set.
848static int BitPosition(unsigned x) {
849 int bit_posn = 0;
850 while ((x & 0xf) == 0) {
851 bit_posn += 4;
852 x >>= 4;
853 }
854 while ((x & 1) == 0) {
855 bit_posn++;
856 x >>= 1;
857 }
858 return bit_posn;
859}
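// Note that BitPosition() only terminates for a non-zero argument; callers
// are expected to pass a multiplier that has already been screened elsewhere.
// Example: BitPosition(0x30) == 4, since the lowest set bit is bit 4.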
860
861
862void CodeGenerator::SmiOperation(Token::Value op,
863 Handle<Object> value,
864 bool reversed,
865 OverwriteMode mode) {
866 VirtualFrame::SpilledScope spilled_scope;
867 // NOTE: This is an attempt to inline (a bit) more of the code for
868 // some possible smi operations (like + and -) when (at least) one
869 // of the operands is a literal smi. With this optimization, the
870 // performance of the system is increased by ~15%, and the generated
871 // code size is increased by ~1% (measured on a combination of
872 // different benchmarks).
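 // On this 32-bit port a smi is the integer shifted left by kSmiTagSize with
 // a zero tag bit, so for instance Smi::FromInt(3) is the word 6. That is
 // what lets the cases below add, subtract, mask and shift tagged values
 // directly, only falling back to DeferredInlineSmiOperation when the operand
 // turns out not to be a smi or the result overflows.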
873
874 // sp[0] : operand
875
876 int int_value = Smi::cast(*value)->value();
877
878 JumpTarget exit;
879 frame_->EmitPop(r0);
880
881 bool something_to_inline = true;
882 switch (op) {
883 case Token::ADD: {
884 DeferredCode* deferred =
885 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
886
887 __ add(r0, r0, Operand(value), SetCC);
888 deferred->Branch(vs);
889 __ tst(r0, Operand(kSmiTagMask));
890 deferred->Branch(ne);
891 deferred->BindExit();
892 break;
893 }
894
895 case Token::SUB: {
896 DeferredCode* deferred =
897 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
898
899 if (reversed) {
900 __ rsb(r0, r0, Operand(value), SetCC);
901 } else {
902 __ sub(r0, r0, Operand(value), SetCC);
903 }
904 deferred->Branch(vs);
905 __ tst(r0, Operand(kSmiTagMask));
906 deferred->Branch(ne);
907 deferred->BindExit();
908 break;
909 }
910
911
912 case Token::BIT_OR:
913 case Token::BIT_XOR:
914 case Token::BIT_AND: {
915 DeferredCode* deferred =
916 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
917 __ tst(r0, Operand(kSmiTagMask));
918 deferred->Branch(ne);
919 switch (op) {
920 case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
921 case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
922 case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
923 default: UNREACHABLE();
924 }
925 deferred->BindExit();
926 break;
927 }
928
929 case Token::SHL:
930 case Token::SHR:
931 case Token::SAR: {
932 if (reversed) {
933 something_to_inline = false;
934 break;
935 }
936 int shift_value = int_value & 0x1f; // least significant 5 bits
937 DeferredCode* deferred =
938 new DeferredInlineSmiOperation(op, shift_value, false, mode);
939 __ tst(r0, Operand(kSmiTagMask));
940 deferred->Branch(ne);
941 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
942 switch (op) {
943 case Token::SHL: {
944 if (shift_value != 0) {
945 __ mov(r2, Operand(r2, LSL, shift_value));
946 }
947 // check that the *unsigned* result fits in a smi
948 __ add(r3, r2, Operand(0x40000000), SetCC);
949 deferred->Branch(mi);
950 break;
951 }
952 case Token::SHR: {
953 // LSR by immediate 0 means shifting 32 bits.
954 if (shift_value != 0) {
955 __ mov(r2, Operand(r2, LSR, shift_value));
956 }
957 // check that the *unsigned* result fits in a smi
958 // neither of the two high-order bits can be set:
959 // - 0x80000000: high bit would be lost when smi tagging
960 // - 0x40000000: this number would convert to negative when
 961 // smi tagging. These two cases can only happen with shifts
 962 // by 0 or 1 when handed a valid smi.
963 __ and_(r3, r2, Operand(0xc0000000), SetCC);
964 deferred->Branch(ne);
965 break;
966 }
967 case Token::SAR: {
968 if (shift_value != 0) {
969 // ASR by immediate 0 means shifting 32 bits.
970 __ mov(r2, Operand(r2, ASR, shift_value));
971 }
972 break;
973 }
974 default: UNREACHABLE();
975 }
976 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
977 deferred->BindExit();
978 break;
979 }
980
981 case Token::MOD: {
982 if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
983 something_to_inline = false;
984 break;
985 }
986 DeferredCode* deferred =
987 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
988 unsigned mask = (0x80000000u | kSmiTagMask);
989 __ tst(r0, Operand(mask));
990 deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
991 mask = (int_value << kSmiTagSize) - 1;
992 __ and_(r0, r0, Operand(mask));
993 deferred->BindExit();
994 break;
995 }
996
997 case Token::MUL: {
998 if (!IsEasyToMultiplyBy(int_value)) {
999 something_to_inline = false;
1000 break;
1001 }
1002 DeferredCode* deferred =
1003 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
1004 unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
1005 max_smi_that_wont_overflow <<= kSmiTagSize;
1006 unsigned mask = 0x80000000u;
1007 while ((mask & max_smi_that_wont_overflow) == 0) {
1008 mask |= mask >> 1;
1009 }
1010 mask |= kSmiTagMask;
1011 // This does a single mask that checks for a too high value in a
1012 // conservative way and for a non-Smi. It also filters out negative
1013 // numbers, unfortunately, but since this code is inline we prefer
1014 // brevity to comprehensiveness.
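 // Worked example (assuming 31-bit smis, Smi::kMaxValue == 0x3fffffff): for
 // int_value == 3 the largest safe operand is 0x15555555, or 0x2aaaaaaa once
 // tagged, so the loop grows the mask to 0xe0000000; together with
 // kSmiTagMask the tst then rejects non-smis and any operand with one of the
 // top three bits set, which conservatively covers everything that could
 // overflow.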
1015 __ tst(r0, Operand(mask));
1016 deferred->Branch(ne);
1017 MultiplyByKnownInt(masm_, r0, r0, int_value);
1018 deferred->BindExit();
1019 break;
1020 }
1021
1022 default:
1023 something_to_inline = false;
1024 break;
1025 }
1026
1027 if (!something_to_inline) {
1028 if (!reversed) {
1029 frame_->EmitPush(r0);
1030 __ mov(r0, Operand(value));
1031 frame_->EmitPush(r0);
1032 GenericBinaryOperation(op, mode, int_value);
1033 } else {
1034 __ mov(ip, Operand(value));
1035 frame_->EmitPush(ip);
1036 frame_->EmitPush(r0);
1037 GenericBinaryOperation(op, mode, kUnknownIntValue);
1038 }
1039 }
1040
1041 exit.Bind();
1042}
1043
1044
1045void CodeGenerator::Comparison(Condition cc,
1046 Expression* left,
1047 Expression* right,
1048 bool strict) {
1049 if (left != NULL) LoadAndSpill(left);
1050 if (right != NULL) LoadAndSpill(right);
1051
1052 VirtualFrame::SpilledScope spilled_scope;
1053 // sp[0] : y
1054 // sp[1] : x
1055 // result : cc register
1056
1057 // Strict only makes sense for equality comparisons.
1058 ASSERT(!strict || cc == eq);
1059
1060 JumpTarget exit;
1061 JumpTarget smi;
1062 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
1063 if (cc == gt || cc == le) {
1064 cc = ReverseCondition(cc);
1065 frame_->EmitPop(r1);
1066 frame_->EmitPop(r0);
1067 } else {
1068 frame_->EmitPop(r0);
1069 frame_->EmitPop(r1);
1070 }
1071 __ orr(r2, r0, Operand(r1));
1072 __ tst(r2, Operand(kSmiTagMask));
1073 smi.Branch(eq);
1074
1075 // Perform non-smi comparison by stub.
1076 // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
1077 // We call with 0 args because there are 0 on the stack.
1078 CompareStub stub(cc, strict);
1079 frame_->CallStub(&stub, 0);
1080 __ cmp(r0, Operand(0));
1081 exit.Jump();
1082
1083 // Do smi comparisons by pointer comparison.
1084 smi.Bind();
1085 __ cmp(r1, Operand(r0));
1086
1087 exit.Bind();
1088 cc_reg_ = cc;
1089}
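// The smi fast path above compares the tagged words directly: two smis are
// just the original integers shifted by the tag size, so their relative
// signed order is preserved and no untagging is needed. Everything else goes
// through CompareStub, which leaves a value <0, ==0 or >0 in r0, and the cmp
// against 0 re-establishes the condition flags that callers of Comparison()
// consume via cc_reg_.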
1090
1091
1092// Call the function on the stack with the given arguments.
 1093void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
1094 CallFunctionFlags flags,
 1095 int position) {
1096 VirtualFrame::SpilledScope spilled_scope;
1097 // Push the arguments ("left-to-right") on the stack.
1098 int arg_count = args->length();
1099 for (int i = 0; i < arg_count; i++) {
1100 LoadAndSpill(args->at(i));
1101 }
1102
1103 // Record the position for debugging purposes.
1104 CodeForSourcePosition(position);
1105
1106 // Use the shared code stub to call the function.
1107 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
1108 CallFunctionStub call_function(arg_count, in_loop, flags);
1109 frame_->CallStub(&call_function, arg_count + 1);
1110
1111 // Restore context and pop function from the stack.
1112 __ ldr(cp, frame_->Context());
1113 frame_->Drop(); // discard the TOS
1114}
1115
1116
1117void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
1118 VirtualFrame::SpilledScope spilled_scope;
1119 ASSERT(has_cc());
1120 Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
1121 target->Branch(cc);
1122 cc_reg_ = al;
1123}
1124
1125
1126void CodeGenerator::CheckStack() {
1127 VirtualFrame::SpilledScope spilled_scope;
1128 Comment cmnt(masm_, "[ check stack");
1129 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
1130 // Put the lr setup instruction in the delay slot. kInstrSize is added to
1131 // the implicit 8 byte offset that always applies to operations with pc and
1132 // gives a return address 12 bytes down.
1133 masm_->add(lr, pc, Operand(Assembler::kInstrSize));
1134 masm_->cmp(sp, Operand(ip));
1135 StackCheckStub stub;
1136 // Call the stub if lower.
1137 masm_->mov(pc,
1138 Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
1139 RelocInfo::CODE_TARGET),
1140 LeaveCC,
1141 lo);
1142}
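// The sequence emitted in CheckStack() behaves like a conditional call:
// because the ARM pc reads 8 bytes ahead, "add lr, pc, #kInstrSize" leaves lr
// pointing just past the final mov, and that mov only executes (condition
// "lo") when sp is below the stack limit, transferring control to
// StackCheckStub with a correct return address already in lr.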
1143
1144
1145void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
1146#ifdef DEBUG
1147 int original_height = frame_->height();
1148#endif
1149 VirtualFrame::SpilledScope spilled_scope;
1150 for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
1151 VisitAndSpill(statements->at(i));
1152 }
1153 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1154}
1155
1156
1157void CodeGenerator::VisitBlock(Block* node) {
1158#ifdef DEBUG
1159 int original_height = frame_->height();
1160#endif
1161 VirtualFrame::SpilledScope spilled_scope;
1162 Comment cmnt(masm_, "[ Block");
1163 CodeForStatementPosition(node);
1164 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1165 VisitStatementsAndSpill(node->statements());
1166 if (node->break_target()->is_linked()) {
1167 node->break_target()->Bind();
1168 }
1169 node->break_target()->Unuse();
1170 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1171}
1172
1173
1174void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
1175 VirtualFrame::SpilledScope spilled_scope;
1176 frame_->EmitPush(cp);
1177 __ mov(r0, Operand(pairs));
 1178 frame_->EmitPush(r0);
1179 __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
1180 frame_->EmitPush(r0);
1181 frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
1182 // The result is discarded.
1183}
1184
1185
1186void CodeGenerator::VisitDeclaration(Declaration* node) {
1187#ifdef DEBUG
1188 int original_height = frame_->height();
1189#endif
1190 VirtualFrame::SpilledScope spilled_scope;
1191 Comment cmnt(masm_, "[ Declaration");
1192 Variable* var = node->proxy()->var();
1193 ASSERT(var != NULL); // must have been resolved
1194 Slot* slot = var->slot();
1195
1196 // If it was not possible to allocate the variable at compile time,
1197 // we need to "declare" it at runtime to make sure it actually
1198 // exists in the local context.
1199 if (slot != NULL && slot->type() == Slot::LOOKUP) {
1200 // Variables with a "LOOKUP" slot were introduced as non-locals
1201 // during variable resolution and must have mode DYNAMIC.
1202 ASSERT(var->is_dynamic());
1203 // For now, just do a runtime call.
1204 frame_->EmitPush(cp);
1205 __ mov(r0, Operand(var->name()));
1206 frame_->EmitPush(r0);
1207 // Declaration nodes are always declared in only two modes.
1208 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1209 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
1210 __ mov(r0, Operand(Smi::FromInt(attr)));
1211 frame_->EmitPush(r0);
1212 // Push initial value, if any.
1213 // Note: For variables we must not push an initial value (such as
1214 // 'undefined') because we may have a (legal) redeclaration and we
1215 // must not destroy the current value.
1216 if (node->mode() == Variable::CONST) {
1217 __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
1218 frame_->EmitPush(r0);
1219 } else if (node->fun() != NULL) {
1220 LoadAndSpill(node->fun());
1221 } else {
1222 __ mov(r0, Operand(0)); // no initial value!
1223 frame_->EmitPush(r0);
1224 }
1225 frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1226 // Ignore the return value (declarations are statements).
1227 ASSERT(frame_->height() == original_height);
1228 return;
1229 }
1230
1231 ASSERT(!var->is_global());
1232
1233 // If we have a function or a constant, we need to initialize the variable.
1234 Expression* val = NULL;
1235 if (node->mode() == Variable::CONST) {
1236 val = new Literal(Factory::the_hole_value());
1237 } else {
1238 val = node->fun(); // NULL if we don't have a function
1239 }
1240
1241 if (val != NULL) {
1242 {
1243 // Set initial value.
1244 Reference target(this, node->proxy());
1245 LoadAndSpill(val);
1246 target.SetValue(NOT_CONST_INIT);
1247 // The reference is removed from the stack (preserving TOS) when
1248 // it goes out of scope.
1249 }
1250 // Get rid of the assigned value (declarations are statements).
1251 frame_->Drop();
1252 }
1253 ASSERT(frame_->height() == original_height);
1254}
1255
1256
1257void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
1258#ifdef DEBUG
1259 int original_height = frame_->height();
1260#endif
1261 VirtualFrame::SpilledScope spilled_scope;
1262 Comment cmnt(masm_, "[ ExpressionStatement");
1263 CodeForStatementPosition(node);
1264 Expression* expression = node->expression();
1265 expression->MarkAsStatement();
1266 LoadAndSpill(expression);
1267 frame_->Drop();
1268 ASSERT(frame_->height() == original_height);
1269}
1270
1271
1272void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
1273#ifdef DEBUG
1274 int original_height = frame_->height();
1275#endif
1276 VirtualFrame::SpilledScope spilled_scope;
1277 Comment cmnt(masm_, "// EmptyStatement");
1278 CodeForStatementPosition(node);
1279 // nothing to do
1280 ASSERT(frame_->height() == original_height);
1281}
1282
1283
1284void CodeGenerator::VisitIfStatement(IfStatement* node) {
1285#ifdef DEBUG
1286 int original_height = frame_->height();
1287#endif
1288 VirtualFrame::SpilledScope spilled_scope;
1289 Comment cmnt(masm_, "[ IfStatement");
1290 // Generate different code depending on which parts of the if statement
1291 // are present or not.
1292 bool has_then_stm = node->HasThenStatement();
1293 bool has_else_stm = node->HasElseStatement();
1294
1295 CodeForStatementPosition(node);
1296
1297 JumpTarget exit;
1298 if (has_then_stm && has_else_stm) {
1299 Comment cmnt(masm_, "[ IfThenElse");
1300 JumpTarget then;
1301 JumpTarget else_;
1302 // if (cond)
1303 LoadConditionAndSpill(node->condition(), &then, &else_, true);
1304 if (frame_ != NULL) {
1305 Branch(false, &else_);
1306 }
1307 // then
1308 if (frame_ != NULL || then.is_linked()) {
1309 then.Bind();
1310 VisitAndSpill(node->then_statement());
1311 }
1312 if (frame_ != NULL) {
1313 exit.Jump();
1314 }
1315 // else
1316 if (else_.is_linked()) {
1317 else_.Bind();
1318 VisitAndSpill(node->else_statement());
1319 }
1320
1321 } else if (has_then_stm) {
1322 Comment cmnt(masm_, "[ IfThen");
1323 ASSERT(!has_else_stm);
1324 JumpTarget then;
1325 // if (cond)
1326 LoadConditionAndSpill(node->condition(), &then, &exit, true);
1327 if (frame_ != NULL) {
1328 Branch(false, &exit);
1329 }
1330 // then
1331 if (frame_ != NULL || then.is_linked()) {
1332 then.Bind();
1333 VisitAndSpill(node->then_statement());
1334 }
1335
1336 } else if (has_else_stm) {
1337 Comment cmnt(masm_, "[ IfElse");
1338 ASSERT(!has_then_stm);
1339 JumpTarget else_;
1340 // if (!cond)
1341 LoadConditionAndSpill(node->condition(), &exit, &else_, true);
1342 if (frame_ != NULL) {
1343 Branch(true, &exit);
1344 }
1345 // else
1346 if (frame_ != NULL || else_.is_linked()) {
1347 else_.Bind();
1348 VisitAndSpill(node->else_statement());
1349 }
1350
1351 } else {
1352 Comment cmnt(masm_, "[ If");
1353 ASSERT(!has_then_stm && !has_else_stm);
1354 // if (cond)
1355 LoadConditionAndSpill(node->condition(), &exit, &exit, false);
1356 if (frame_ != NULL) {
1357 if (has_cc()) {
1358 cc_reg_ = al;
1359 } else {
1360 frame_->Drop();
1361 }
1362 }
1363 }
1364
1365 // end
1366 if (exit.is_linked()) {
1367 exit.Bind();
1368 }
1369 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1370}
1371
1372
1373void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
1374 VirtualFrame::SpilledScope spilled_scope;
1375 Comment cmnt(masm_, "[ ContinueStatement");
1376 CodeForStatementPosition(node);
1377 node->target()->continue_target()->Jump();
1378}
1379
1380
1381void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
1382 VirtualFrame::SpilledScope spilled_scope;
1383 Comment cmnt(masm_, "[ BreakStatement");
1384 CodeForStatementPosition(node);
1385 node->target()->break_target()->Jump();
1386}
1387
1388
1389void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
1390 VirtualFrame::SpilledScope spilled_scope;
1391 Comment cmnt(masm_, "[ ReturnStatement");
1392
1393 CodeForStatementPosition(node);
1394 LoadAndSpill(node->expression());
1395 if (function_return_is_shadowed_) {
1396 frame_->EmitPop(r0);
1397 function_return_.Jump();
1398 } else {
1399 // Pop the result from the frame and prepare the frame for
1400 // returning thus making it easier to merge.
1401 frame_->EmitPop(r0);
1402 frame_->PrepareForReturn();
1403
1404 function_return_.Jump();
1405 }
1406}
1407
1408
1409void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1410#ifdef DEBUG
1411 int original_height = frame_->height();
1412#endif
1413 VirtualFrame::SpilledScope spilled_scope;
1414 Comment cmnt(masm_, "[ WithEnterStatement");
1415 CodeForStatementPosition(node);
1416 LoadAndSpill(node->expression());
1417 if (node->is_catch_block()) {
1418 frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1419 } else {
1420 frame_->CallRuntime(Runtime::kPushContext, 1);
1421 }
1422#ifdef DEBUG
1423 JumpTarget verified_true;
1424 __ cmp(r0, Operand(cp));
1425 verified_true.Branch(eq);
1426 __ stop("PushContext: r0 is expected to be the same as cp");
1427 verified_true.Bind();
1428#endif
1429 // Update context local.
1430 __ str(cp, frame_->Context());
1431 ASSERT(frame_->height() == original_height);
1432}
1433
1434
1435void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1436#ifdef DEBUG
1437 int original_height = frame_->height();
1438#endif
1439 VirtualFrame::SpilledScope spilled_scope;
1440 Comment cmnt(masm_, "[ WithExitStatement");
1441 CodeForStatementPosition(node);
1442 // Pop context.
1443 __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
1444 // Update context local.
1445 __ str(cp, frame_->Context());
1446 ASSERT(frame_->height() == original_height);
1447}
1448
1449
1450void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
1451#ifdef DEBUG
1452 int original_height = frame_->height();
1453#endif
1454 VirtualFrame::SpilledScope spilled_scope;
1455 Comment cmnt(masm_, "[ SwitchStatement");
1456 CodeForStatementPosition(node);
1457 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1458
1459 LoadAndSpill(node->tag());
1460
1461 JumpTarget next_test;
1462 JumpTarget fall_through;
1463 JumpTarget default_entry;
1464 JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
1465 ZoneList<CaseClause*>* cases = node->cases();
1466 int length = cases->length();
1467 CaseClause* default_clause = NULL;
1468
1469 for (int i = 0; i < length; i++) {
1470 CaseClause* clause = cases->at(i);
1471 if (clause->is_default()) {
1472 // Remember the default clause and compile it at the end.
1473 default_clause = clause;
1474 continue;
1475 }
1476
1477 Comment cmnt(masm_, "[ Case clause");
1478 // Compile the test.
1479 next_test.Bind();
1480 next_test.Unuse();
1481 // Duplicate TOS.
1482 __ ldr(r0, frame_->Top());
1483 frame_->EmitPush(r0);
1484 Comparison(eq, NULL, clause->label(), true);
1485 Branch(false, &next_test);
1486
1487 // Before entering the body from the test, remove the switch value from
1488 // the stack.
1489 frame_->Drop();
1490
1491 // Label the body so that fall through is enabled.
1492 if (i > 0 && cases->at(i - 1)->is_default()) {
1493 default_exit.Bind();
1494 } else {
1495 fall_through.Bind();
1496 fall_through.Unuse();
1497 }
1498 VisitStatementsAndSpill(clause->statements());
1499
1500 // If control flow can fall through from the body, jump to the next body
1501 // or the end of the statement.
1502 if (frame_ != NULL) {
1503 if (i < length - 1 && cases->at(i + 1)->is_default()) {
1504 default_entry.Jump();
1505 } else {
1506 fall_through.Jump();
1507 }
1508 }
1509 }
1510
1511 // The final "test" removes the switch value.
1512 next_test.Bind();
1513 frame_->Drop();
1514
1515 // If there is a default clause, compile it.
1516 if (default_clause != NULL) {
1517 Comment cmnt(masm_, "[ Default clause");
1518 default_entry.Bind();
1519 VisitStatementsAndSpill(default_clause->statements());
1520 // If control flow can fall out of the default and there is a case after
 1521 // it, jump to that case's body.
1522 if (frame_ != NULL && default_exit.is_bound()) {
1523 default_exit.Jump();
1524 }
1525 }
1526
1527 if (fall_through.is_linked()) {
1528 fall_through.Bind();
1529 }
1530
1531 if (node->break_target()->is_linked()) {
1532 node->break_target()->Bind();
1533 }
1534 node->break_target()->Unuse();
1535 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1536}
1537
1538
1539void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
1540#ifdef DEBUG
1541 int original_height = frame_->height();
1542#endif
1543 VirtualFrame::SpilledScope spilled_scope;
1544 Comment cmnt(masm_, "[ DoWhileStatement");
1545 CodeForStatementPosition(node);
1546 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1547 JumpTarget body(JumpTarget::BIDIRECTIONAL);
1548
1549 // Label the top of the loop for the backward CFG edge. If the test
1550 // is always true we can use the continue target, and if the test is
1551 // always false there is no need.
1552 ConditionAnalysis info = AnalyzeCondition(node->cond());
1553 switch (info) {
1554 case ALWAYS_TRUE:
1555 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
 1556 node->continue_target()->Bind();
1557 break;
1558 case ALWAYS_FALSE:
1559 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1560 break;
1561 case DONT_KNOW:
1562 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1563 body.Bind();
1564 break;
1565 }
1566
1567 CheckStack(); // TODO(1222600): ignore if body contains calls.
1568 VisitAndSpill(node->body());
1569
1570 // Compile the test.
1571 switch (info) {
1572 case ALWAYS_TRUE:
1573 // If control can fall off the end of the body, jump back to the
1574 // top.
1575 if (has_valid_frame()) {
1576 node->continue_target()->Jump();
1577 }
1578 break;
1579 case ALWAYS_FALSE:
1580 // If we have a continue in the body, we only have to bind its
1581 // jump target.
1582 if (node->continue_target()->is_linked()) {
1583 node->continue_target()->Bind();
1584 }
1585 break;
1586 case DONT_KNOW:
1587 // We have to compile the test expression if it can be reached by
1588 // control flow falling out of the body or via continue.
1589 if (node->continue_target()->is_linked()) {
1590 node->continue_target()->Bind();
1591 }
1592 if (has_valid_frame()) {
1593 Comment cmnt(masm_, "[ DoWhileCondition");
1594 CodeForDoWhileConditionPosition(node);
1595 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
1596 if (has_valid_frame()) {
1597 // An invalid frame here indicates that control did not
1598 // fall out of the test expression.
1599 Branch(true, &body);
1600 }
1601 }
1602 break;
1603 }
1604
1605 if (node->break_target()->is_linked()) {
1606 node->break_target()->Bind();
1607 }
1608 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1609}
1610
1611
1612void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1613#ifdef DEBUG
1614 int original_height = frame_->height();
1615#endif
1616 VirtualFrame::SpilledScope spilled_scope;
1617 Comment cmnt(masm_, "[ WhileStatement");
1618 CodeForStatementPosition(node);
1619
1620 // If the test is never true and has no side effects there is no need
1621 // to compile the test or body.
1622 ConditionAnalysis info = AnalyzeCondition(node->cond());
1623 if (info == ALWAYS_FALSE) return;
1624
1625 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1626
1627 // Label the top of the loop with the continue target for the backward
1628 // CFG edge.
1629 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1630 node->continue_target()->Bind();
1631
1632 if (info == DONT_KNOW) {
1633 JumpTarget body;
1634 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
1635 if (has_valid_frame()) {
1636 // A NULL frame indicates that control did not fall out of the
1637 // test expression.
1638 Branch(false, node->break_target());
1639 }
1640 if (has_valid_frame() || body.is_linked()) {
1641 body.Bind();
1642 }
1643 }
1644
1645 if (has_valid_frame()) {
1646 CheckStack(); // TODO(1222600): ignore if body contains calls.
1647 VisitAndSpill(node->body());
1648
1649 // If control flow can fall out of the body, jump back to the top.
1650 if (has_valid_frame()) {
1651 node->continue_target()->Jump();
1652 }
1653 }
1654 if (node->break_target()->is_linked()) {
1655 node->break_target()->Bind();
1656 }
1657 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1658}
1659
1660
1661void CodeGenerator::VisitForStatement(ForStatement* node) {
1662#ifdef DEBUG
1663 int original_height = frame_->height();
1664#endif
1665 VirtualFrame::SpilledScope spilled_scope;
1666 Comment cmnt(masm_, "[ ForStatement");
1667 CodeForStatementPosition(node);
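  // JavaScript example (illustrative only, not from the source):
  // 'for (var i = 0; i < n; i++) { body(i); }'.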
1668 if (node->init() != NULL) {
1669 VisitAndSpill(node->init());
1670 }
1671
1672 // If the test is never true there is no need to compile the test or
1673 // body.
1674 ConditionAnalysis info = AnalyzeCondition(node->cond());
1675 if (info == ALWAYS_FALSE) return;
1676
1677 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1678
1679 // If there is no update statement, label the top of the loop with the
1680 // continue target, otherwise with the loop target.
1681 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1682 if (node->next() == NULL) {
1683 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1684 node->continue_target()->Bind();
1685 } else {
1686 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1687 loop.Bind();
1688 }
1689
1690 // If the test is always true, there is no need to compile it.
1691 if (info == DONT_KNOW) {
1692 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001693 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001694 if (has_valid_frame()) {
1695 Branch(false, node->break_target());
1696 }
1697 if (has_valid_frame() || body.is_linked()) {
1698 body.Bind();
1699 }
1700 }
1701
1702 if (has_valid_frame()) {
1703 CheckStack(); // TODO(1222600): ignore if body contains calls.
1704 VisitAndSpill(node->body());
1705
1706 if (node->next() == NULL) {
1707 // If there is no update statement and control flow can fall out
1708 // of the loop, jump directly to the continue label.
1709 if (has_valid_frame()) {
1710 node->continue_target()->Jump();
1711 }
1712 } else {
1713 // If there is an update statement and control flow can reach it
1714 // via falling out of the body of the loop or continuing, we
1715 // compile the update statement.
1716 if (node->continue_target()->is_linked()) {
1717 node->continue_target()->Bind();
1718 }
1719 if (has_valid_frame()) {
1720        // Record the source position of the statement, because this code,
1721        // which comes after the code for the body, actually belongs to the
1722        // loop statement and not to the body.
1723 CodeForStatementPosition(node);
1724 VisitAndSpill(node->next());
1725 loop.Jump();
1726 }
1727 }
1728 }
1729 if (node->break_target()->is_linked()) {
1730 node->break_target()->Bind();
1731 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001732 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1733}
1734
1735
1736void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1737#ifdef DEBUG
1738 int original_height = frame_->height();
1739#endif
1740 VirtualFrame::SpilledScope spilled_scope;
1741 Comment cmnt(masm_, "[ ForInStatement");
1742 CodeForStatementPosition(node);
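  // JavaScript example (illustrative only, not from the source):
  // 'for (var key in obj) { use(key, obj[key]); }'.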
1743
1744 JumpTarget primitive;
1745 JumpTarget jsobject;
1746 JumpTarget fixed_array;
1747 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1748 JumpTarget end_del_check;
1749 JumpTarget exit;
1750
1751 // Get the object to enumerate over (converted to JSObject).
1752 LoadAndSpill(node->enumerable());
1753
1754 // Both SpiderMonkey and kjs ignore null and undefined in contrast
1755  // to the specification; section 12.6.4 mandates a call to ToObject.
1756 frame_->EmitPop(r0);
1757 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
1758 __ cmp(r0, ip);
1759 exit.Branch(eq);
1760 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1761 __ cmp(r0, ip);
1762 exit.Branch(eq);
1763
1764 // Stack layout in body:
1765 // [iteration counter (Smi)]
1766 // [length of array]
1767 // [FixedArray]
1768 // [Map or 0]
1769 // [Object]
1770
1771 // Check if enumerable is already a JSObject
1772 __ tst(r0, Operand(kSmiTagMask));
1773 primitive.Branch(eq);
1774 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
1775 jsobject.Branch(hs);
1776
1777 primitive.Bind();
1778 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00001779 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00001780
1781 jsobject.Bind();
1782 // Get the set of properties (as a FixedArray or Map).
Steve Blockd0582a62009-12-15 09:54:21 +00001783 // r0: value to be iterated over
1784 frame_->EmitPush(r0); // Push the object being iterated over.
1785
1786 // Check cache validity in generated code. This is a fast case for
1787 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
1788 // guarantee cache validity, call the runtime system to check cache
1789 // validity or get the property names in a fixed array.
1790 JumpTarget call_runtime;
1791 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1792 JumpTarget check_prototype;
1793 JumpTarget use_cache;
1794 __ mov(r1, Operand(r0));
1795 loop.Bind();
1796 // Check that there are no elements.
1797 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
1798 __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
1799 __ cmp(r2, r4);
1800 call_runtime.Branch(ne);
1801 // Check that instance descriptors are not empty so that we can
1802 // check for an enum cache. Leave the map in r3 for the subsequent
1803 // prototype load.
1804 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
1805 __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
1806 __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
1807 __ cmp(r2, ip);
1808 call_runtime.Branch(eq);
1809  // Check that there is an enum cache in the non-empty instance
1810 // descriptors. This is the case if the next enumeration index
1811 // field does not contain a smi.
1812 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
1813 __ tst(r2, Operand(kSmiTagMask));
1814 call_runtime.Branch(eq);
1815 // For all objects but the receiver, check that the cache is empty.
1816 // r4: empty fixed array root.
1817 __ cmp(r1, r0);
1818 check_prototype.Branch(eq);
1819 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
1820 __ cmp(r2, r4);
1821 call_runtime.Branch(ne);
1822 check_prototype.Bind();
1823 // Load the prototype from the map and loop if non-null.
1824 __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
1825 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1826 __ cmp(r1, ip);
1827 loop.Branch(ne);
1828 // The enum cache is valid. Load the map of the object being
1829 // iterated over and use the cache for the iteration.
1830 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
1831 use_cache.Jump();
1832
1833 call_runtime.Bind();
1834 // Call the runtime to get the property names for the object.
1835 frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
Steve Blocka7e24c12009-10-30 11:49:00 +00001836 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1837
Steve Blockd0582a62009-12-15 09:54:21 +00001838 // If we got a map from the runtime call, we can do a fast
1839 // modification check. Otherwise, we got a fixed array, and we have
1840 // to do a slow check.
1841 // r0: map or fixed array (result from call to
1842 // Runtime::kGetPropertyNamesFast)
Steve Blocka7e24c12009-10-30 11:49:00 +00001843 __ mov(r2, Operand(r0));
1844 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
1845 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
1846 __ cmp(r1, ip);
1847 fixed_array.Branch(ne);
1848
Steve Blockd0582a62009-12-15 09:54:21 +00001849 use_cache.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001850 // Get enum cache
Steve Blockd0582a62009-12-15 09:54:21 +00001851 // r0: map (either the result from a call to
1852 // Runtime::kGetPropertyNamesFast or has been fetched directly from
1853 // the object)
Steve Blocka7e24c12009-10-30 11:49:00 +00001854 __ mov(r1, Operand(r0));
1855 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
1856 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
1857 __ ldr(r2,
1858 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
1859
1860 frame_->EmitPush(r0); // map
1861 frame_->EmitPush(r2); // enum cache bridge cache
1862 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
1863 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1864 frame_->EmitPush(r0);
1865 __ mov(r0, Operand(Smi::FromInt(0)));
1866 frame_->EmitPush(r0);
1867 entry.Jump();
1868
1869 fixed_array.Bind();
1870 __ mov(r1, Operand(Smi::FromInt(0)));
1871 frame_->EmitPush(r1); // insert 0 in place of Map
1872 frame_->EmitPush(r0);
1873
1874 // Push the length of the array and the initial index onto the stack.
1875 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
1876 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1877 frame_->EmitPush(r0);
1878 __ mov(r0, Operand(Smi::FromInt(0))); // init index
1879 frame_->EmitPush(r0);
1880
1881 // Condition.
1882 entry.Bind();
1883 // sp[0] : index
1884 // sp[1] : array/enum cache length
1885 // sp[2] : array or enum cache
1886 // sp[3] : 0 or map
1887 // sp[4] : enumerable
1888 // Grab the current frame's height for the break and continue
1889 // targets only after all the state is pushed on the frame.
1890 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1891 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1892
1893 __ ldr(r0, frame_->ElementAt(0)); // load the current count
1894 __ ldr(r1, frame_->ElementAt(1)); // load the length
1895 __ cmp(r0, Operand(r1)); // compare to the array length
1896 node->break_target()->Branch(hs);
1897
1898 __ ldr(r0, frame_->ElementAt(0));
1899
1900 // Get the i'th entry of the array.
1901 __ ldr(r2, frame_->ElementAt(2));
1902 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1903 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1904
1905 // Get Map or 0.
1906 __ ldr(r2, frame_->ElementAt(3));
1907 // Check if this (still) matches the map of the enumerable.
1908 // If not, we have to filter the key.
1909 __ ldr(r1, frame_->ElementAt(4));
1910 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
1911 __ cmp(r1, Operand(r2));
1912 end_del_check.Branch(eq);
1913
1914 // Convert the entry to a string (or null if it isn't a property anymore).
1915 __ ldr(r0, frame_->ElementAt(4)); // push enumerable
1916 frame_->EmitPush(r0);
1917 frame_->EmitPush(r3); // push entry
Steve Blockd0582a62009-12-15 09:54:21 +00001918 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001919 __ mov(r3, Operand(r0));
1920
1921 // If the property has been removed while iterating, we just skip it.
1922 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1923 __ cmp(r3, ip);
1924 node->continue_target()->Branch(eq);
1925
1926 end_del_check.Bind();
1927 // Store the entry in the 'each' expression and take another spin in the
1928 // loop. r3: i'th entry of the enum cache (or string there of)
1929  // loop. r3: i'th entry of the enum cache (or string thereof).
1930 { Reference each(this, node->each());
1931 if (!each.is_illegal()) {
1932 if (each.size() > 0) {
1933 __ ldr(r0, frame_->ElementAt(each.size()));
1934 frame_->EmitPush(r0);
1935 }
1936 // If the reference was to a slot we rely on the convenient property
1937 // that it doesn't matter whether a value (eg, r3 pushed above) is
1938 // right on top of or right underneath a zero-sized reference.
1939 each.SetValue(NOT_CONST_INIT);
1940 if (each.size() > 0) {
1941 // It's safe to pop the value lying on top of the reference before
1942 // unloading the reference itself (which preserves the top of stack,
1943 // ie, now the topmost value of the non-zero sized reference), since
1944 // we will discard the top of stack after unloading the reference
1945 // anyway.
1946 frame_->EmitPop(r0);
1947 }
1948 }
1949 }
1950 // Discard the i'th entry pushed above or else the remainder of the
1951 // reference, whichever is currently on top of the stack.
1952 frame_->Drop();
1953
1954 // Body.
1955 CheckStack(); // TODO(1222600): ignore if body contains calls.
1956 VisitAndSpill(node->body());
1957
1958 // Next. Reestablish a spilled frame in case we are coming here via
1959 // a continue in the body.
1960 node->continue_target()->Bind();
1961 frame_->SpillAll();
1962 frame_->EmitPop(r0);
1963 __ add(r0, r0, Operand(Smi::FromInt(1)));
1964 frame_->EmitPush(r0);
1965 entry.Jump();
1966
1967 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1968 // any frame.
1969 node->break_target()->Bind();
1970 frame_->Drop(5);
1971
1972 // Exit.
1973 exit.Bind();
1974 node->continue_target()->Unuse();
1975 node->break_target()->Unuse();
1976 ASSERT(frame_->height() == original_height);
1977}
1978
1979
Steve Block3ce2e202009-11-05 08:53:23 +00001980void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001981#ifdef DEBUG
1982 int original_height = frame_->height();
1983#endif
1984 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001985 Comment cmnt(masm_, "[ TryCatchStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001986 CodeForStatementPosition(node);
1987
1988 JumpTarget try_block;
1989 JumpTarget exit;
1990
1991 try_block.Call();
1992 // --- Catch block ---
1993 frame_->EmitPush(r0);
1994
1995 // Store the caught exception in the catch variable.
Leon Clarkee46be812010-01-19 14:06:41 +00001996 Variable* catch_var = node->catch_var()->var();
1997 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
1998 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
Steve Blocka7e24c12009-10-30 11:49:00 +00001999
2000 // Remove the exception from the stack.
2001 frame_->Drop();
2002
2003 VisitStatementsAndSpill(node->catch_block()->statements());
2004 if (frame_ != NULL) {
2005 exit.Jump();
2006 }
2007
2008
2009 // --- Try block ---
2010 try_block.Bind();
2011
2012 frame_->PushTryHandler(TRY_CATCH_HANDLER);
2013 int handler_height = frame_->height();
2014
2015 // Shadow the labels for all escapes from the try block, including
2016 // returns. During shadowing, the original label is hidden as the
2017 // LabelShadow and operations on the original actually affect the
2018 // shadowing label.
2019 //
2020 // We should probably try to unify the escaping labels and the return
2021 // label.
2022 int nof_escapes = node->escaping_targets()->length();
2023 List<ShadowTarget*> shadows(1 + nof_escapes);
2024
2025 // Add the shadow target for the function return.
2026 static const int kReturnShadowIndex = 0;
2027 shadows.Add(new ShadowTarget(&function_return_));
2028 bool function_return_was_shadowed = function_return_is_shadowed_;
2029 function_return_is_shadowed_ = true;
2030 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2031
2032 // Add the remaining shadow targets.
2033 for (int i = 0; i < nof_escapes; i++) {
2034 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2035 }
2036
2037 // Generate code for the statements in the try block.
2038 VisitStatementsAndSpill(node->try_block()->statements());
2039
2040 // Stop the introduced shadowing and count the number of required unlinks.
2041 // After shadowing stops, the original labels are unshadowed and the
2042 // LabelShadows represent the formerly shadowing labels.
2043 bool has_unlinks = false;
2044 for (int i = 0; i < shadows.length(); i++) {
2045 shadows[i]->StopShadowing();
2046 has_unlinks = has_unlinks || shadows[i]->is_linked();
2047 }
2048 function_return_is_shadowed_ = function_return_was_shadowed;
2049
2050 // Get an external reference to the handler address.
2051 ExternalReference handler_address(Top::k_handler_address);
2052
2053 // If we can fall off the end of the try block, unlink from try chain.
2054 if (has_valid_frame()) {
2055 // The next handler address is on top of the frame. Unlink from
2056 // the handler list and drop the rest of this handler from the
2057 // frame.
2058 ASSERT(StackHandlerConstants::kNextOffset == 0);
2059 frame_->EmitPop(r1);
2060 __ mov(r3, Operand(handler_address));
2061 __ str(r1, MemOperand(r3));
2062 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2063 if (has_unlinks) {
2064 exit.Jump();
2065 }
2066 }
2067
2068 // Generate unlink code for the (formerly) shadowing labels that have been
2069 // jumped to. Deallocate each shadow target.
2070 for (int i = 0; i < shadows.length(); i++) {
2071 if (shadows[i]->is_linked()) {
2072      // Unlink from try chain.
2073 shadows[i]->Bind();
2074 // Because we can be jumping here (to spilled code) from unspilled
2075 // code, we need to reestablish a spilled frame at this block.
2076 frame_->SpillAll();
2077
2078 // Reload sp from the top handler, because some statements that we
2079 // break from (eg, for...in) may have left stuff on the stack.
2080 __ mov(r3, Operand(handler_address));
2081 __ ldr(sp, MemOperand(r3));
2082 frame_->Forget(frame_->height() - handler_height);
2083
2084 ASSERT(StackHandlerConstants::kNextOffset == 0);
2085 frame_->EmitPop(r1);
2086 __ str(r1, MemOperand(r3));
2087 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2088
2089 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2090 frame_->PrepareForReturn();
2091 }
2092 shadows[i]->other_target()->Jump();
2093 }
2094 }
2095
2096 exit.Bind();
2097 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2098}
2099
2100
Steve Block3ce2e202009-11-05 08:53:23 +00002101void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002102#ifdef DEBUG
2103 int original_height = frame_->height();
2104#endif
2105 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00002106 Comment cmnt(masm_, "[ TryFinallyStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002107 CodeForStatementPosition(node);
2108
2109 // State: Used to keep track of reason for entering the finally
2110 // block. Should probably be extended to hold information for
2111 // break/continue from within the try block.
2112 enum { FALLING, THROWING, JUMPING };
2113
2114 JumpTarget try_block;
2115 JumpTarget finally_block;
2116
2117 try_block.Call();
2118
2119 frame_->EmitPush(r0); // save exception object on the stack
2120 // In case of thrown exceptions, this is where we continue.
2121 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2122 finally_block.Jump();
2123
2124 // --- Try block ---
2125 try_block.Bind();
2126
2127 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2128 int handler_height = frame_->height();
2129
2130 // Shadow the labels for all escapes from the try block, including
2131 // returns. Shadowing hides the original label as the LabelShadow and
2132 // operations on the original actually affect the shadowing label.
2133 //
2134 // We should probably try to unify the escaping labels and the return
2135 // label.
2136 int nof_escapes = node->escaping_targets()->length();
2137 List<ShadowTarget*> shadows(1 + nof_escapes);
2138
2139 // Add the shadow target for the function return.
2140 static const int kReturnShadowIndex = 0;
2141 shadows.Add(new ShadowTarget(&function_return_));
2142 bool function_return_was_shadowed = function_return_is_shadowed_;
2143 function_return_is_shadowed_ = true;
2144 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2145
2146 // Add the remaining shadow targets.
2147 for (int i = 0; i < nof_escapes; i++) {
2148 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2149 }
2150
2151 // Generate code for the statements in the try block.
2152 VisitStatementsAndSpill(node->try_block()->statements());
2153
2154 // Stop the introduced shadowing and count the number of required unlinks.
2155 // After shadowing stops, the original labels are unshadowed and the
2156 // LabelShadows represent the formerly shadowing labels.
2157 int nof_unlinks = 0;
2158 for (int i = 0; i < shadows.length(); i++) {
2159 shadows[i]->StopShadowing();
2160 if (shadows[i]->is_linked()) nof_unlinks++;
2161 }
2162 function_return_is_shadowed_ = function_return_was_shadowed;
2163
2164 // Get an external reference to the handler address.
2165 ExternalReference handler_address(Top::k_handler_address);
2166
2167 // If we can fall off the end of the try block, unlink from the try
2168 // chain and set the state on the frame to FALLING.
2169 if (has_valid_frame()) {
2170 // The next handler address is on top of the frame.
2171 ASSERT(StackHandlerConstants::kNextOffset == 0);
2172 frame_->EmitPop(r1);
2173 __ mov(r3, Operand(handler_address));
2174 __ str(r1, MemOperand(r3));
2175 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2176
2177 // Fake a top of stack value (unneeded when FALLING) and set the
2178 // state in r2, then jump around the unlink blocks if any.
2179 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2180 frame_->EmitPush(r0);
2181 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2182 if (nof_unlinks > 0) {
2183 finally_block.Jump();
2184 }
2185 }
2186
2187 // Generate code to unlink and set the state for the (formerly)
2188 // shadowing targets that have been jumped to.
2189 for (int i = 0; i < shadows.length(); i++) {
2190 if (shadows[i]->is_linked()) {
2191 // If we have come from the shadowed return, the return value is
2192 // in (a non-refcounted reference to) r0. We must preserve it
2193 // until it is pushed.
2194 //
2195 // Because we can be jumping here (to spilled code) from
2196 // unspilled code, we need to reestablish a spilled frame at
2197 // this block.
2198 shadows[i]->Bind();
2199 frame_->SpillAll();
2200
2201 // Reload sp from the top handler, because some statements that
2202 // we break from (eg, for...in) may have left stuff on the
2203 // stack.
2204 __ mov(r3, Operand(handler_address));
2205 __ ldr(sp, MemOperand(r3));
2206 frame_->Forget(frame_->height() - handler_height);
2207
2208 // Unlink this handler and drop it from the frame. The next
2209 // handler address is currently on top of the frame.
2210 ASSERT(StackHandlerConstants::kNextOffset == 0);
2211 frame_->EmitPop(r1);
2212 __ str(r1, MemOperand(r3));
2213 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2214
2215 if (i == kReturnShadowIndex) {
2216 // If this label shadowed the function return, materialize the
2217 // return value on the stack.
2218 frame_->EmitPush(r0);
2219 } else {
2220 // Fake TOS for targets that shadowed breaks and continues.
2221 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2222 frame_->EmitPush(r0);
2223 }
2224 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2225 if (--nof_unlinks > 0) {
2226 // If this is not the last unlink block, jump around the next.
2227 finally_block.Jump();
2228 }
2229 }
2230 }
2231
2232 // --- Finally block ---
2233 finally_block.Bind();
2234
2235 // Push the state on the stack.
2236 frame_->EmitPush(r2);
2237
2238 // We keep two elements on the stack - the (possibly faked) result
2239 // and the state - while evaluating the finally block.
2240 //
2241 // Generate code for the statements in the finally block.
2242 VisitStatementsAndSpill(node->finally_block()->statements());
2243
2244 if (has_valid_frame()) {
2245 // Restore state and return value or faked TOS.
2246 frame_->EmitPop(r2);
2247 frame_->EmitPop(r0);
2248 }
2249
2250 // Generate code to jump to the right destination for all used
2251 // formerly shadowing targets. Deallocate each shadow target.
2252 for (int i = 0; i < shadows.length(); i++) {
2253 if (has_valid_frame() && shadows[i]->is_bound()) {
2254 JumpTarget* original = shadows[i]->other_target();
2255 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2256 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2257 JumpTarget skip;
2258 skip.Branch(ne);
2259 frame_->PrepareForReturn();
2260 original->Jump();
2261 skip.Bind();
2262 } else {
2263 original->Branch(eq);
2264 }
2265 }
2266 }
2267
2268 if (has_valid_frame()) {
2269 // Check if we need to rethrow the exception.
2270 JumpTarget exit;
2271 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2272 exit.Branch(ne);
2273
2274 // Rethrow exception.
2275 frame_->EmitPush(r0);
2276 frame_->CallRuntime(Runtime::kReThrow, 1);
2277
2278 // Done.
2279 exit.Bind();
2280 }
2281 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2282}
2283
2284
2285void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2286#ifdef DEBUG
2287 int original_height = frame_->height();
2288#endif
2289 VirtualFrame::SpilledScope spilled_scope;
2290  Comment cmnt(masm_, "[ DebuggerStatement");
2291 CodeForStatementPosition(node);
2292#ifdef ENABLE_DEBUGGER_SUPPORT
2293 frame_->CallRuntime(Runtime::kDebugBreak, 0);
2294#endif
2295 // Ignore the return value.
2296 ASSERT(frame_->height() == original_height);
2297}
2298
2299
2300void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2301 VirtualFrame::SpilledScope spilled_scope;
2302 ASSERT(boilerplate->IsBoilerplate());
2303
Steve Block3ce2e202009-11-05 08:53:23 +00002304 __ mov(r0, Operand(boilerplate));
Leon Clarkee46be812010-01-19 14:06:41 +00002305 // Use the fast case closure allocation code that allocates in new
2306  // space for nested functions that don't need literal cloning.
2307 if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
2308 FastNewClosureStub stub;
2309 frame_->EmitPush(r0);
2310 frame_->CallStub(&stub, 1);
2311 frame_->EmitPush(r0);
2312 } else {
2313 // Create a new closure.
2314 frame_->EmitPush(cp);
2315 frame_->EmitPush(r0);
2316 frame_->CallRuntime(Runtime::kNewClosure, 2);
2317 frame_->EmitPush(r0);
2318 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002319}
2320
2321
2322void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2323#ifdef DEBUG
2324 int original_height = frame_->height();
2325#endif
2326 VirtualFrame::SpilledScope spilled_scope;
2327 Comment cmnt(masm_, "[ FunctionLiteral");
2328
2329 // Build the function boilerplate and instantiate it.
Steve Blockd0582a62009-12-15 09:54:21 +00002330 Handle<JSFunction> boilerplate =
2331 Compiler::BuildBoilerplate(node, script_, this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002332 // Check for stack-overflow exception.
2333 if (HasStackOverflow()) {
2334 ASSERT(frame_->height() == original_height);
2335 return;
2336 }
2337 InstantiateBoilerplate(boilerplate);
2338 ASSERT(frame_->height() == original_height + 1);
2339}
2340
2341
2342void CodeGenerator::VisitFunctionBoilerplateLiteral(
2343 FunctionBoilerplateLiteral* node) {
2344#ifdef DEBUG
2345 int original_height = frame_->height();
2346#endif
2347 VirtualFrame::SpilledScope spilled_scope;
2348 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2349 InstantiateBoilerplate(node->boilerplate());
2350 ASSERT(frame_->height() == original_height + 1);
2351}
2352
2353
2354void CodeGenerator::VisitConditional(Conditional* node) {
2355#ifdef DEBUG
2356 int original_height = frame_->height();
2357#endif
2358 VirtualFrame::SpilledScope spilled_scope;
2359 Comment cmnt(masm_, "[ Conditional");
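  // Illustrative example (not taken from the source): 'c ? a() : b()'.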
2360 JumpTarget then;
2361 JumpTarget else_;
Steve Blockd0582a62009-12-15 09:54:21 +00002362 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002363 if (has_valid_frame()) {
2364 Branch(false, &else_);
2365 }
2366 if (has_valid_frame() || then.is_linked()) {
2367 then.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002368 LoadAndSpill(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002369 }
2370 if (else_.is_linked()) {
2371 JumpTarget exit;
2372 if (has_valid_frame()) exit.Jump();
2373 else_.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002374 LoadAndSpill(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002375 if (exit.is_linked()) exit.Bind();
2376 }
2377 ASSERT(frame_->height() == original_height + 1);
2378}
2379
2380
2381void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
2382 VirtualFrame::SpilledScope spilled_scope;
2383 if (slot->type() == Slot::LOOKUP) {
2384 ASSERT(slot->var()->is_dynamic());
2385
2386 JumpTarget slow;
2387 JumpTarget done;
2388
2389 // Generate fast-case code for variables that might be shadowed by
2390 // eval-introduced variables. Eval is used a lot without
2391 // introducing variables. In those cases, we do not want to
2392 // perform a runtime call for all variables in the scope
2393 // containing the eval.
2394 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
2395 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
2396 // If there was no control flow to slow, we can exit early.
2397 if (!slow.is_linked()) {
2398 frame_->EmitPush(r0);
2399 return;
2400 }
2401
2402 done.Jump();
2403
2404 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
2405 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
2406 // Only generate the fast case for locals that rewrite to slots.
2407 // This rules out argument loads.
2408 if (potential_slot != NULL) {
2409 __ ldr(r0,
2410 ContextSlotOperandCheckExtensions(potential_slot,
2411 r1,
2412 r2,
2413 &slow));
2414 if (potential_slot->var()->mode() == Variable::CONST) {
2415 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2416 __ cmp(r0, ip);
2417 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2418 }
2419 // There is always control flow to slow from
2420 // ContextSlotOperandCheckExtensions so we have to jump around
2421 // it.
2422 done.Jump();
2423 }
2424 }
2425
2426 slow.Bind();
2427 frame_->EmitPush(cp);
2428 __ mov(r0, Operand(slot->var()->name()));
2429 frame_->EmitPush(r0);
2430
2431 if (typeof_state == INSIDE_TYPEOF) {
2432 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2433 } else {
2434 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2435 }
2436
2437 done.Bind();
2438 frame_->EmitPush(r0);
2439
2440 } else {
Steve Blocka7e24c12009-10-30 11:49:00 +00002441 // Special handling for locals allocated in registers.
2442 __ ldr(r0, SlotOperand(slot, r2));
2443 frame_->EmitPush(r0);
2444 if (slot->var()->mode() == Variable::CONST) {
2445 // Const slots may contain 'the hole' value (the constant hasn't been
2446 // initialized yet) which needs to be converted into the 'undefined'
2447 // value.
2448 Comment cmnt(masm_, "[ Unhole const");
2449 frame_->EmitPop(r0);
2450 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2451 __ cmp(r0, ip);
2452 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2453 frame_->EmitPush(r0);
2454 }
2455 }
2456}
2457
2458
Leon Clarkee46be812010-01-19 14:06:41 +00002459void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
2460 ASSERT(slot != NULL);
2461 if (slot->type() == Slot::LOOKUP) {
2462 ASSERT(slot->var()->is_dynamic());
2463
2464 // For now, just do a runtime call.
2465 frame_->EmitPush(cp);
2466 __ mov(r0, Operand(slot->var()->name()));
2467 frame_->EmitPush(r0);
2468
2469 if (init_state == CONST_INIT) {
2470 // Same as the case for a normal store, but ignores attribute
2471 // (e.g. READ_ONLY) of context slot so that we can initialize
2472 // const properties (introduced via eval("const foo = (some
2473 // expr);")). Also, uses the current function context instead of
2474 // the top context.
2475 //
2476 // Note that we must declare the foo upon entry of eval(), via a
2477 // context slot declaration, but we cannot initialize it at the
2478 // same time, because the const declaration may be at the end of
2479 // the eval code (sigh...) and the const variable may have been
2480 // used before (where its value is 'undefined'). Thus, we can only
2481 // do the initialization when we actually encounter the expression
2482 // and when the expression operands are defined and valid, and
2483 // thus we need the split into 2 operations: declaration of the
2484 // context slot followed by initialization.
2485 frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
2486 } else {
2487 frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
2488 }
2489 // Storing a variable must keep the (new) value on the expression
2490 // stack. This is necessary for compiling assignment expressions.
2491 frame_->EmitPush(r0);
2492
2493 } else {
2494 ASSERT(!slot->var()->is_dynamic());
2495
2496 JumpTarget exit;
2497 if (init_state == CONST_INIT) {
2498 ASSERT(slot->var()->mode() == Variable::CONST);
2499 // Only the first const initialization must be executed (the slot
2500 // still contains 'the hole' value). When the assignment is
2501 // executed, the code is identical to a normal store (see below).
2502 Comment cmnt(masm_, "[ Init const");
2503 __ ldr(r2, SlotOperand(slot, r2));
2504 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2505 __ cmp(r2, ip);
2506 exit.Branch(ne);
2507 }
2508
2509 // We must execute the store. Storing a variable must keep the
2510 // (new) value on the stack. This is necessary for compiling
2511 // assignment expressions.
2512 //
2513 // Note: We will reach here even with slot->var()->mode() ==
2514 // Variable::CONST because of const declarations which will
2515 // initialize consts to 'the hole' value and by doing so, end up
2516 // calling this code. r2 may be loaded with context; used below in
2517 // RecordWrite.
2518 frame_->EmitPop(r0);
2519 __ str(r0, SlotOperand(slot, r2));
2520 frame_->EmitPush(r0);
2521 if (slot->type() == Slot::CONTEXT) {
2522 // Skip write barrier if the written value is a smi.
2523 __ tst(r0, Operand(kSmiTagMask));
2524 exit.Branch(eq);
2525 // r2 is loaded with context when calling SlotOperand above.
2526 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
2527 __ mov(r3, Operand(offset));
2528 __ RecordWrite(r2, r3, r1);
2529 }
2530 // If we definitely did not jump over the assignment, we do not need
2531 // to bind the exit label. Doing so can defeat peephole
2532 // optimization.
2533 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
2534 exit.Bind();
2535 }
2536 }
2537}
2538
2539
Steve Blocka7e24c12009-10-30 11:49:00 +00002540void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
2541 TypeofState typeof_state,
2542 Register tmp,
2543 Register tmp2,
2544 JumpTarget* slow) {
2545 // Check that no extension objects have been created by calls to
2546 // eval from the current scope to the global scope.
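  // Illustrative example (not taken from the source): in
  // 'function f(s) { eval(s); return x; }' the eval may introduce a binding
  // for 'x', so each context on the chain is checked for an extension object.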
2547 Register context = cp;
2548 Scope* s = scope();
2549 while (s != NULL) {
2550 if (s->num_heap_slots() > 0) {
2551 if (s->calls_eval()) {
2552 // Check that extension is NULL.
2553 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
2554 __ tst(tmp2, tmp2);
2555 slow->Branch(ne);
2556 }
2557 // Load next context in chain.
2558 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
2559 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2560 context = tmp;
2561 }
2562 // If no outer scope calls eval, we do not need to check more
2563 // context extensions.
2564 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2565 s = s->outer_scope();
2566 }
2567
2568 if (s->is_eval_scope()) {
2569 Label next, fast;
2570 if (!context.is(tmp)) {
2571 __ mov(tmp, Operand(context));
2572 }
2573 __ bind(&next);
2574 // Terminate at global context.
2575 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2576 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
2577 __ cmp(tmp2, ip);
2578 __ b(eq, &fast);
2579 // Check that extension is NULL.
2580 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2581 __ tst(tmp2, tmp2);
2582 slow->Branch(ne);
2583 // Load next context in chain.
2584 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
2585 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2586 __ b(&next);
2587 __ bind(&fast);
2588 }
2589
2590 // All extension objects were empty and it is safe to use a global
2591 // load IC call.
2592 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
2593 // Load the global object.
2594 LoadGlobal();
2595  // Set up the name register.
2596 Result name(r2);
2597 __ mov(r2, Operand(slot->var()->name()));
2598 // Call IC stub.
2599 if (typeof_state == INSIDE_TYPEOF) {
2600 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
2601 } else {
2602 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
2603 }
2604
2605 // Drop the global object. The result is in r0.
2606 frame_->Drop();
2607}
2608
2609
2610void CodeGenerator::VisitSlot(Slot* node) {
2611#ifdef DEBUG
2612 int original_height = frame_->height();
2613#endif
2614 VirtualFrame::SpilledScope spilled_scope;
2615 Comment cmnt(masm_, "[ Slot");
Steve Blockd0582a62009-12-15 09:54:21 +00002616 LoadFromSlot(node, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00002617 ASSERT(frame_->height() == original_height + 1);
2618}
2619
2620
2621void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2622#ifdef DEBUG
2623 int original_height = frame_->height();
2624#endif
2625 VirtualFrame::SpilledScope spilled_scope;
2626 Comment cmnt(masm_, "[ VariableProxy");
2627
2628 Variable* var = node->var();
2629 Expression* expr = var->rewrite();
2630 if (expr != NULL) {
2631 Visit(expr);
2632 } else {
2633 ASSERT(var->is_global());
2634 Reference ref(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002635 ref.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002636 }
2637 ASSERT(frame_->height() == original_height + 1);
2638}
2639
2640
2641void CodeGenerator::VisitLiteral(Literal* node) {
2642#ifdef DEBUG
2643 int original_height = frame_->height();
2644#endif
2645 VirtualFrame::SpilledScope spilled_scope;
2646 Comment cmnt(masm_, "[ Literal");
2647 __ mov(r0, Operand(node->handle()));
2648 frame_->EmitPush(r0);
2649 ASSERT(frame_->height() == original_height + 1);
2650}
2651
2652
2653void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2654#ifdef DEBUG
2655 int original_height = frame_->height();
2656#endif
2657 VirtualFrame::SpilledScope spilled_scope;
2658  Comment cmnt(masm_, "[ RegExp Literal");
2659
2660 // Retrieve the literal array and check the allocated entry.
2661
2662 // Load the function of this activation.
2663 __ ldr(r1, frame_->Function());
2664
2665 // Load the literals array of the function.
2666 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2667
2668  // Load the literal at the index saved in the AST node.
2669 int literal_offset =
2670 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2671 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2672
2673 JumpTarget done;
2674 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2675 __ cmp(r2, ip);
2676 done.Branch(ne);
2677
2678  // If the entry is undefined, we call the runtime system to compute
2679 // the literal.
2680 frame_->EmitPush(r1); // literal array (0)
2681 __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
2682 frame_->EmitPush(r0); // literal index (1)
2683 __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
2684 frame_->EmitPush(r0);
2685 __ mov(r0, Operand(node->flags())); // RegExp flags (3)
2686 frame_->EmitPush(r0);
2687 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2688 __ mov(r2, Operand(r0));
2689
2690 done.Bind();
2691 // Push the literal.
2692 frame_->EmitPush(r2);
2693 ASSERT(frame_->height() == original_height + 1);
2694}
2695
2696
Steve Blocka7e24c12009-10-30 11:49:00 +00002697void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2698#ifdef DEBUG
2699 int original_height = frame_->height();
2700#endif
2701 VirtualFrame::SpilledScope spilled_scope;
2702 Comment cmnt(masm_, "[ ObjectLiteral");
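  // JavaScript example (illustrative only, not from the source):
  // '{ a: 1, b: f(), get c() { return 2; } }'.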
2703
Steve Blocka7e24c12009-10-30 11:49:00 +00002704 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00002705 __ ldr(r2, frame_->Function());
2706 // Literal array.
2707 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
2708 // Literal index.
2709 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
2710 // Constant properties.
2711 __ mov(r0, Operand(node->constant_properties()));
2712 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
2713 if (node->depth() > 1) {
2714 frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
2715 } else {
2716 frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002717 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002718 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00002719 // r0: created object literal
Steve Blocka7e24c12009-10-30 11:49:00 +00002720
2721 for (int i = 0; i < node->properties()->length(); i++) {
2722 ObjectLiteral::Property* property = node->properties()->at(i);
2723 Literal* key = property->key();
2724 Expression* value = property->value();
2725 switch (property->kind()) {
2726 case ObjectLiteral::Property::CONSTANT:
2727 break;
2728 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2729 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2730 // else fall through
2731 case ObjectLiteral::Property::COMPUTED: // fall through
2732 case ObjectLiteral::Property::PROTOTYPE: {
2733 frame_->EmitPush(r0); // dup the result
2734 LoadAndSpill(key);
2735 LoadAndSpill(value);
2736 frame_->CallRuntime(Runtime::kSetProperty, 3);
2737 // restore r0
2738 __ ldr(r0, frame_->Top());
2739 break;
2740 }
2741 case ObjectLiteral::Property::SETTER: {
2742 frame_->EmitPush(r0);
2743 LoadAndSpill(key);
2744 __ mov(r0, Operand(Smi::FromInt(1)));
2745 frame_->EmitPush(r0);
2746 LoadAndSpill(value);
2747 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2748 __ ldr(r0, frame_->Top());
2749 break;
2750 }
2751 case ObjectLiteral::Property::GETTER: {
2752 frame_->EmitPush(r0);
2753 LoadAndSpill(key);
2754 __ mov(r0, Operand(Smi::FromInt(0)));
2755 frame_->EmitPush(r0);
2756 LoadAndSpill(value);
2757 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2758 __ ldr(r0, frame_->Top());
2759 break;
2760 }
2761 }
2762 }
2763 ASSERT(frame_->height() == original_height + 1);
2764}
2765
2766
Steve Blocka7e24c12009-10-30 11:49:00 +00002767void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2768#ifdef DEBUG
2769 int original_height = frame_->height();
2770#endif
2771 VirtualFrame::SpilledScope spilled_scope;
2772 Comment cmnt(masm_, "[ ArrayLiteral");
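  // JavaScript example (illustrative only, not from the source): '[1, x, f()]'.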
2773
Steve Blocka7e24c12009-10-30 11:49:00 +00002774 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00002775 __ ldr(r2, frame_->Function());
2776 // Literals array.
2777 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
2778 // Literal index.
2779 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
2780 // Constant elements.
2781 __ mov(r0, Operand(node->constant_elements()));
2782 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
2783 if (node->depth() > 1) {
2784 frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
2785 } else {
2786 frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002787 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002788 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00002789  // r0: created array literal
Steve Blocka7e24c12009-10-30 11:49:00 +00002790
2791 // Generate code to set the elements in the array that are not
2792 // literals.
2793 for (int i = 0; i < node->values()->length(); i++) {
2794 Expression* value = node->values()->at(i);
2795
2796 // If value is a literal the property value is already set in the
2797 // boilerplate object.
2798 if (value->AsLiteral() != NULL) continue;
2799 // If value is a materialized literal the property value is already set
2800 // in the boilerplate object if it is simple.
2801 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2802
2803 // The property must be set by generated code.
2804 LoadAndSpill(value);
2805 frame_->EmitPop(r0);
2806
2807 // Fetch the object literal.
2808 __ ldr(r1, frame_->Top());
2809 // Get the elements array.
2810 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
2811
2812 // Write to the indexed properties array.
2813 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2814 __ str(r0, FieldMemOperand(r1, offset));
2815
2816 // Update the write barrier for the array address.
2817 __ mov(r3, Operand(offset));
2818 __ RecordWrite(r1, r3, r2);
2819 }
2820 ASSERT(frame_->height() == original_height + 1);
2821}
2822
2823
2824void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2825#ifdef DEBUG
2826 int original_height = frame_->height();
2827#endif
2828 VirtualFrame::SpilledScope spilled_scope;
2829 // Call runtime routine to allocate the catch extension object and
2830 // assign the exception value to the catch variable.
2831 Comment cmnt(masm_, "[ CatchExtensionObject");
2832 LoadAndSpill(node->key());
2833 LoadAndSpill(node->value());
2834 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2835 frame_->EmitPush(r0);
2836 ASSERT(frame_->height() == original_height + 1);
2837}
2838
2839
2840void CodeGenerator::VisitAssignment(Assignment* node) {
2841#ifdef DEBUG
2842 int original_height = frame_->height();
2843#endif
2844 VirtualFrame::SpilledScope spilled_scope;
2845 Comment cmnt(masm_, "[ Assignment");
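  // Illustrative examples (not taken from the source): 'x = f()' and
  // compound forms such as 'x += 1', which load the target's old value first.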
2846
2847 { Reference target(this, node->target());
2848 if (target.is_illegal()) {
2849 // Fool the virtual frame into thinking that we left the assignment's
2850 // value on the frame.
2851 __ mov(r0, Operand(Smi::FromInt(0)));
2852 frame_->EmitPush(r0);
2853 ASSERT(frame_->height() == original_height + 1);
2854 return;
2855 }
2856
2857 if (node->op() == Token::ASSIGN ||
2858 node->op() == Token::INIT_VAR ||
2859 node->op() == Token::INIT_CONST) {
2860 LoadAndSpill(node->value());
2861
2862 } else {
2863 // +=, *= and similar binary assignments.
2864 // Get the old value of the lhs.
Steve Blockd0582a62009-12-15 09:54:21 +00002865 target.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002866 Literal* literal = node->value()->AsLiteral();
2867 bool overwrite =
2868 (node->value()->AsBinaryOperation() != NULL &&
2869 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2870 if (literal != NULL && literal->handle()->IsSmi()) {
2871 SmiOperation(node->binary_op(),
2872 literal->handle(),
2873 false,
2874 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2875 frame_->EmitPush(r0);
2876
2877 } else {
2878 LoadAndSpill(node->value());
2879 GenericBinaryOperation(node->binary_op(),
2880 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2881 frame_->EmitPush(r0);
2882 }
2883 }
2884
2885 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2886 if (var != NULL &&
2887 (var->mode() == Variable::CONST) &&
2888 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2889 // Assignment ignored - leave the value on the stack.
2890
2891 } else {
2892 CodeForSourcePosition(node->position());
2893 if (node->op() == Token::INIT_CONST) {
2894 // Dynamic constant initializations must use the function context
2895 // and initialize the actual constant declared. Dynamic variable
2896 // initializations are simply assignments and use SetValue.
2897 target.SetValue(CONST_INIT);
2898 } else {
2899 target.SetValue(NOT_CONST_INIT);
2900 }
2901 }
2902 }
2903 ASSERT(frame_->height() == original_height + 1);
2904}
2905
2906
2907void CodeGenerator::VisitThrow(Throw* node) {
2908#ifdef DEBUG
2909 int original_height = frame_->height();
2910#endif
2911 VirtualFrame::SpilledScope spilled_scope;
2912 Comment cmnt(masm_, "[ Throw");
2913
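  // Illustrative example (not taken from the source): 'throw new Error(msg)'.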
2914 LoadAndSpill(node->exception());
2915 CodeForSourcePosition(node->position());
2916 frame_->CallRuntime(Runtime::kThrow, 1);
2917 frame_->EmitPush(r0);
2918 ASSERT(frame_->height() == original_height + 1);
2919}
2920
2921
2922void CodeGenerator::VisitProperty(Property* node) {
2923#ifdef DEBUG
2924 int original_height = frame_->height();
2925#endif
2926 VirtualFrame::SpilledScope spilled_scope;
2927 Comment cmnt(masm_, "[ Property");
2928
2929 { Reference property(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002930 property.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002931 }
2932 ASSERT(frame_->height() == original_height + 1);
2933}
2934
2935
2936void CodeGenerator::VisitCall(Call* node) {
2937#ifdef DEBUG
2938 int original_height = frame_->height();
2939#endif
2940 VirtualFrame::SpilledScope spilled_scope;
2941 Comment cmnt(masm_, "[ Call");
2942
2943 Expression* function = node->expression();
2944 ZoneList<Expression*>* args = node->arguments();
2945
2946 // Standard function call.
2947 // Check if the function is a variable or a property.
2948 Variable* var = function->AsVariableProxy()->AsVariable();
2949 Property* property = function->AsProperty();
2950
2951 // ------------------------------------------------------------------------
2952 // Fast-case: Use inline caching.
2953 // ---
2954 // According to ECMA-262, section 11.2.3, page 44, the function to call
2955 // must be resolved after the arguments have been evaluated. The IC code
2956 // automatically handles this by loading the arguments before the function
2957 // is resolved in cache misses (this also holds for megamorphic calls).
2958 // ------------------------------------------------------------------------
2959
2960 if (var != NULL && var->is_possibly_eval()) {
2961 // ----------------------------------
2962 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2963 // ----------------------------------
2964
2965 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2966 // resolve the function we need to call and the receiver of the
2967 // call. Then we call the resolved function using the given
2968 // arguments.
2969 // Prepare stack for call to resolved function.
2970 LoadAndSpill(function);
2971 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2972 frame_->EmitPush(r2); // Slot for receiver
2973 int arg_count = args->length();
2974 for (int i = 0; i < arg_count; i++) {
2975 LoadAndSpill(args->at(i));
2976 }
2977
2978 // Prepare stack for call to ResolvePossiblyDirectEval.
2979 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
2980 frame_->EmitPush(r1);
2981 if (arg_count > 0) {
2982 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
2983 frame_->EmitPush(r1);
2984 } else {
2985 frame_->EmitPush(r2);
2986 }
2987
Leon Clarkee46be812010-01-19 14:06:41 +00002988 // Push the receiver.
2989 __ ldr(r1, frame_->Receiver());
2990 frame_->EmitPush(r1);
2991
Steve Blocka7e24c12009-10-30 11:49:00 +00002992 // Resolve the call.
Leon Clarkee46be812010-01-19 14:06:41 +00002993 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002994
2995 // Touch up stack with the right values for the function and the receiver.
Leon Clarkee46be812010-01-19 14:06:41 +00002996 __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00002997 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
2998
2999 // Call the function.
3000 CodeForSourcePosition(node->position());
3001
3002 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Leon Clarkee46be812010-01-19 14:06:41 +00003003 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003004 frame_->CallStub(&call_function, arg_count + 1);
3005
3006 __ ldr(cp, frame_->Context());
3007 // Remove the function from the stack.
3008 frame_->Drop();
3009 frame_->EmitPush(r0);
3010
3011 } else if (var != NULL && !var->is_this() && var->is_global()) {
3012 // ----------------------------------
3013 // JavaScript example: 'foo(1, 2, 3)' // foo is global
3014 // ----------------------------------
3015
3016 // Push the name of the function and the receiver onto the stack.
3017 __ mov(r0, Operand(var->name()));
3018 frame_->EmitPush(r0);
3019
3020 // Pass the global object as the receiver and let the IC stub
3021 // patch the stack to use the global proxy as 'this' in the
3022 // invoked function.
3023 LoadGlobal();
3024
3025 // Load the arguments.
3026 int arg_count = args->length();
3027 for (int i = 0; i < arg_count; i++) {
3028 LoadAndSpill(args->at(i));
3029 }
3030
3031    // Set up the receiver register and call the IC initialization code.
3032 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3033 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3034 CodeForSourcePosition(node->position());
3035 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3036 arg_count + 1);
3037 __ ldr(cp, frame_->Context());
3038 // Remove the function from the stack.
3039 frame_->Drop();
3040 frame_->EmitPush(r0);
3041
3042 } else if (var != NULL && var->slot() != NULL &&
3043 var->slot()->type() == Slot::LOOKUP) {
3044 // ----------------------------------
3045 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
3046 // ----------------------------------
3047
3048 // Load the function
3049 frame_->EmitPush(cp);
3050 __ mov(r0, Operand(var->name()));
3051 frame_->EmitPush(r0);
3052 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3053 // r0: slot value; r1: receiver
3054
3055 // Load the receiver.
3056 frame_->EmitPush(r0); // function
3057 frame_->EmitPush(r1); // receiver
3058
3059 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003060 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003061 frame_->EmitPush(r0);
3062
3063 } else if (property != NULL) {
3064 // Check if the key is a literal string.
3065 Literal* literal = property->key()->AsLiteral();
3066
3067 if (literal != NULL && literal->handle()->IsSymbol()) {
3068 // ------------------------------------------------------------------
3069 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
3070 // ------------------------------------------------------------------
3071
3072 // Push the name of the function and the receiver onto the stack.
3073 __ mov(r0, Operand(literal->handle()));
3074 frame_->EmitPush(r0);
3075 LoadAndSpill(property->obj());
3076
3077 // Load the arguments.
3078 int arg_count = args->length();
3079 for (int i = 0; i < arg_count; i++) {
3080 LoadAndSpill(args->at(i));
3081 }
3082
3083 // Set the receiver register and call the IC initialization code.
3084 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3085 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3086 CodeForSourcePosition(node->position());
3087 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3088 __ ldr(cp, frame_->Context());
3089
3090 // Remove the function from the stack.
3091 frame_->Drop();
3092
3093      frame_->EmitPush(r0);  // push the result after removing the function from the stack
3094
3095 } else {
3096 // -------------------------------------------
3097 // JavaScript example: 'array[index](1, 2, 3)'
3098 // -------------------------------------------
3099
3100 // Load the function to call from the property through a reference.
3101 Reference ref(this, property);
Steve Blockd0582a62009-12-15 09:54:21 +00003102 ref.GetValueAndSpill(); // receiver
Steve Blocka7e24c12009-10-30 11:49:00 +00003103
3104 // Pass receiver to called function.
3105 if (property->is_synthetic()) {
3106 LoadGlobalReceiver(r0);
3107 } else {
3108 __ ldr(r0, frame_->ElementAt(ref.size()));
3109 frame_->EmitPush(r0);
3110 }
3111
3112 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003113 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003114 frame_->EmitPush(r0);
3115 }
3116
3117 } else {
3118 // ----------------------------------
3119 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
3120 // ----------------------------------
3121
3122 // Load the function.
3123 LoadAndSpill(function);
3124
3125 // Pass the global proxy as the receiver.
3126 LoadGlobalReceiver(r0);
3127
3128 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003129 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003130 frame_->EmitPush(r0);
3131 }
3132 ASSERT(frame_->height() == original_height + 1);
3133}
3134
3135
3136void CodeGenerator::VisitCallNew(CallNew* node) {
3137#ifdef DEBUG
3138 int original_height = frame_->height();
3139#endif
3140 VirtualFrame::SpilledScope spilled_scope;
3141 Comment cmnt(masm_, "[ CallNew");
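  // Illustrative example (not taken from the source): 'new Foo(1, 2)'.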
3142
3143 // According to ECMA-262, section 11.2.2, page 44, the function
3144 // expression in new calls must be evaluated before the
3145 // arguments. This is different from ordinary calls, where the
3146 // actual function to call is resolved after the arguments have been
3147 // evaluated.
3148
3149 // Compute function to call and use the global object as the
3150 // receiver. There is no need to use the global proxy here because
3151 // it will always be replaced with a newly allocated object.
3152 LoadAndSpill(node->expression());
3153 LoadGlobal();
3154
3155 // Push the arguments ("left-to-right") on the stack.
3156 ZoneList<Expression*>* args = node->arguments();
3157 int arg_count = args->length();
3158 for (int i = 0; i < arg_count; i++) {
3159 LoadAndSpill(args->at(i));
3160 }
3161
3162 // r0: the number of arguments.
3163 Result num_args(r0);
3164 __ mov(r0, Operand(arg_count));
3165
3166 // Load the function into r1 as per calling convention.
3167 Result function(r1);
3168 __ ldr(r1, frame_->ElementAt(arg_count + 1));
3169
3170 // Call the construct call builtin that handles allocation and
3171 // constructor invocation.
3172 CodeForSourcePosition(node->position());
3173 Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
3174 frame_->CallCodeObject(ic,
3175 RelocInfo::CONSTRUCT_CALL,
3176 &num_args,
3177 &function,
3178 arg_count + 1);
3179
3180 // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
3181 __ str(r0, frame_->Top());
3182 ASSERT(frame_->height() == original_height + 1);
3183}
3184
3185
3186void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
3187 VirtualFrame::SpilledScope spilled_scope;
3188 ASSERT(args->length() == 1);
3189 JumpTarget leave, null, function, non_function_constructor;
3190
3191 // Load the object into r0.
3192 LoadAndSpill(args->at(0));
3193 frame_->EmitPop(r0);
3194
3195 // If the object is a smi, we return null.
3196 __ tst(r0, Operand(kSmiTagMask));
3197 null.Branch(eq);
3198
3199 // Check that the object is a JS object but take special care of JS
3200 // functions to make sure they have 'Function' as their class.
3201 __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
3202 null.Branch(lt);
3203
3204 // As long as JS_FUNCTION_TYPE is the last instance type and it is
3205 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
3206 // LAST_JS_OBJECT_TYPE.
3207 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
3208 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
3209 __ cmp(r1, Operand(JS_FUNCTION_TYPE));
3210 function.Branch(eq);
3211
3212 // Check if the constructor in the map is a function.
3213 __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
3214 __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
3215 non_function_constructor.Branch(ne);
3216
3217 // The r0 register now contains the constructor function. Grab the
3218 // instance class name from there.
3219 __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
3220 __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
3221 frame_->EmitPush(r0);
3222 leave.Jump();
3223
3224 // Functions have class 'Function'.
3225 function.Bind();
3226 __ mov(r0, Operand(Factory::function_class_symbol()));
3227 frame_->EmitPush(r0);
3228 leave.Jump();
3229
3230 // Objects with a non-function constructor have class 'Object'.
3231 non_function_constructor.Bind();
3232 __ mov(r0, Operand(Factory::Object_symbol()));
3233 frame_->EmitPush(r0);
3234 leave.Jump();
3235
3236 // Non-JS objects have class null.
3237 null.Bind();
3238 __ LoadRoot(r0, Heap::kNullValueRootIndex);
3239 frame_->EmitPush(r0);
3240
3241 // All done.
3242 leave.Bind();
3243}
3244
3245
3246void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
3247 VirtualFrame::SpilledScope spilled_scope;
3248 ASSERT(args->length() == 1);
3249 JumpTarget leave;
3250 LoadAndSpill(args->at(0));
3251 frame_->EmitPop(r0); // r0 contains object.
3252 // if (object->IsSmi()) return the object.
3253 __ tst(r0, Operand(kSmiTagMask));
3254 leave.Branch(eq);
3255 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3256 __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
3257 leave.Branch(ne);
3258 // Load the value.
3259 __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
3260 leave.Bind();
3261 frame_->EmitPush(r0);
3262}
3263
3264
3265void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
3266 VirtualFrame::SpilledScope spilled_scope;
3267 ASSERT(args->length() == 2);
3268 JumpTarget leave;
3269 LoadAndSpill(args->at(0)); // Load the object.
3270 LoadAndSpill(args->at(1)); // Load the value.
3271 frame_->EmitPop(r0); // r0 contains value
3272 frame_->EmitPop(r1); // r1 contains object
3273 // if (object->IsSmi()) return object.
3274 __ tst(r1, Operand(kSmiTagMask));
3275 leave.Branch(eq);
3276 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3277 __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
3278 leave.Branch(ne);
3279 // Store the value.
3280 __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
3281 // Update the write barrier.
3282 __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
3283 __ RecordWrite(r1, r2, r3);
3284 // Leave.
3285 leave.Bind();
3286 frame_->EmitPush(r0);
3287}
3288
3289
3290void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3291 VirtualFrame::SpilledScope spilled_scope;
3292 ASSERT(args->length() == 1);
3293 LoadAndSpill(args->at(0));
3294 frame_->EmitPop(r0);
3295 __ tst(r0, Operand(kSmiTagMask));
3296 cc_reg_ = eq;
3297}
3298
3299
3300void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3301 VirtualFrame::SpilledScope spilled_scope;
3302 // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
3303 ASSERT_EQ(args->length(), 3);
3304#ifdef ENABLE_LOGGING_AND_PROFILING
3305 if (ShouldGenerateLog(args->at(0))) {
3306 LoadAndSpill(args->at(1));
3307 LoadAndSpill(args->at(2));
3308 __ CallRuntime(Runtime::kLog, 2);
3309 }
3310#endif
3311 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3312 frame_->EmitPush(r0);
3313}
3314
3315
3316void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3317 VirtualFrame::SpilledScope spilled_scope;
3318 ASSERT(args->length() == 1);
3319 LoadAndSpill(args->at(0));
3320 frame_->EmitPop(r0);
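  // A non-negative Smi has both the tag bit (bit 0, since kSmiTag == 0) and the
  // sign bit (bit 31) clear, so one tst against the combined mask suffices.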
3321 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3322 cc_reg_ = eq;
3323}
3324
3325
3326// This should generate code that performs a charCodeAt() call or returns
3327// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
3328 // This ARM version handles flat (sequential) strings inline; all other cases fall through to the slow case.
3329void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3330 VirtualFrame::SpilledScope spilled_scope;
3331 ASSERT(args->length() == 2);
Steve Blockd0582a62009-12-15 09:54:21 +00003332 Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
3333
3334 LoadAndSpill(args->at(0));
3335 LoadAndSpill(args->at(1));
3336 frame_->EmitPop(r0); // Index.
3337 frame_->EmitPop(r1); // String.
3338
3339 Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
3340
3341 __ tst(r1, Operand(kSmiTagMask));
3342 __ b(eq, &slow); // The 'string' was a Smi.
3343
3344 ASSERT(kSmiTag == 0);
3345 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3346 __ b(ne, &slow); // The index was negative or not a Smi.
3347
3348 __ bind(&try_again_with_new_string);
3349 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
3350 __ b(ge, &slow);
3351
3352 // Now r2 has the string type.
3353 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
3354 // Now r3 has the length of the string. Compare with the index.
3355 __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
3356 __ b(le, &slow);
3357
3358 // Here we know the index is in range. Check that the string is sequential.
3359 ASSERT_EQ(0, kSeqStringTag);
3360 __ tst(r2, Operand(kStringRepresentationMask));
3361 __ b(ne, &not_a_flat_string);
3362
3363 // Check whether it is an ASCII string.
3364 ASSERT_EQ(0, kTwoByteStringTag);
3365 __ tst(r2, Operand(kStringEncodingMask));
3366 __ b(ne, &ascii_string);
3367
3368 // 2-byte string. We can add without shifting since the Smi tag size is the
3369 // log2 of the number of bytes in a two-byte character.
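  // (The tagged index for element i is i << kSmiTagSize == 2 * i, which is
  // already the byte offset of a two-byte character.)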
3370 ASSERT_EQ(1, kSmiTagSize);
3371 ASSERT_EQ(0, kSmiShiftSize);
3372 __ add(r1, r1, Operand(r0));
3373 __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
3374 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
3375 __ jmp(&end);
3376
3377 __ bind(&ascii_string);
3378 __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
3379 __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
3380 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
3381 __ jmp(&end);
3382
3383 __ bind(&not_a_flat_string);
3384 __ and_(r2, r2, Operand(kStringRepresentationMask));
3385 __ cmp(r2, Operand(kConsStringTag));
3386 __ b(ne, &slow);
3387
3388 // ConsString.
3389 // Check that the right-hand side is the empty string (i.e. that this is
3390 // really a flat string wrapped in a cons string). If it is not, go to the
3391 // runtime system now to flatten the string.
3392 __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
3393 __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
3394 __ cmp(r2, Operand(r3));
3395 __ b(ne, &slow);
3396
3397 // Get the first of the two strings.
3398 __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
3399 __ jmp(&try_again_with_new_string);
3400
3401 __ bind(&slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00003402 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
Steve Blockd0582a62009-12-15 09:54:21 +00003403
3404 __ bind(&end);
Steve Blocka7e24c12009-10-30 11:49:00 +00003405 frame_->EmitPush(r0);
3406}
3407
3408
3409void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3410 VirtualFrame::SpilledScope spilled_scope;
3411 ASSERT(args->length() == 1);
3412 LoadAndSpill(args->at(0));
3413 JumpTarget answer;
3414 // We need the CC bits to come out as not_equal in the case where the
3415 // object is a smi. This can't be done with the usual test opcode so
3416 // we use XOR to get the right CC bits.
3417 frame_->EmitPop(r0);
3418 __ and_(r1, r0, Operand(kSmiTagMask));
3419 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
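  // A Smi leaves a non-zero value here (flags ne), so the branch below is
  // taken and the 'eq' result condition is false; a heap object leaves zero
  // and falls through to the array type check.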
3420 answer.Branch(ne);
3421 // It is a heap object - get the map. Check if the object is a JS array.
3422 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
3423 answer.Bind();
3424 cc_reg_ = eq;
3425}
3426
3427
Steve Blockd0582a62009-12-15 09:54:21 +00003428void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
3429 // This generates a fast version of:
3430 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
3431 VirtualFrame::SpilledScope spilled_scope;
3432 ASSERT(args->length() == 1);
3433 LoadAndSpill(args->at(0));
3434 frame_->EmitPop(r1);
3435 __ tst(r1, Operand(kSmiTagMask));
3436 false_target()->Branch(eq);
3437
3438 __ LoadRoot(ip, Heap::kNullValueRootIndex);
3439 __ cmp(r1, ip);
3440 true_target()->Branch(eq);
3441
3442 Register map_reg = r2;
3443 __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
3444 // Undetectable objects behave like undefined when tested with typeof.
3445 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
3446 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
3447 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
3448 false_target()->Branch(eq);
3449
3450 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
3451 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
3452 false_target()->Branch(lt);
3453 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
3454 cc_reg_ = le;
3455}
3456
3457
3458void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
3459 // This generates a fast version of:
3460 // (%_ClassOf(arg) === 'Function')
3461 VirtualFrame::SpilledScope spilled_scope;
3462 ASSERT(args->length() == 1);
3463 LoadAndSpill(args->at(0));
3464 frame_->EmitPop(r0);
3465 __ tst(r0, Operand(kSmiTagMask));
3466 false_target()->Branch(eq);
3467 Register map_reg = r2;
3468 __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
3469 cc_reg_ = eq;
3470}
3471
3472
Steve Blocka7e24c12009-10-30 11:49:00 +00003473void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3474 VirtualFrame::SpilledScope spilled_scope;
3475 ASSERT(args->length() == 0);
3476
3477 // Get the frame pointer for the calling frame.
3478 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3479
3480 // Skip the arguments adaptor frame if it exists.
3481 Label check_frame_marker;
3482 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
3483 __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3484 __ b(ne, &check_frame_marker);
3485 __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
3486
3487 // Check the marker in the calling frame.
3488 __ bind(&check_frame_marker);
3489 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
3490 __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3491 cc_reg_ = eq;
3492}
3493
3494
3495void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3496 VirtualFrame::SpilledScope spilled_scope;
3497 ASSERT(args->length() == 0);
3498
3499 // Seed the result with the formal parameters count, which will be used
3500 // in case no arguments adaptor frame is found below the current frame.
3501 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
3502
3503 // Call the shared stub to get to the arguments.length.
3504 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3505 frame_->CallStub(&stub, 0);
3506 frame_->EmitPush(r0);
3507}
3508
3509
3510void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3511 VirtualFrame::SpilledScope spilled_scope;
3512 ASSERT(args->length() == 1);
3513
3514 // Satisfy contract with ArgumentsAccessStub:
3515 // Load the key into r1 and the formal parameters count into r0.
3516 LoadAndSpill(args->at(0));
3517 frame_->EmitPop(r1);
3518 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
3519
3520 // Call the shared stub to get to arguments[key].
3521 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3522 frame_->CallStub(&stub, 0);
3523 frame_->EmitPush(r0);
3524}
3525
3526
3527void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3528 VirtualFrame::SpilledScope spilled_scope;
3529 ASSERT(args->length() == 0);
3530 __ Call(ExternalReference::random_positive_smi_function().address(),
3531 RelocInfo::RUNTIME_ENTRY);
3532 frame_->EmitPush(r0);
3533}
3534
3535
Steve Blockd0582a62009-12-15 09:54:21 +00003536void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
3537 ASSERT_EQ(2, args->length());
3538
3539 Load(args->at(0));
3540 Load(args->at(1));
3541
3542 frame_->CallRuntime(Runtime::kStringAdd, 2);
3543 frame_->EmitPush(r0);
3544}
3545
3546
Leon Clarkee46be812010-01-19 14:06:41 +00003547void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
3548 ASSERT_EQ(3, args->length());
3549
3550 Load(args->at(0));
3551 Load(args->at(1));
3552 Load(args->at(2));
3553
3554 frame_->CallRuntime(Runtime::kSubString, 3);
3555 frame_->EmitPush(r0);
3556}
3557
3558
3559void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
3560 ASSERT_EQ(2, args->length());
3561
3562 Load(args->at(0));
3563 Load(args->at(1));
3564
3565 frame_->CallRuntime(Runtime::kStringCompare, 2);
3566 frame_->EmitPush(r0);
3567}
3568
3569
3570void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
3571 ASSERT_EQ(4, args->length());
3572
3573 Load(args->at(0));
3574 Load(args->at(1));
3575 Load(args->at(2));
3576 Load(args->at(3));
3577
3578 frame_->CallRuntime(Runtime::kRegExpExec, 4);
3579 frame_->EmitPush(r0);
3580}
3581
3582
Steve Blocka7e24c12009-10-30 11:49:00 +00003583void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3584 VirtualFrame::SpilledScope spilled_scope;
3585 ASSERT(args->length() == 2);
3586
3587 // Load the two objects into registers and perform the comparison.
3588 LoadAndSpill(args->at(0));
3589 LoadAndSpill(args->at(1));
3590 frame_->EmitPop(r0);
3591 frame_->EmitPop(r1);
3592 __ cmp(r0, Operand(r1));
3593 cc_reg_ = eq;
3594}
3595
3596
3597void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
3598#ifdef DEBUG
3599 int original_height = frame_->height();
3600#endif
3601 VirtualFrame::SpilledScope spilled_scope;
3602 if (CheckForInlineRuntimeCall(node)) {
3603 ASSERT((has_cc() && frame_->height() == original_height) ||
3604 (!has_cc() && frame_->height() == original_height + 1));
3605 return;
3606 }
3607
3608 ZoneList<Expression*>* args = node->arguments();
3609 Comment cmnt(masm_, "[ CallRuntime");
3610 Runtime::Function* function = node->function();
3611
3612 if (function == NULL) {
3613 // Prepare stack for calling JS runtime function.
3614 __ mov(r0, Operand(node->name()));
3615 frame_->EmitPush(r0);
3616 // Push the builtins object found in the current global object.
3617 __ ldr(r1, GlobalObject());
3618 __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
3619 frame_->EmitPush(r0);
3620 }
3621
3622 // Push the arguments ("left-to-right").
3623 int arg_count = args->length();
3624 for (int i = 0; i < arg_count; i++) {
3625 LoadAndSpill(args->at(i));
3626 }
3627
3628 if (function == NULL) {
3629 // Call the JS runtime function.
3630 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3631 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3632 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3633 __ ldr(cp, frame_->Context());
3634 frame_->Drop();
3635 frame_->EmitPush(r0);
3636 } else {
3637 // Call the C runtime function.
3638 frame_->CallRuntime(function, arg_count);
3639 frame_->EmitPush(r0);
3640 }
3641 ASSERT(frame_->height() == original_height + 1);
3642}
3643
3644
3645void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
3646#ifdef DEBUG
3647 int original_height = frame_->height();
3648#endif
3649 VirtualFrame::SpilledScope spilled_scope;
3650 Comment cmnt(masm_, "[ UnaryOperation");
3651
3652 Token::Value op = node->op();
3653
3654 if (op == Token::NOT) {
3655 LoadConditionAndSpill(node->expression(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003656 false_target(),
3657 true_target(),
3658 true);
3659 // LoadCondition may (and usually does) leave a test and branch to
3660 // be emitted by the caller. In that case, negate the condition.
3661 if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
3662
3663 } else if (op == Token::DELETE) {
3664 Property* property = node->expression()->AsProperty();
3665 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3666 if (property != NULL) {
3667 LoadAndSpill(property->obj());
3668 LoadAndSpill(property->key());
Steve Blockd0582a62009-12-15 09:54:21 +00003669 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003670
3671 } else if (variable != NULL) {
3672 Slot* slot = variable->slot();
3673 if (variable->is_global()) {
3674 LoadGlobal();
3675 __ mov(r0, Operand(variable->name()));
3676 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003677 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003678
3679 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3680 // Look up the context holding the named variable.
3681 frame_->EmitPush(cp);
3682 __ mov(r0, Operand(variable->name()));
3683 frame_->EmitPush(r0);
3684 frame_->CallRuntime(Runtime::kLookupContext, 2);
3685 // r0: context
3686 frame_->EmitPush(r0);
3687 __ mov(r0, Operand(variable->name()));
3688 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003689 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003690
3691 } else {
3692 // Default: Result of deleting non-global, not dynamically
3693 // introduced variables is false.
3694 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
3695 }
3696
3697 } else {
3698 // Default: Result of deleting expressions is true.
3699 LoadAndSpill(node->expression()); // may have side-effects
3700 frame_->Drop();
3701 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
3702 }
3703 frame_->EmitPush(r0);
3704
3705 } else if (op == Token::TYPEOF) {
3706 // Special case for loading the typeof expression; see comment on
3707 // LoadTypeofExpression().
3708 LoadTypeofExpression(node->expression());
3709 frame_->CallRuntime(Runtime::kTypeof, 1);
3710 frame_->EmitPush(r0); // r0 has result
3711
3712 } else {
3713 LoadAndSpill(node->expression());
3714 frame_->EmitPop(r0);
3715 switch (op) {
3716 case Token::NOT:
3717 case Token::DELETE:
3718 case Token::TYPEOF:
3719 UNREACHABLE(); // handled above
3720 break;
3721
3722 case Token::SUB: {
3723 bool overwrite =
3724 (node->expression()->AsBinaryOperation() != NULL &&
3725 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Leon Clarkee46be812010-01-19 14:06:41 +00003726 GenericUnaryOpStub stub(Token::SUB, overwrite);
Steve Blocka7e24c12009-10-30 11:49:00 +00003727 frame_->CallStub(&stub, 0);
3728 break;
3729 }
3730
3731 case Token::BIT_NOT: {
3732 // Smi check.
3733 JumpTarget smi_label;
3734 JumpTarget continue_label;
3735 __ tst(r0, Operand(kSmiTagMask));
3736 smi_label.Branch(eq);
3737
3738 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003739 frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003740
3741 continue_label.Jump();
3742 smi_label.Bind();
3743 __ mvn(r0, Operand(r0));
3744 __ bic(r0, r0, Operand(kSmiTagMask)); // Bit-clear the inverted Smi tag.
3745 continue_label.Bind();
3746 break;
3747 }
3748
3749 case Token::VOID:
3750 // Since the stack top is cached in r0, popping and then
3751 // pushing a value can be done by just writing to r0.
3752 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3753 break;
3754
3755 case Token::ADD: {
3756 // Smi check.
3757 JumpTarget continue_label;
3758 __ tst(r0, Operand(kSmiTagMask));
3759 continue_label.Branch(eq);
3760 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003761 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003762 continue_label.Bind();
3763 break;
3764 }
3765 default:
3766 UNREACHABLE();
3767 }
3768 frame_->EmitPush(r0); // r0 has result
3769 }
3770 ASSERT(!has_valid_frame() ||
3771 (has_cc() && frame_->height() == original_height) ||
3772 (!has_cc() && frame_->height() == original_height + 1));
3773}
3774
3775
3776void CodeGenerator::VisitCountOperation(CountOperation* node) {
3777#ifdef DEBUG
3778 int original_height = frame_->height();
3779#endif
3780 VirtualFrame::SpilledScope spilled_scope;
3781 Comment cmnt(masm_, "[ CountOperation");
3782
3783 bool is_postfix = node->is_postfix();
3784 bool is_increment = node->op() == Token::INC;
3785
3786 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3787 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3788
3789 // Postfix: Make room for the result.
3790 if (is_postfix) {
3791 __ mov(r0, Operand(0));
3792 frame_->EmitPush(r0);
3793 }
3794
3795 { Reference target(this, node->expression());
3796 if (target.is_illegal()) {
3797 // Spoof the virtual frame to have the expected height (one higher
3798 // than on entry).
3799 if (!is_postfix) {
3800 __ mov(r0, Operand(Smi::FromInt(0)));
3801 frame_->EmitPush(r0);
3802 }
3803 ASSERT(frame_->height() == original_height + 1);
3804 return;
3805 }
Steve Blockd0582a62009-12-15 09:54:21 +00003806 target.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00003807 frame_->EmitPop(r0);
3808
3809 JumpTarget slow;
3810 JumpTarget exit;
3811
3812 // Load the value (1) into register r1.
3813 __ mov(r1, Operand(Smi::FromInt(1)));
3814
3815 // Check for smi operand.
3816 __ tst(r0, Operand(kSmiTagMask));
3817 slow.Branch(ne);
3818
3819 // Postfix: Store the old value as the result.
3820 if (is_postfix) {
3821 __ str(r0, frame_->ElementAt(target.size()));
3822 }
3823
3824 // Perform optimistic increment/decrement.
3825 if (is_increment) {
3826 __ add(r0, r0, Operand(r1), SetCC);
3827 } else {
3828 __ sub(r0, r0, Operand(r1), SetCC);
3829 }
3830
3831 // If the increment/decrement didn't overflow, we're done.
3832 exit.Branch(vc);
3833
3834 // Revert optimistic increment/decrement.
3835 if (is_increment) {
3836 __ sub(r0, r0, Operand(r1));
3837 } else {
3838 __ add(r0, r0, Operand(r1));
3839 }
3840
3841 // Slow case: Convert to number.
3842 slow.Bind();
3843 {
3844 // Convert the operand to a number.
3845 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003846 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003847 }
3848 if (is_postfix) {
3849 // Postfix: store to result (on the stack).
3850 __ str(r0, frame_->ElementAt(target.size()));
3851 }
3852
3853 // Compute the new value.
3854 __ mov(r1, Operand(Smi::FromInt(1)));
3855 frame_->EmitPush(r0);
3856 frame_->EmitPush(r1);
3857 if (is_increment) {
3858 frame_->CallRuntime(Runtime::kNumberAdd, 2);
3859 } else {
3860 frame_->CallRuntime(Runtime::kNumberSub, 2);
3861 }
3862
3863 // Store the new value in the target if not const.
3864 exit.Bind();
3865 frame_->EmitPush(r0);
3866 if (!is_const) target.SetValue(NOT_CONST_INIT);
3867 }
3868
3869 // Postfix: Discard the new value and use the old.
3870 if (is_postfix) frame_->EmitPop(r0);
3871 ASSERT(frame_->height() == original_height + 1);
3872}
3873
3874
3875void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3876#ifdef DEBUG
3877 int original_height = frame_->height();
3878#endif
3879 VirtualFrame::SpilledScope spilled_scope;
3880 Comment cmnt(masm_, "[ BinaryOperation");
3881 Token::Value op = node->op();
3882
3883 // According to ECMA-262 section 11.11, page 58, the binary logical
3884 // operators must yield the result of one of the two expressions
3885 // before any ToBoolean() conversions. This means that the value
3886 // produced by a && or || operator is not necessarily a boolean.
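  // Illustrative JS examples (not from this file): 0 || "fallback" evaluates
  // to "fallback", and 1 && {} evaluates to the object, not to true.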
3887
3888 // NOTE: If the left hand side produces a materialized value (not in
3889 // the CC register), we force the right hand side to do the
3890 // same. This is necessary because we may have to branch to the exit
3891 // after evaluating the left hand side (due to the shortcut
3892 // semantics), but the compiler must (statically) know if the result
3893 // of compiling the binary operation is materialized or not.
3894
3895 if (op == Token::AND) {
3896 JumpTarget is_true;
3897 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003898 &is_true,
3899 false_target(),
3900 false);
3901 if (has_valid_frame() && !has_cc()) {
3902 // The left-hand side result is on top of the virtual frame.
3903 JumpTarget pop_and_continue;
3904 JumpTarget exit;
3905
3906 __ ldr(r0, frame_->Top()); // Duplicate the stack top.
3907 frame_->EmitPush(r0);
3908 // Avoid popping the result if it converts to 'false' using the
3909 // standard ToBoolean() conversion as described in ECMA-262,
3910 // section 9.2, page 30.
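      // (For reference, the falsy values are undefined, null, false, +0, -0,
      // NaN and the empty string; everything else converts to true.)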
3911 ToBoolean(&pop_and_continue, &exit);
3912 Branch(false, &exit);
3913
3914 // Pop the result of evaluating the first part.
3915 pop_and_continue.Bind();
3916 frame_->EmitPop(r0);
3917
3918 // Evaluate right side expression.
3919 is_true.Bind();
3920 LoadAndSpill(node->right());
3921
3922 // Exit (always with a materialized value).
3923 exit.Bind();
3924 } else if (has_cc() || is_true.is_linked()) {
3925 // The left-hand side is either (a) partially compiled to
3926 // control flow with a final branch left to emit or (b) fully
3927 // compiled to control flow and possibly true.
3928 if (has_cc()) {
3929 Branch(false, false_target());
3930 }
3931 is_true.Bind();
3932 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003933 true_target(),
3934 false_target(),
3935 false);
3936 } else {
3937 // Nothing to do.
3938 ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
3939 }
3940
3941 } else if (op == Token::OR) {
3942 JumpTarget is_false;
3943 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003944 true_target(),
3945 &is_false,
3946 false);
3947 if (has_valid_frame() && !has_cc()) {
3948 // The left-hand side result is on top of the virtual frame.
3949 JumpTarget pop_and_continue;
3950 JumpTarget exit;
3951
3952 __ ldr(r0, frame_->Top());
3953 frame_->EmitPush(r0);
3954 // Avoid popping the result if it converts to 'true' using the
3955 // standard ToBoolean() conversion as described in ECMA-262,
3956 // section 9.2, page 30.
3957 ToBoolean(&exit, &pop_and_continue);
3958 Branch(true, &exit);
3959
3960 // Pop the result of evaluating the first part.
3961 pop_and_continue.Bind();
3962 frame_->EmitPop(r0);
3963
3964 // Evaluate right side expression.
3965 is_false.Bind();
3966 LoadAndSpill(node->right());
3967
3968 // Exit (always with a materialized value).
3969 exit.Bind();
3970 } else if (has_cc() || is_false.is_linked()) {
3971 // The left-hand side is either (a) partially compiled to
3972 // control flow with a final branch left to emit or (b) fully
3973 // compiled to control flow and possibly false.
3974 if (has_cc()) {
3975 Branch(true, true_target());
3976 }
3977 is_false.Bind();
3978 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003979 true_target(),
3980 false_target(),
3981 false);
3982 } else {
3983 // Nothing to do.
3984 ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
3985 }
3986
3987 } else {
3988 // Optimize for the case where (at least) one of the expressions
3989 // is a literal small integer.
3990 Literal* lliteral = node->left()->AsLiteral();
3991 Literal* rliteral = node->right()->AsLiteral();
3992 // NOTE: The code below assumes that the slow cases (calls to runtime)
3993 // never return a constant/immutable object.
3994 bool overwrite_left =
3995 (node->left()->AsBinaryOperation() != NULL &&
3996 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
3997 bool overwrite_right =
3998 (node->right()->AsBinaryOperation() != NULL &&
3999 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
4000
4001 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
4002 LoadAndSpill(node->left());
4003 SmiOperation(node->op(),
4004 rliteral->handle(),
4005 false,
4006 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
4007
4008 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
4009 LoadAndSpill(node->right());
4010 SmiOperation(node->op(),
4011 lliteral->handle(),
4012 true,
4013 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
4014
4015 } else {
4016 OverwriteMode overwrite_mode = NO_OVERWRITE;
4017 if (overwrite_left) {
4018 overwrite_mode = OVERWRITE_LEFT;
4019 } else if (overwrite_right) {
4020 overwrite_mode = OVERWRITE_RIGHT;
4021 }
4022 LoadAndSpill(node->left());
4023 LoadAndSpill(node->right());
4024 GenericBinaryOperation(node->op(), overwrite_mode);
4025 }
4026 frame_->EmitPush(r0);
4027 }
4028 ASSERT(!has_valid_frame() ||
4029 (has_cc() && frame_->height() == original_height) ||
4030 (!has_cc() && frame_->height() == original_height + 1));
4031}
4032
4033
4034void CodeGenerator::VisitThisFunction(ThisFunction* node) {
4035#ifdef DEBUG
4036 int original_height = frame_->height();
4037#endif
4038 VirtualFrame::SpilledScope spilled_scope;
4039 __ ldr(r0, frame_->Function());
4040 frame_->EmitPush(r0);
4041 ASSERT(frame_->height() == original_height + 1);
4042}
4043
4044
4045void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
4046#ifdef DEBUG
4047 int original_height = frame_->height();
4048#endif
4049 VirtualFrame::SpilledScope spilled_scope;
4050 Comment cmnt(masm_, "[ CompareOperation");
4051
4052 // Get the expressions from the node.
4053 Expression* left = node->left();
4054 Expression* right = node->right();
4055 Token::Value op = node->op();
4056
4057 // To make null checks efficient, we check if either left or right is the
4058 // literal 'null'. If so, we optimize the code by inlining a null check
4059 // instead of calling the (very) general runtime routine for checking
4060 // equality.
4061 if (op == Token::EQ || op == Token::EQ_STRICT) {
4062 bool left_is_null =
4063 left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
4064 bool right_is_null =
4065 right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
4066 // The 'null' value can only be equal to 'null' or 'undefined'.
4067 if (left_is_null || right_is_null) {
4068 LoadAndSpill(left_is_null ? right : left);
4069 frame_->EmitPop(r0);
4070 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4071 __ cmp(r0, ip);
4072
4073 // The 'null' value is only equal to 'undefined' if using non-strict
4074 // comparisons.
4075 if (op != Token::EQ_STRICT) {
4076 true_target()->Branch(eq);
4077
4078 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4079 __ cmp(r0, Operand(ip));
4080 true_target()->Branch(eq);
4081
4082 __ tst(r0, Operand(kSmiTagMask));
4083 false_target()->Branch(eq);
4084
4085 // It can be an undetectable object.
4086 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
4087 __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
4088 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
4089 __ cmp(r0, Operand(1 << Map::kIsUndetectable));
4090 }
4091
4092 cc_reg_ = eq;
4093 ASSERT(has_cc() && frame_->height() == original_height);
4094 return;
4095 }
4096 }
4097
4098 // To make typeof testing for natives implemented in JavaScript really
4099 // efficient, we generate special code for expressions of the form:
4100 // 'typeof <expression> == <string>'.
4101 UnaryOperation* operation = left->AsUnaryOperation();
4102 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
4103 (operation != NULL && operation->op() == Token::TYPEOF) &&
4104 (right->AsLiteral() != NULL &&
4105 right->AsLiteral()->handle()->IsString())) {
4106 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
4107
4108 // Load the operand, move it to register r1.
4109 LoadTypeofExpression(operation->expression());
4110 frame_->EmitPop(r1);
4111
4112 if (check->Equals(Heap::number_symbol())) {
4113 __ tst(r1, Operand(kSmiTagMask));
4114 true_target()->Branch(eq);
4115 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4116 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4117 __ cmp(r1, ip);
4118 cc_reg_ = eq;
4119
4120 } else if (check->Equals(Heap::string_symbol())) {
4121 __ tst(r1, Operand(kSmiTagMask));
4122 false_target()->Branch(eq);
4123
4124 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4125
4126 // It can be an undetectable string object.
4127 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
4128 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
4129 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
4130 false_target()->Branch(eq);
4131
4132 __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
4133 __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
4134 cc_reg_ = lt;
4135
4136 } else if (check->Equals(Heap::boolean_symbol())) {
4137 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4138 __ cmp(r1, ip);
4139 true_target()->Branch(eq);
4140 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4141 __ cmp(r1, ip);
4142 cc_reg_ = eq;
4143
4144 } else if (check->Equals(Heap::undefined_symbol())) {
4145 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4146 __ cmp(r1, ip);
4147 true_target()->Branch(eq);
4148
4149 __ tst(r1, Operand(kSmiTagMask));
4150 false_target()->Branch(eq);
4151
4152 // It can be an undetectable object.
4153 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4154 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
4155 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
4156 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
4157
4158 cc_reg_ = eq;
4159
4160 } else if (check->Equals(Heap::function_symbol())) {
4161 __ tst(r1, Operand(kSmiTagMask));
4162 false_target()->Branch(eq);
Steve Blockd0582a62009-12-15 09:54:21 +00004163 Register map_reg = r2;
4164 __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
4165 true_target()->Branch(eq);
4166 // Regular expressions are callable so typeof == 'function'.
4167 __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004168 cc_reg_ = eq;
4169
4170 } else if (check->Equals(Heap::object_symbol())) {
4171 __ tst(r1, Operand(kSmiTagMask));
4172 false_target()->Branch(eq);
4173
Steve Blocka7e24c12009-10-30 11:49:00 +00004174 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4175 __ cmp(r1, ip);
4176 true_target()->Branch(eq);
4177
Steve Blockd0582a62009-12-15 09:54:21 +00004178 Register map_reg = r2;
4179 __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
4180 false_target()->Branch(eq);
4181
Steve Blocka7e24c12009-10-30 11:49:00 +00004182 // It can be an undetectable object.
Steve Blockd0582a62009-12-15 09:54:21 +00004183 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00004184 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
4185 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
4186 false_target()->Branch(eq);
4187
Steve Blockd0582a62009-12-15 09:54:21 +00004188 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
4189 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00004190 false_target()->Branch(lt);
Steve Blockd0582a62009-12-15 09:54:21 +00004191 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00004192 cc_reg_ = le;
4193
4194 } else {
4195 // Uncommon case: typeof testing against a string literal that is
4196 // never returned from the typeof operator.
4197 false_target()->Jump();
4198 }
4199 ASSERT(!has_valid_frame() ||
4200 (has_cc() && frame_->height() == original_height));
4201 return;
4202 }
4203
4204 switch (op) {
4205 case Token::EQ:
4206 Comparison(eq, left, right, false);
4207 break;
4208
4209 case Token::LT:
4210 Comparison(lt, left, right);
4211 break;
4212
4213 case Token::GT:
4214 Comparison(gt, left, right);
4215 break;
4216
4217 case Token::LTE:
4218 Comparison(le, left, right);
4219 break;
4220
4221 case Token::GTE:
4222 Comparison(ge, left, right);
4223 break;
4224
4225 case Token::EQ_STRICT:
4226 Comparison(eq, left, right, true);
4227 break;
4228
4229 case Token::IN: {
4230 LoadAndSpill(left);
4231 LoadAndSpill(right);
Steve Blockd0582a62009-12-15 09:54:21 +00004232 frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004233 frame_->EmitPush(r0);
4234 break;
4235 }
4236
4237 case Token::INSTANCEOF: {
4238 LoadAndSpill(left);
4239 LoadAndSpill(right);
4240 InstanceofStub stub;
4241 frame_->CallStub(&stub, 2);
4242 // At this point if instanceof succeeded then r0 == 0.
4243 __ tst(r0, Operand(r0));
4244 cc_reg_ = eq;
4245 break;
4246 }
4247
4248 default:
4249 UNREACHABLE();
4250 }
4251 ASSERT((has_cc() && frame_->height() == original_height) ||
4252 (!has_cc() && frame_->height() == original_height + 1));
4253}
4254
4255
4256#ifdef DEBUG
4257bool CodeGenerator::HasValidEntryRegisters() { return true; }
4258#endif
4259
4260
4261#undef __
4262#define __ ACCESS_MASM(masm)
4263
4264
4265Handle<String> Reference::GetName() {
4266 ASSERT(type_ == NAMED);
4267 Property* property = expression_->AsProperty();
4268 if (property == NULL) {
4269 // Global variable reference treated as a named property reference.
4270 VariableProxy* proxy = expression_->AsVariableProxy();
4271 ASSERT(proxy->AsVariable() != NULL);
4272 ASSERT(proxy->AsVariable()->is_global());
4273 return proxy->name();
4274 } else {
4275 Literal* raw_name = property->key()->AsLiteral();
4276 ASSERT(raw_name != NULL);
4277 return Handle<String>(String::cast(*raw_name->handle()));
4278 }
4279}
4280
4281
Steve Blockd0582a62009-12-15 09:54:21 +00004282void Reference::GetValue() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004283 ASSERT(cgen_->HasValidEntryRegisters());
4284 ASSERT(!is_illegal());
4285 ASSERT(!cgen_->has_cc());
4286 MacroAssembler* masm = cgen_->masm();
4287 Property* property = expression_->AsProperty();
4288 if (property != NULL) {
4289 cgen_->CodeForSourcePosition(property->position());
4290 }
4291
4292 switch (type_) {
4293 case SLOT: {
4294 Comment cmnt(masm, "[ Load from Slot");
4295 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
4296 ASSERT(slot != NULL);
Steve Blockd0582a62009-12-15 09:54:21 +00004297 cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00004298 break;
4299 }
4300
4301 case NAMED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00004302 VirtualFrame* frame = cgen_->frame();
4303 Comment cmnt(masm, "[ Load from named Property");
4304 Handle<String> name(GetName());
4305 Variable* var = expression_->AsVariableProxy()->AsVariable();
4306 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
4307 // Setup the name register.
4308 Result name_reg(r2);
4309 __ mov(r2, Operand(name));
4310 ASSERT(var == NULL || var->is_global());
4311 RelocInfo::Mode rmode = (var == NULL)
4312 ? RelocInfo::CODE_TARGET
4313 : RelocInfo::CODE_TARGET_CONTEXT;
4314 frame->CallCodeObject(ic, rmode, &name_reg, 0);
4315 frame->EmitPush(r0);
4316 break;
4317 }
4318
4319 case KEYED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00004320 // TODO(181): Implement inlined version of array indexing once
4321 // loop nesting is properly tracked on ARM.
4322 VirtualFrame* frame = cgen_->frame();
4323 Comment cmnt(masm, "[ Load from keyed Property");
4324 ASSERT(property != NULL);
4325 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
4326 Variable* var = expression_->AsVariableProxy()->AsVariable();
4327 ASSERT(var == NULL || var->is_global());
4328 RelocInfo::Mode rmode = (var == NULL)
4329 ? RelocInfo::CODE_TARGET
4330 : RelocInfo::CODE_TARGET_CONTEXT;
4331 frame->CallCodeObject(ic, rmode, 0);
4332 frame->EmitPush(r0);
4333 break;
4334 }
4335
4336 default:
4337 UNREACHABLE();
4338 }
4339}
4340
4341
4342void Reference::SetValue(InitState init_state) {
4343 ASSERT(!is_illegal());
4344 ASSERT(!cgen_->has_cc());
4345 MacroAssembler* masm = cgen_->masm();
4346 VirtualFrame* frame = cgen_->frame();
4347 Property* property = expression_->AsProperty();
4348 if (property != NULL) {
4349 cgen_->CodeForSourcePosition(property->position());
4350 }
4351
4352 switch (type_) {
4353 case SLOT: {
4354 Comment cmnt(masm, "[ Store to Slot");
4355 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
Leon Clarkee46be812010-01-19 14:06:41 +00004356 cgen_->StoreToSlot(slot, init_state);
Steve Blocka7e24c12009-10-30 11:49:00 +00004357 break;
4358 }
4359
4360 case NAMED: {
4361 Comment cmnt(masm, "[ Store to named Property");
4362 // Call the appropriate IC code.
4363 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
4364 Handle<String> name(GetName());
4365
4366 Result value(r0);
4367 frame->EmitPop(r0);
4368
4369 // Setup the name register.
4370 Result property_name(r2);
4371 __ mov(r2, Operand(name));
4372 frame->CallCodeObject(ic,
4373 RelocInfo::CODE_TARGET,
4374 &value,
4375 &property_name,
4376 0);
4377 frame->EmitPush(r0);
4378 break;
4379 }
4380
4381 case KEYED: {
4382 Comment cmnt(masm, "[ Store to keyed Property");
4383 Property* property = expression_->AsProperty();
4384 ASSERT(property != NULL);
4385 cgen_->CodeForSourcePosition(property->position());
4386
4387 // Call IC code.
4388 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
4389 // TODO(1222589): Make the IC grab the values from the stack.
4390 Result value(r0);
4391 frame->EmitPop(r0); // value
4392 frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
4393 frame->EmitPush(r0);
4394 break;
4395 }
4396
4397 default:
4398 UNREACHABLE();
4399 }
4400}
4401
4402
Leon Clarkee46be812010-01-19 14:06:41 +00004403void FastNewClosureStub::Generate(MacroAssembler* masm) {
4404 // Clone the boilerplate in new space. Set the context to the
4405 // current context in cp.
4406 Label gc;
4407
4408 // Pop the boilerplate function from the stack.
4409 __ pop(r3);
4410
4411 // Attempt to allocate new JSFunction in new space.
4412 __ AllocateInNewSpace(JSFunction::kSize / kPointerSize,
4413 r0,
4414 r1,
4415 r2,
4416 &gc,
4417 TAG_OBJECT);
4418
4419 // Compute the function map in the current global context and set that
4420 // as the map of the allocated object.
4421 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4422 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
4423 __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
4424 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4425
4426 // Clone the rest of the boilerplate fields. We don't have to update
4427 // the write barrier because the allocated object is in new space.
4428 for (int offset = kPointerSize;
4429 offset < JSFunction::kSize;
4430 offset += kPointerSize) {
4431 if (offset == JSFunction::kContextOffset) {
4432 __ str(cp, FieldMemOperand(r0, offset));
4433 } else {
4434 __ ldr(r1, FieldMemOperand(r3, offset));
4435 __ str(r1, FieldMemOperand(r0, offset));
4436 }
4437 }
4438
4439 // Return the result. The boilerplate function argument has already been popped.
4440 __ Ret();
4441
4442 // Create a new closure through the slower runtime call.
4443 __ bind(&gc);
4444 __ push(cp);
4445 __ push(r3);
4446 __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
4447}
4448
4449
4450void FastNewContextStub::Generate(MacroAssembler* masm) {
4451 // Try to allocate the context in new space.
4452 Label gc;
4453 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
4454
4455 // Attempt to allocate the context in new space.
4456 __ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize),
4457 r0,
4458 r1,
4459 r2,
4460 &gc,
4461 TAG_OBJECT);
4462
4463 // Load the function from the stack.
4464 __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
4465
4466 // Setup the object header.
4467 __ LoadRoot(r2, Heap::kContextMapRootIndex);
4468 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4469 __ mov(r2, Operand(length));
4470 __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
4471
4472 // Setup the fixed slots.
4473 __ mov(r1, Operand(Smi::FromInt(0)));
4474 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
4475 __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
4476 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4477 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
4478
4479 // Copy the global object from the surrounding context.
4480 __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4481 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
4482
4483 // Initialize the rest of the slots to undefined.
4484 __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
4485 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
4486 __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
4487 }
4488
4489 // Remove the on-stack argument and return.
4490 __ mov(cp, r0);
4491 __ pop();
4492 __ Ret();
4493
4494 // Need to collect. Call into runtime system.
4495 __ bind(&gc);
4496 __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
4497}
4498
4499
Steve Blocka7e24c12009-10-30 11:49:00 +00004500// Count leading zeros in a 32-bit word. On ARMv5 and later it uses the clz
4501// instruction. On pre-ARMv5 hardware this routine gives the wrong answer for 0
4502// (31 instead of 32).
4503static void CountLeadingZeros(
4504 MacroAssembler* masm,
4505 Register source,
4506 Register scratch,
4507 Register zeros) {
4508#ifdef CAN_USE_ARMV5_INSTRUCTIONS
4509 __ clz(zeros, source); // This instruction is only available on ARMv5 and later.
4510#else
4511 __ mov(zeros, Operand(0));
4512 __ mov(scratch, source);
4513 // Top 16.
4514 __ tst(scratch, Operand(0xffff0000));
4515 __ add(zeros, zeros, Operand(16), LeaveCC, eq);
4516 __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
4517 // Top 8.
4518 __ tst(scratch, Operand(0xff000000));
4519 __ add(zeros, zeros, Operand(8), LeaveCC, eq);
4520 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
4521 // Top 4.
4522 __ tst(scratch, Operand(0xf0000000));
4523 __ add(zeros, zeros, Operand(4), LeaveCC, eq);
4524 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
4525 // Top 2.
4526 __ tst(scratch, Operand(0xc0000000));
4527 __ add(zeros, zeros, Operand(2), LeaveCC, eq);
4528 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
4529 // Top bit.
4530 __ tst(scratch, Operand(0x80000000u));
4531 __ add(zeros, zeros, Operand(1), LeaveCC, eq);
4532#endif
4533}
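
// Illustrative sketch (not part of this file): the same binary-search count
// in portable C/C++ for a 32-bit word, assuming <stdint.h> is available:
//
//   int CountLeadingZeros32(uint32_t x) {
//     if (x == 0) return 32;
//     int n = 0;
//     if ((x & 0xffff0000u) == 0) { n += 16; x <<= 16; }
//     if ((x & 0xff000000u) == 0) { n += 8;  x <<= 8;  }
//     if ((x & 0xf0000000u) == 0) { n += 4;  x <<= 4;  }
//     if ((x & 0xc0000000u) == 0) { n += 2;  x <<= 2;  }
//     if ((x & 0x80000000u) == 0) { n += 1; }
//     return n;
//   }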
4534
4535
4536// Takes a Smi and converts to an IEEE 64 bit floating point value in two
4537// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
4538// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
4539// scratch register. Destroys the source register. No GC occurs during this
4540// stub so you don't have to set up the frame.
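// Worked example (illustrative): the Smi 3 is 1.1b * 2^1, so the biased
// exponent is 1023 + 1 = 1024 (0x400) and the encoding is 0x40080000 in the
// exponent word and 0x00000000 in the mantissa word.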
4541class ConvertToDoubleStub : public CodeStub {
4542 public:
4543 ConvertToDoubleStub(Register result_reg_1,
4544 Register result_reg_2,
4545 Register source_reg,
4546 Register scratch_reg)
4547 : result1_(result_reg_1),
4548 result2_(result_reg_2),
4549 source_(source_reg),
4550 zeros_(scratch_reg) { }
4551
4552 private:
4553 Register result1_;
4554 Register result2_;
4555 Register source_;
4556 Register zeros_;
4557
4558 // Minor key encoding in 16 bits.
4559 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4560 class OpBits: public BitField<Token::Value, 2, 14> {};
4561
4562 Major MajorKey() { return ConvertToDouble; }
4563 int MinorKey() {
4564 // Encode the parameters in a unique 16 bit value.
4565 return result1_.code() +
4566 (result2_.code() << 4) +
4567 (source_.code() << 8) +
4568 (zeros_.code() << 12);
4569 }
4570
4571 void Generate(MacroAssembler* masm);
4572
4573 const char* GetName() { return "ConvertToDoubleStub"; }
4574
4575#ifdef DEBUG
4576 void Print() { PrintF("ConvertToDoubleStub\n"); }
4577#endif
4578};
4579
4580
4581void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
4582#ifndef BIG_ENDIAN_FLOATING_POINT
4583 Register exponent = result1_;
4584 Register mantissa = result2_;
4585#else
4586 Register exponent = result2_;
4587 Register mantissa = result1_;
4588#endif
4589 Label not_special;
4590 // Convert from Smi to integer.
4591 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
4592 // Move sign bit from source to destination. This works because the sign bit
4593 // in the exponent word of the double has the same position and polarity as
4594 // the 2's complement sign bit in a Smi.
4595 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4596 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
4597 // Subtract from 0 if source was negative.
4598 __ rsb(source_, source_, Operand(0), LeaveCC, ne);
4599 __ cmp(source_, Operand(1));
4600 __ b(gt, &not_special);
4601
4602 // We have -1, 0 or 1, which we treat specially.
4603 __ cmp(source_, Operand(0));
4604 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
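  // (Illustrative: 1.0 encodes as exponent word 0x3FF00000 with a zero
  // mantissa word; -1.0 additionally has the sign bit, giving 0xBFF00000.)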
4605 static const uint32_t exponent_word_for_1 =
4606 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
4607 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
4608 // 1, 0 and -1 all have 0 for the second word.
4609 __ mov(mantissa, Operand(0));
4610 __ Ret();
4611
4612 __ bind(&not_special);
4613 // Count leading zeros. Uses result2 for a scratch register on pre-ARM5.
4614 // Gets the wrong answer for 0, but we already checked for that case above.
4615 CountLeadingZeros(masm, source_, mantissa, zeros_);
4616 // Compute exponent and or it into the exponent register.
4617 // We use result2 as a scratch register here.
4618 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
4619 __ orr(exponent,
4620 exponent,
4621 Operand(mantissa, LSL, HeapNumber::kExponentShift));
4622 // Shift up the source chopping the top bit off.
4623 __ add(zeros_, zeros_, Operand(1));
4624 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
4625 __ mov(source_, Operand(source_, LSL, zeros_));
4626 // Compute lower part of fraction (last 12 bits).
4627 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
4628 // And the top (top 20 bits).
4629 __ orr(exponent,
4630 exponent,
4631 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
4632 __ Ret();
4633}
4634
4635
4636// This stub can convert a signed int32 to a heap number (double). It does
4637// not work for int32s that are in Smi range! No GC occurs during this stub
4638// so you don't have to set up the frame.
4639class WriteInt32ToHeapNumberStub : public CodeStub {
4640 public:
4641 WriteInt32ToHeapNumberStub(Register the_int,
4642 Register the_heap_number,
4643 Register scratch)
4644 : the_int_(the_int),
4645 the_heap_number_(the_heap_number),
4646 scratch_(scratch) { }
4647
4648 private:
4649 Register the_int_;
4650 Register the_heap_number_;
4651 Register scratch_;
4652
4653 // Minor key encoding in 16 bits.
4654 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4655 class OpBits: public BitField<Token::Value, 2, 14> {};
4656
4657 Major MajorKey() { return WriteInt32ToHeapNumber; }
4658 int MinorKey() {
4659 // Encode the parameters in a unique 16 bit value.
4660 return the_int_.code() +
4661 (the_heap_number_.code() << 4) +
4662 (scratch_.code() << 8);
4663 }
4664
4665 void Generate(MacroAssembler* masm);
4666
4667 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
4668
4669#ifdef DEBUG
4670 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
4671#endif
4672};
4673
4674
4675// See comment for class.
Steve Blockd0582a62009-12-15 09:54:21 +00004676void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004677 Label max_negative_int;
4678 // the_int_ holds the value to convert: a signed int32 that is not a Smi.
4679 // We test for the special value that has a different exponent. This test
4680 // has the neat side effect of setting the flags according to the sign.
4681 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4682 __ cmp(the_int_, Operand(0x80000000u));
4683 __ b(eq, &max_negative_int);
4684 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
4685 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
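  // (Worked example, for illustration: 0x40000000 == 2^30 is just outside the
  // Smi range and gets a biased exponent of 1023 + 30 == 1053, i.e. a top
  // word of 0x41D00000 with a zero mantissa word.)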
4686 uint32_t non_smi_exponent =
4687 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
4688 __ mov(scratch_, Operand(non_smi_exponent));
4689 // Set the sign bit in scratch_ if the value was negative.
4690 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
4691 // Subtract from 0 if the value was negative.
4692 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
4693 // We should be masking the implicit first digit of the mantissa away here,
4694 // but it just ends up combining harmlessly with the last digit of the
4695 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
4696 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
4697 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
4698 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
4699 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
4700 __ str(scratch_, FieldMemOperand(the_heap_number_,
4701 HeapNumber::kExponentOffset));
4702 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
4703 __ str(scratch_, FieldMemOperand(the_heap_number_,
4704 HeapNumber::kMantissaOffset));
4705 __ Ret();
4706
4707 __ bind(&max_negative_int);
4708 // The max negative int32 is stored as a positive number in the mantissa of
4709 // a double because it uses a sign bit instead of using two's complement.
4710 // The actual mantissa bits stored are all 0 because the implicit most
4711 // significant 1 bit is not stored.
4712 non_smi_exponent += 1 << HeapNumber::kExponentShift;
4713 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
4714 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
4715 __ mov(ip, Operand(0));
4716 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
4717 __ Ret();
4718}
4719
4720
4721// Handle the case where the lhs and rhs are the same object.
4722// Equality is almost reflexive (everything but NaN), so this is a test
4723// for "identity and not NaN".
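// (In JS terms: NaN == NaN and NaN === NaN are both false, while every other
// value compares equal to itself.)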
4724static void EmitIdenticalObjectComparison(MacroAssembler* masm,
4725 Label* slow,
Leon Clarkee46be812010-01-19 14:06:41 +00004726 Condition cc,
4727 bool never_nan_nan) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004728 Label not_identical;
Leon Clarkee46be812010-01-19 14:06:41 +00004729 Label heap_number, return_equal;
4730 Register exp_mask_reg = r5;
Steve Blocka7e24c12009-10-30 11:49:00 +00004731 __ cmp(r0, Operand(r1));
4732 __ b(ne, &not_identical);
4733
Leon Clarkee46be812010-01-19 14:06:41 +00004734 // The two objects are identical. If we know that one of them isn't NaN then
4735 // we now know they test equal.
4736 if (cc != eq || !never_nan_nan) {
4737 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004738
Leon Clarkee46be812010-01-19 14:06:41 +00004739 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
4740 // so we do the second-best thing and test it ourselves.
4741 // The operands are identical and they are not both Smis, so neither of them
4742 // is a Smi. If it's not a heap number, then return equal.
4743 if (cc == lt || cc == gt) {
4744 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004745 __ b(ge, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00004746 } else {
4747 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4748 __ b(eq, &heap_number);
4749 // Comparing JS objects with <=, >= is complicated.
4750 if (cc != eq) {
4751 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
4752 __ b(ge, slow);
4753 // Normally here we fall through to return_equal, but undefined is
4754 // special: (undefined == undefined) == true, but
4755 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
4756 if (cc == le || cc == ge) {
4757 __ cmp(r4, Operand(ODDBALL_TYPE));
4758 __ b(ne, &return_equal);
4759 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4760 __ cmp(r0, Operand(r2));
4761 __ b(ne, &return_equal);
4762 if (cc == le) {
4763 // undefined <= undefined should fail.
4764 __ mov(r0, Operand(GREATER));
4765 } else {
4766 // undefined >= undefined should fail.
4767 __ mov(r0, Operand(LESS));
4768 }
4769 __ mov(pc, Operand(lr)); // Return.
Steve Blockd0582a62009-12-15 09:54:21 +00004770 }
Steve Blockd0582a62009-12-15 09:54:21 +00004771 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004772 }
4773 }
Leon Clarkee46be812010-01-19 14:06:41 +00004774
Steve Blocka7e24c12009-10-30 11:49:00 +00004775 __ bind(&return_equal);
4776 if (cc == lt) {
4777 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
4778 } else if (cc == gt) {
4779 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
4780 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00004781 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
Steve Blocka7e24c12009-10-30 11:49:00 +00004782 }
4783 __ mov(pc, Operand(lr)); // Return.
4784
Leon Clarkee46be812010-01-19 14:06:41 +00004785 if (cc != eq || !never_nan_nan) {
4786 // For less and greater we don't have to check for NaN since the result of
4787 // x < x is false regardless. For the others here is some code to check
4788 // for NaN.
4789 if (cc != lt && cc != gt) {
4790 __ bind(&heap_number);
4791 // It is a heap number, so return non-equal if it's NaN and equal if it's
4792 // not NaN.
Steve Blocka7e24c12009-10-30 11:49:00 +00004793
Leon Clarkee46be812010-01-19 14:06:41 +00004794 // The representation of NaN values has all exponent bits (52..62) set,
4795 // and not all mantissa bits (0..51) clear.
4796 // Read top bits of double representation (second word of value).
4797 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
4798 // Test that exponent bits are all set.
4799 __ and_(r3, r2, Operand(exp_mask_reg));
4800 __ cmp(r3, Operand(exp_mask_reg));
4801 __ b(ne, &return_equal);
4802
4803 // Shift out flag and all exponent bits, retaining only mantissa.
4804 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
4805 // Or with all low-bits of mantissa.
4806 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
4807 __ orr(r0, r3, Operand(r2), SetCC);
4808 // For equal we already have the right value in r0: Return zero (equal)
4809 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
4810 // not (it's a NaN). For <= and >= we need to load r0 with the failing
4811 // value if it's a NaN.
4812 if (cc != eq) {
4813 // All-zero means Infinity means equal.
4814 __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
4815 if (cc == le) {
4816 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
4817 } else {
4818 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
4819 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004820 }
Leon Clarkee46be812010-01-19 14:06:41 +00004821 __ mov(pc, Operand(lr)); // Return.
Steve Blocka7e24c12009-10-30 11:49:00 +00004822 }
Leon Clarkee46be812010-01-19 14:06:41 +00004823 // No fall through here.
Steve Blocka7e24c12009-10-30 11:49:00 +00004824 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004825
4826 __ bind(&not_identical);
4827}
4828
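// Host-side sketch of the NaN test performed above on the raw words of a heap
// number (purely illustrative; the stub does this in generated ARM code): a
// double is NaN exactly when every exponent bit is set and at least one
// mantissa bit is set.
static inline bool IsNaNWordsForIllustration(uint32_t exponent_word,
                                             uint32_t mantissa_word) {
  const uint32_t kExpBits = 0x7ff00000u;      // Exponent bits in the top word.
  const uint32_t kTopMantissa = 0x000fffffu;  // Mantissa bits in the top word.
  if ((exponent_word & kExpBits) != kExpBits) return false;  // Not all ones.
  return ((exponent_word & kTopMantissa) | mantissa_word) != 0;
}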
4829
4830// See comment at call site.
4831static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Leon Clarkee46be812010-01-19 14:06:41 +00004832 Label* lhs_not_nan,
Steve Blocka7e24c12009-10-30 11:49:00 +00004833 Label* slow,
4834 bool strict) {
4835 Label lhs_is_smi;
4836 __ tst(r0, Operand(kSmiTagMask));
4837 __ b(eq, &lhs_is_smi);
4838
4839 // Rhs is a Smi. Check whether the non-smi is a heap number.
4840 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4841 if (strict) {
4842 // If lhs was not a number and rhs was a Smi then strict equality cannot
4843 // succeed. Return non-equal (r0 is already not zero).
4844 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4845 } else {
4846 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4847 // the runtime.
4848 __ b(ne, slow);
4849 }
4850
4851 // Rhs is a smi, lhs is a number.
4852 __ push(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00004853
4854 if (CpuFeatures::IsSupported(VFP3)) {
4855 CpuFeatures::Scope scope(VFP3);
4856 __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
4857 } else {
4858 __ mov(r7, Operand(r1));
4859 ConvertToDoubleStub stub1(r3, r2, r7, r6);
4860 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
4861 }
4862
4863
Steve Blocka7e24c12009-10-30 11:49:00 +00004864 // r3 and r2 are rhs as double.
4865 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4866 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
4867 // We now have both loaded as doubles but we can skip the lhs nan check
4868 // since it's a Smi.
4869 __ pop(lr);
Leon Clarkee46be812010-01-19 14:06:41 +00004870 __ jmp(lhs_not_nan);
Steve Blocka7e24c12009-10-30 11:49:00 +00004871
4872 __ bind(&lhs_is_smi);
4873 // Lhs is a Smi. Check whether the non-smi is a heap number.
4874 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
4875 if (strict) {
4876 // If lhs was not a number and rhs was a Smi then strict equality cannot
4877 // succeed. Return non-equal.
4878 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
4879 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4880 } else {
4881 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4882 // the runtime.
4883 __ b(ne, slow);
4884 }
4885
4886 // Lhs is a smi, rhs is a number.
4887 // r0 is Smi and r1 is heap number.
4888 __ push(lr);
4889 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4890 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
Steve Blockd0582a62009-12-15 09:54:21 +00004891
4892 if (CpuFeatures::IsSupported(VFP3)) {
4893 CpuFeatures::Scope scope(VFP3);
4894 __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
4895 } else {
4896 __ mov(r7, Operand(r0));
4897 ConvertToDoubleStub stub2(r1, r0, r7, r6);
4898 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
4899 }
4900
Steve Blocka7e24c12009-10-30 11:49:00 +00004901 __ pop(lr);
4902 // Fall through to both_loaded_as_doubles.
4903}
4904
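// Sketch of the Smi tagging convention used throughout these stubs (32-bit V8
// of this vintage): a Smi keeps the integer in the upper 31 bits with a zero
// tag bit at the bottom, so untagging is an arithmetic shift right by one and
// only 31-bit signed values fit. Illustrative only.
static inline int32_t UntagSmiForIllustration(int32_t tagged) {
  return tagged >> 1;  // kSmiTagSize == 1, kSmiTag == 0; ASR on the target.
}
static inline bool FitsSmiForIllustration(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}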
4905
Leon Clarkee46be812010-01-19 14:06:41 +00004906void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004907 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00004908 Register rhs_exponent = exp_first ? r0 : r1;
4909 Register lhs_exponent = exp_first ? r2 : r3;
4910 Register rhs_mantissa = exp_first ? r1 : r0;
4911 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00004912 Label one_is_nan, neither_is_nan;
Leon Clarkee46be812010-01-19 14:06:41 +00004913 Label lhs_not_nan_exp_mask_is_loaded;
Steve Blocka7e24c12009-10-30 11:49:00 +00004914
4915 Register exp_mask_reg = r5;
4916
4917 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004918 __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
4919 __ cmp(r4, Operand(exp_mask_reg));
Leon Clarkee46be812010-01-19 14:06:41 +00004920 __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
Steve Blocka7e24c12009-10-30 11:49:00 +00004921 __ mov(r4,
4922 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4923 SetCC);
4924 __ b(ne, &one_is_nan);
4925 __ cmp(lhs_mantissa, Operand(0));
Leon Clarkee46be812010-01-19 14:06:41 +00004926 __ b(ne, &one_is_nan);
4927
4928 __ bind(lhs_not_nan);
4929 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4930 __ bind(&lhs_not_nan_exp_mask_is_loaded);
4931 __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
4932 __ cmp(r4, Operand(exp_mask_reg));
4933 __ b(ne, &neither_is_nan);
4934 __ mov(r4,
4935 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4936 SetCC);
4937 __ b(ne, &one_is_nan);
4938 __ cmp(rhs_mantissa, Operand(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00004939 __ b(eq, &neither_is_nan);
4940
4941 __ bind(&one_is_nan);
4942 // NaN comparisons always fail.
4943 // Load whatever we need in r0 to make the comparison fail.
4944 if (cc == lt || cc == le) {
4945 __ mov(r0, Operand(GREATER));
4946 } else {
4947 __ mov(r0, Operand(LESS));
4948 }
4949 __ mov(pc, Operand(lr)); // Return.
4950
4951 __ bind(&neither_is_nan);
4952}
4953
4954
4955// See comment at call site.
4956static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
4957 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00004958 Register rhs_exponent = exp_first ? r0 : r1;
4959 Register lhs_exponent = exp_first ? r2 : r3;
4960 Register rhs_mantissa = exp_first ? r1 : r0;
4961 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00004962
4963 // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
4964 if (cc == eq) {
4965 // Doubles are not equal unless they have the same bit pattern.
4966 // Exception: 0 and -0.
Leon Clarkee46be812010-01-19 14:06:41 +00004967 __ cmp(rhs_mantissa, Operand(lhs_mantissa));
4968 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
Steve Blocka7e24c12009-10-30 11:49:00 +00004969 // Return non-zero if the numbers are unequal.
4970 __ mov(pc, Operand(lr), LeaveCC, ne);
4971
Leon Clarkee46be812010-01-19 14:06:41 +00004972 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00004973 // If exponents are equal then return 0.
4974 __ mov(pc, Operand(lr), LeaveCC, eq);
4975
4976 // Exponents are unequal. The only way we can return that the numbers
4977 // are equal is if one is -0 and the other is 0. We already dealt
4978 // with the case where both are -0 or both are 0.
4979 // We start by seeing if the mantissas (that are equal) or the bottom
4980 // 31 bits of the rhs exponent are non-zero. If so we return not
4981 // 31 bits of the lhs exponent are non-zero. If so we return not
Leon Clarkee46be812010-01-19 14:06:41 +00004982 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00004983 __ mov(r0, Operand(r4), LeaveCC, ne);
4984 __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
4985 // Now they are equal if and only if the rhs exponent is zero in its
4986 // low 31 bits.
Leon Clarkee46be812010-01-19 14:06:41 +00004987 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00004988 __ mov(pc, Operand(lr));
4989 } else {
4990 // Call a native function to do a comparison between two non-NaNs.
4991 // Call C routine that may not cause GC or other trouble.
4992 __ mov(r5, Operand(ExternalReference::compare_doubles()));
4993 __ Jump(r5); // Tail call.
4994 }
4995}
4996
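// Sketch of the only exception handled by the equality path above: +0 and -0
// must compare equal even though their bit patterns differ in the sign bit.
static inline bool DoubleWordsEqualForIllustration(uint32_t lhs_hi,
                                                   uint32_t lhs_lo,
                                                   uint32_t rhs_hi,
                                                   uint32_t rhs_lo) {
  if (lhs_hi == rhs_hi && lhs_lo == rhs_lo) return true;  // Same bit pattern.
  // Otherwise the values are only equal if both are zeros (ignoring the sign).
  return ((lhs_hi & 0x7fffffffu) | lhs_lo |
          (rhs_hi & 0x7fffffffu) | rhs_lo) == 0;
}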
4997
4998// See comment at call site.
4999static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
5000 // If either operand is a JSObject or an oddball value, then they are
5001 // not equal since their pointers are different.
5002 // There is no test for undetectability in strict equality.
5003 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
5004 Label first_non_object;
5005 // Get the type of the first operand into r2 and compare it with
5006 // FIRST_JS_OBJECT_TYPE.
5007 __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
5008 __ b(lt, &first_non_object);
5009
5010 // Return non-zero (r0 is not zero)
5011 Label return_not_equal;
5012 __ bind(&return_not_equal);
5013 __ mov(pc, Operand(lr)); // Return.
5014
5015 __ bind(&first_non_object);
5016 // Check for oddballs: true, false, null, undefined.
5017 __ cmp(r2, Operand(ODDBALL_TYPE));
5018 __ b(eq, &return_not_equal);
5019
5020 __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
5021 __ b(ge, &return_not_equal);
5022
5023 // Check for oddballs: true, false, null, undefined.
5024 __ cmp(r3, Operand(ODDBALL_TYPE));
5025 __ b(eq, &return_not_equal);
Leon Clarkee46be812010-01-19 14:06:41 +00005026
5027 // Now that we have the types we might as well check for symbol-symbol.
5028 // Ensure that no non-strings have the symbol bit set.
5029 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
5030 ASSERT(kSymbolTag != 0);
5031 __ and_(r2, r2, Operand(r3));
5032 __ tst(r2, Operand(kIsSymbolMask));
5033 __ b(ne, &return_not_equal);
Steve Blocka7e24c12009-10-30 11:49:00 +00005034}
5035
5036
5037// See comment at call site.
5038static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
5039 Label* both_loaded_as_doubles,
5040 Label* not_heap_numbers,
5041 Label* slow) {
Leon Clarkee46be812010-01-19 14:06:41 +00005042 __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005043 __ b(ne, not_heap_numbers);
Leon Clarkee46be812010-01-19 14:06:41 +00005044 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
5045 __ cmp(r2, r3);
Steve Blocka7e24c12009-10-30 11:49:00 +00005046 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
5047
5048 // Both are heap numbers. Load them up then jump to the code we have
5049 // for that.
5050 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
5051 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
5052 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
5053 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
5054 __ jmp(both_loaded_as_doubles);
5055}
5056
5057
5058// Fast negative check for symbol-to-symbol equality.
5059static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
5060 // r2 is object type of r0.
Leon Clarkee46be812010-01-19 14:06:41 +00005061 // Ensure that no non-strings have the symbol bit set.
5062 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
5063 ASSERT(kSymbolTag != 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00005064 __ tst(r2, Operand(kIsSymbolMask));
5065 __ b(eq, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00005066 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
5067 __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00005068 __ tst(r3, Operand(kIsSymbolMask));
5069 __ b(eq, slow);
5070
5071 // Both are symbols. We already checked they weren't the same pointer
5072 // so they are not equal.
5073 __ mov(r0, Operand(1)); // Non-zero indicates not equal.
5074 __ mov(pc, Operand(lr)); // Return.
5075}
5076
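// Sketch of the instance-type trick used in the two routines above: only
// string types may have the symbol bit set (asserted via
// kNotStringTag + kIsSymbolMask > LAST_TYPE), so ANDing the two instance types
// and testing that single bit answers "are both operands symbols?".
static inline bool BothSymbolsForIllustration(int first_type, int second_type) {
  return ((first_type & second_type) & kIsSymbolMask) != 0;
}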
5077
5078// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
5079// positive or negative to indicate the result of the comparison.
5080void CompareStub::Generate(MacroAssembler* masm) {
5081 Label slow; // Call builtin.
Leon Clarkee46be812010-01-19 14:06:41 +00005082 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
Steve Blocka7e24c12009-10-30 11:49:00 +00005083
5084 // NOTICE! This code is only reached after a smi-fast-case check, so
5085 // it is certain that at least one operand isn't a smi.
5086
5087 // Handle the case where the objects are identical. Either returns the answer
5088 // or goes to slow. Only falls through if the objects were not identical.
Leon Clarkee46be812010-01-19 14:06:41 +00005089 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005090
5091 // If either is a Smi (we know that not both are), then they can only
5092 // be strictly equal if the other is a HeapNumber.
5093 ASSERT_EQ(0, kSmiTag);
5094 ASSERT_EQ(0, Smi::FromInt(0));
5095 __ and_(r2, r0, Operand(r1));
5096 __ tst(r2, Operand(kSmiTagMask));
5097 __ b(ne, &not_smis);
5098 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
5099 // 1) Return the answer.
5100 // 2) Go to slow.
5101 // 3) Fall through to both_loaded_as_doubles.
Leon Clarkee46be812010-01-19 14:06:41 +00005102 // 4) Jump to lhs_not_nan.
Steve Blocka7e24c12009-10-30 11:49:00 +00005103 // In cases 3 and 4 we have found out we were dealing with a number-number
5104 // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
Leon Clarkee46be812010-01-19 14:06:41 +00005105 EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005106
5107 __ bind(&both_loaded_as_doubles);
Leon Clarkee46be812010-01-19 14:06:41 +00005108 // r0, r1, r2, r3 are the double representations of the right hand side
5109 // and the left hand side.
Steve Blocka7e24c12009-10-30 11:49:00 +00005110
Steve Blockd0582a62009-12-15 09:54:21 +00005111 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarkee46be812010-01-19 14:06:41 +00005112 __ bind(&lhs_not_nan);
Steve Blockd0582a62009-12-15 09:54:21 +00005113 CpuFeatures::Scope scope(VFP3);
Leon Clarkee46be812010-01-19 14:06:41 +00005114 Label no_nan;
Steve Blockd0582a62009-12-15 09:54:21 +00005115 // ARMv7 VFP3 instructions to implement double precision comparison.
Leon Clarkee46be812010-01-19 14:06:41 +00005116 __ vmov(d6, r0, r1);
5117 __ vmov(d7, r2, r3);
Steve Blockd0582a62009-12-15 09:54:21 +00005118
Leon Clarkee46be812010-01-19 14:06:41 +00005119 __ vcmp(d7, d6);
5120 __ vmrs(pc); // Move vector status bits to normal status bits.
5121 Label nan;
5122 __ b(vs, &nan);
5123 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
5124 __ mov(r0, Operand(LESS), LeaveCC, lt);
5125 __ mov(r0, Operand(GREATER), LeaveCC, gt);
5126 __ mov(pc, Operand(lr));
5127
5128 __ bind(&nan);
5129 // If one of the sides was a NaN then the v flag is set. Load r0 with
5130 // whatever it takes to make the comparison fail, since comparisons with NaN
5131 // always fail.
5132 if (cc_ == lt || cc_ == le) {
5133 __ mov(r0, Operand(GREATER));
5134 } else {
5135 __ mov(r0, Operand(LESS));
5136 }
Steve Blockd0582a62009-12-15 09:54:21 +00005137 __ mov(pc, Operand(lr));
5138 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00005139 // Checks for NaN in the doubles we have loaded. Can return the answer or
5140 // fall through if neither is a NaN. Also binds lhs_not_nan.
5141 EmitNanCheck(masm, &lhs_not_nan, cc_);
Steve Blockd0582a62009-12-15 09:54:21 +00005142 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
5143 // answer. Never falls through.
5144 EmitTwoNonNanDoubleComparison(masm, cc_);
5145 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005146
5147 __ bind(&not_smis);
5148 // At this point we know we are dealing with two different objects,
5149 // and neither of them is a Smi. The objects are in r0 and r1.
5150 if (strict_) {
5151 // This returns non-equal for some object types, or falls through if it
5152 // was not lucky.
5153 EmitStrictTwoHeapObjectCompare(masm);
5154 }
5155
5156 Label check_for_symbols;
5157 // Check for heap-number-heap-number comparison. Can jump to slow case,
5158 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
5159 // that case. If the inputs are not doubles then jumps to check_for_symbols.
Leon Clarkee46be812010-01-19 14:06:41 +00005160 // In this case r2 will contain the type of r0. Never falls through.
Steve Blocka7e24c12009-10-30 11:49:00 +00005161 EmitCheckForTwoHeapNumbers(masm,
5162 &both_loaded_as_doubles,
5163 &check_for_symbols,
5164 &slow);
5165
5166 __ bind(&check_for_symbols);
Leon Clarkee46be812010-01-19 14:06:41 +00005167 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
5168 // symbols.
5169 if (cc_ == eq && !strict_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005170 // Either jumps to slow or returns the answer. Assumes that r2 is the type
5171 // of r0 on entry.
5172 EmitCheckForSymbols(masm, &slow);
5173 }
5174
5175 __ bind(&slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00005176 __ push(r1);
5177 __ push(r0);
5178 // Figure out which native to call and setup the arguments.
5179 Builtins::JavaScript native;
Steve Blocka7e24c12009-10-30 11:49:00 +00005180 if (cc_ == eq) {
5181 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
5182 } else {
5183 native = Builtins::COMPARE;
5184 int ncr; // NaN compare result
5185 if (cc_ == lt || cc_ == le) {
5186 ncr = GREATER;
5187 } else {
5188 ASSERT(cc_ == gt || cc_ == ge); // remaining cases
5189 ncr = LESS;
5190 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005191 __ mov(r0, Operand(Smi::FromInt(ncr)));
5192 __ push(r0);
5193 }
5194
5195 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
5196 // tagged as a small integer.
Leon Clarkee46be812010-01-19 14:06:41 +00005197 __ InvokeBuiltin(native, JUMP_JS);
Steve Blocka7e24c12009-10-30 11:49:00 +00005198}
5199
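// Sketch of the "ncr" (NaN compare result) convention used above: the stub and
// the COMPARE builtin return a negative, zero or positive value like memcmp,
// and when either operand is NaN the result is picked so that the condition
// being tested always fails (e.g. NaN < x and NaN <= x are both false).
static inline int NanCompareResultForIllustration(Condition cc) {
  return (cc == lt || cc == le) ? GREATER : LESS;
}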
5200
5201// Allocates a heap number or jumps to the label if the young space is full and
5202// a scavenge is needed.
5203static void AllocateHeapNumber(
5204 MacroAssembler* masm,
5205 Label* need_gc, // Jump here if young space is full.
5206 Register result, // The tagged address of the new heap number.
5207 Register scratch1, // A scratch register.
5208 Register scratch2) { // Another scratch register.
5209 // Allocate an object in the heap for the heap number and tag it as a heap
5210 // object.
5211 __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
5212 result,
5213 scratch1,
5214 scratch2,
5215 need_gc,
5216 TAG_OBJECT);
5217
5218 // Get heap number map and store it in the allocated object.
5219 __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
5220 __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
5221}
5222
5223
5224// We fall into this code if the operands were Smis, but the result was
5225// not (eg. overflow). We branch into this code (to the not_smi label) if
5226 // not (e.g. overflow). We branch into this code (to the not_smi label) if
5227// to call the C-implemented binary fp operation routines we need to end up
5228// with the double precision floating point operands in r0 and r1 (for the
5229// value in r1) and r2 and r3 (for the value in r0).
5230static void HandleBinaryOpSlowCases(MacroAssembler* masm,
5231 Label* not_smi,
5232 const Builtins::JavaScript& builtin,
5233 Token::Value operation,
5234 OverwriteMode mode) {
5235 Label slow, slow_pop_2_first, do_the_call;
5236 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
5237 // Smi-smi case (overflow).
5238 // Since both are Smis there is no heap number to overwrite, so allocate.
5239 // The new heap number is in r5. r6 and r7 are scratch.
5240 AllocateHeapNumber(masm, &slow, r5, r6, r7);
Steve Blockd0582a62009-12-15 09:54:21 +00005241
5242 if (CpuFeatures::IsSupported(VFP3)) {
5243 CpuFeatures::Scope scope(VFP3);
5244 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
5245 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
5246 } else {
5247 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
5248 __ mov(r7, Operand(r0));
5249 ConvertToDoubleStub stub1(r3, r2, r7, r6);
5250 __ push(lr);
5251 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
5252 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
5253 __ mov(r7, Operand(r1));
5254 ConvertToDoubleStub stub2(r1, r0, r7, r6);
5255 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
5256 __ pop(lr);
5257 }
5258
Steve Blocka7e24c12009-10-30 11:49:00 +00005259 __ jmp(&do_the_call); // Tail call. No return.
5260
5261 // We jump to here if something goes wrong (one param is not a number of any
5262 // sort or new-space allocation fails).
5263 __ bind(&slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005264
5265 // Push arguments to the stack
Steve Blocka7e24c12009-10-30 11:49:00 +00005266 __ push(r1);
5267 __ push(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00005268
5269 if (Token::ADD == operation) {
5270 // Test for string arguments before calling runtime.
5271 // r1 : first argument
5272 // r0 : second argument
5273 // sp[0] : second argument
5274 // sp[1] : first argument
5275
5276 Label not_strings, not_string1, string1;
5277 __ tst(r1, Operand(kSmiTagMask));
5278 __ b(eq, &not_string1);
5279 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
5280 __ b(ge, &not_string1);
5281
5282 // First argument is a string, test second.
5283 __ tst(r0, Operand(kSmiTagMask));
5284 __ b(eq, &string1);
5285 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
5286 __ b(ge, &string1);
5287
5288 // First and second argument are strings.
5289 __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
5290
5291 // Only first argument is a string.
5292 __ bind(&string1);
5293 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
5294
5295 // First argument was not a string, test second.
5296 __ bind(&not_string1);
5297 __ tst(r0, Operand(kSmiTagMask));
5298 __ b(eq, &not_strings);
5299 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
5300 __ b(ge, &not_strings);
5301
5302 // Only second argument is a string.
5304 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
5305
5306 __ bind(&not_strings);
5307 }
5308
Steve Blocka7e24c12009-10-30 11:49:00 +00005309 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
5310
5311 // We branch here if at least one of r0 and r1 is not a Smi.
5312 __ bind(not_smi);
5313 if (mode == NO_OVERWRITE) {
5314 // In the case where there is no chance of an overwritable float we may as
5315 // well do the allocation immediately while r0 and r1 are untouched.
5316 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5317 }
5318
5319 // Move r0 to a double in r2-r3.
5320 __ tst(r0, Operand(kSmiTagMask));
5321 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5322 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5323 __ b(ne, &slow);
5324 if (mode == OVERWRITE_RIGHT) {
5325 __ mov(r5, Operand(r0)); // Overwrite this heap number.
5326 }
5327 // Calling convention says that second double is in r2 and r3.
5328 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
5329 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
5330 __ jmp(&finished_loading_r0);
5331 __ bind(&r0_is_smi);
5332 if (mode == OVERWRITE_RIGHT) {
5333 // We can't overwrite a Smi so get address of new heap number into r5.
5334 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5335 }
Steve Blockd0582a62009-12-15 09:54:21 +00005336
5337
5338 if (CpuFeatures::IsSupported(VFP3)) {
5339 CpuFeatures::Scope scope(VFP3);
5340 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
5341 } else {
5342 // Write Smi from r0 to r3 and r2 in double format.
5343 __ mov(r7, Operand(r0));
5344 ConvertToDoubleStub stub3(r3, r2, r7, r6);
5345 __ push(lr);
5346 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
5347 __ pop(lr);
5348 }
5349
Steve Blocka7e24c12009-10-30 11:49:00 +00005350 __ bind(&finished_loading_r0);
5351
5352 // Move r1 to a double in r0-r1.
5353 __ tst(r1, Operand(kSmiTagMask));
5354 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5355 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5356 __ b(ne, &slow);
5357 if (mode == OVERWRITE_LEFT) {
5358 __ mov(r5, Operand(r1)); // Overwrite this heap number.
5359 }
5360 // Calling convention says that first double is in r0 and r1.
5361 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
5362 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
5363 __ jmp(&finished_loading_r1);
5364 __ bind(&r1_is_smi);
5365 if (mode == OVERWRITE_LEFT) {
5366 // We can't overwrite a Smi so get address of new heap number into r5.
5367 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5368 }
Steve Blockd0582a62009-12-15 09:54:21 +00005369
5370 if (CpuFeatures::IsSupported(VFP3)) {
5371 CpuFeatures::Scope scope(VFP3);
5372 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
5373 } else {
5374 // Write Smi from r1 to r1 and r0 in double format.
5375 __ mov(r7, Operand(r1));
5376 ConvertToDoubleStub stub4(r1, r0, r7, r6);
5377 __ push(lr);
5378 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
5379 __ pop(lr);
5380 }
5381
Steve Blocka7e24c12009-10-30 11:49:00 +00005382 __ bind(&finished_loading_r1);
5383
5384 __ bind(&do_the_call);
5385 // r0: Left value (least significant part of mantissa).
5386 // r1: Left value (sign, exponent, top of mantissa).
5387 // r2: Right value (least significant part of mantissa).
5388 // r3: Right value (sign, exponent, top of mantissa).
5389 // r5: Address of heap number for result.
Steve Blockd0582a62009-12-15 09:54:21 +00005390
5391 if (CpuFeatures::IsSupported(VFP3) &&
5392 ((Token::MUL == operation) ||
5393 (Token::DIV == operation) ||
5394 (Token::ADD == operation) ||
5395 (Token::SUB == operation))) {
5396 CpuFeatures::Scope scope(VFP3);
5397 // ARMv7 VFP3 instructions to implement
5398 // double precision, add, subtract, multiply, divide.
Leon Clarkee46be812010-01-19 14:06:41 +00005399 __ vmov(d6, r0, r1);
5400 __ vmov(d7, r2, r3);
Steve Blockd0582a62009-12-15 09:54:21 +00005401
5402 if (Token::MUL == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005403 __ vmul(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005404 } else if (Token::DIV == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005405 __ vdiv(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005406 } else if (Token::ADD == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005407 __ vadd(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005408 } else if (Token::SUB == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005409 __ vsub(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005410 } else {
5411 UNREACHABLE();
5412 }
5413
Leon Clarkee46be812010-01-19 14:06:41 +00005414 __ vmov(r0, r1, d5);
Steve Blockd0582a62009-12-15 09:54:21 +00005415
5416 __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
5417 __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
5418 __ mov(r0, Operand(r5));
5419 __ mov(pc, lr);
5420 return;
5421 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005422 __ push(lr); // For later.
5423 __ push(r5); // Address of heap number that is answer.
5424 __ AlignStack(0);
5425 // Call C routine that may not cause GC or other trouble.
5426 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
5427 __ Call(r5);
5428 __ pop(r4); // Address of heap number.
5429 __ cmp(r4, Operand(Smi::FromInt(0)));
5430 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
5431 // Store answer in the overwritable heap number.
5432#if !defined(USE_ARM_EABI)
5433 // Double returned in fp coprocessor register 0 and 1, encoded as register
5434 // cr8. Offsets must be divisible by 4 for coprocessor so we need to
5435 // substract the tag from r4.
5436 // subtract the tag from r4.
5437 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
5438#else
5439 // Double returned in registers 0 and 1.
5440 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
5441 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
5442#endif
5443 __ mov(r0, Operand(r4));
5444 // And we are done.
5445 __ pop(pc);
5446}
5447
5448
5449// Tries to get a signed int32 out of a double precision floating point heap
5450// number. Rounds towards 0. Fastest for doubles that are in the ranges
5451// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
5452// almost to the range of signed int32 values that are not Smis. Jumps to the
5453// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
5454// (excluding the endpoints).
5455static void GetInt32(MacroAssembler* masm,
5456 Register source,
5457 Register dest,
5458 Register scratch,
5459 Register scratch2,
5460 Label* slow) {
5461 Label right_exponent, done;
5462 // Get exponent word.
5463 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
5464 // Get exponent alone in scratch2.
5465 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
5466 // Load dest with zero. We use this either for the final shift or
5467 // for the answer.
5468 __ mov(dest, Operand(0));
5469 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
5470 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
5471 // the exponent that we are fastest at and also the highest exponent we can
5472 // handle here.
5473 const uint32_t non_smi_exponent =
5474 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
5475 __ cmp(scratch2, Operand(non_smi_exponent));
5476 // If we have a match of the int32-but-not-Smi exponent then skip some logic.
5477 __ b(eq, &right_exponent);
5478 // If the exponent is higher than that then go to slow case. This catches
5479 // numbers that don't fit in a signed int32, infinities and NaNs.
5480 __ b(gt, slow);
5481
5482 // We know the exponent is smaller than 30 (biased). If it is less than
5483 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
5484 // it rounds to zero.
5485 const uint32_t zero_exponent =
5486 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
5487 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
5488 // Dest already has a Smi zero.
5489 __ b(lt, &done);
Steve Blockd0582a62009-12-15 09:54:21 +00005490 if (!CpuFeatures::IsSupported(VFP3)) {
5491 // We have a shifted exponent between 0 and 30 in scratch2.
5492 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
5493 // We now have the exponent in dest. Subtract from 30 to get
5494 // how much to shift down.
5495 __ rsb(dest, dest, Operand(30));
5496 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005497 __ bind(&right_exponent);
Steve Blockd0582a62009-12-15 09:54:21 +00005498 if (CpuFeatures::IsSupported(VFP3)) {
5499 CpuFeatures::Scope scope(VFP3);
5500 // ARMv7 VFP3 instructions implementing double precision to integer
5501 // conversion using round to zero.
5502 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00005503 __ vmov(d7, scratch2, scratch);
5504 __ vcvt(s15, d7);
5505 __ vmov(dest, s15);
Steve Blockd0582a62009-12-15 09:54:21 +00005506 } else {
5507 // Get the top bits of the mantissa.
5508 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
5509 // Put back the implicit 1.
5510 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
5511 // Shift up the mantissa bits to take up the space the exponent used to
5512 // take. We just orred in the implicit bit so that took care of one and
5513 // we want to leave the sign bit 0 so we subtract 2 bits from the shift
5514 // distance.
5515 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
5516 __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
5517 // Put sign in zero flag.
5518 __ tst(scratch, Operand(HeapNumber::kSignMask));
5519 // Get the second half of the double. For some exponents we don't
5520 // actually need this because the bits get shifted out again, but
5521 // it's probably slower to test than just to do it.
5522 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
5523 // Shift down 22 bits to get the last 10 bits.
5524 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
5525 // Move down according to the exponent.
5526 __ mov(dest, Operand(scratch, LSR, dest));
5527 // Fix sign if sign bit was set.
5528 __ rsb(dest, dest, Operand(0), LeaveCC, ne);
5529 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005530 __ bind(&done);
5531}
5532
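// Host-side model (illustrative only) of the value GetInt32 produces without
// VFP3 for the exponents it handles: restore the implicit mantissa bit, shift
// the 53-bit mantissa right so that only the integer part remains, and negate
// if the sign bit was set. Exponents above 30, infinities and NaNs are sent to
// the slow case by the stub, so they are simply cut off to zero here.
static inline int32_t TruncateDoubleWordsForIllustration(uint32_t high_word,
                                                         uint32_t low_word) {
  int exponent = static_cast<int>((high_word >> 20) & 0x7ff) - 1023;
  if (exponent < 0 || exponent > 30) return 0;  // Stub handles these elsewhere.
  uint64_t mantissa =
      (static_cast<uint64_t>(high_word & 0xfffffu) << 32) | low_word;
  mantissa |= static_cast<uint64_t>(1) << 52;   // Put back the implicit 1.
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  return (high_word & 0x80000000u) ? -static_cast<int32_t>(magnitude)
                                   : static_cast<int32_t>(magnitude);
}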
Steve Blocka7e24c12009-10-30 11:49:00 +00005533// For bitwise ops where the inputs are not both Smis we here try to determine
5534// whether both inputs are either Smis or at least heap numbers that can be
5535// represented by a 32 bit signed value. We truncate towards zero as required
5536// by the ES spec. If this is the case we do the bitwise op and see if the
5537// result is a Smi. If so, great, otherwise we try to find a heap number to
5538// write the answer into (either by allocating or by overwriting).
5539// On entry the operands are in r0 and r1. On exit the answer is in r0.
5540void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
5541 Label slow, result_not_a_smi;
5542 Label r0_is_smi, r1_is_smi;
5543 Label done_checking_r0, done_checking_r1;
5544
5545 __ tst(r1, Operand(kSmiTagMask));
5546 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5547 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5548 __ b(ne, &slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005549 GetInt32(masm, r1, r3, r5, r4, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00005550 __ jmp(&done_checking_r1);
5551 __ bind(&r1_is_smi);
5552 __ mov(r3, Operand(r1, ASR, 1));
5553 __ bind(&done_checking_r1);
5554
5555 __ tst(r0, Operand(kSmiTagMask));
5556 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5557 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5558 __ b(ne, &slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005559 GetInt32(masm, r0, r2, r5, r4, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00005560 __ jmp(&done_checking_r0);
5561 __ bind(&r0_is_smi);
5562 __ mov(r2, Operand(r0, ASR, 1));
5563 __ bind(&done_checking_r0);
5564
5565 // r0 and r1: Original operands (Smi or heap numbers).
5566 // r2 and r3: Signed int32 operands.
5567 switch (op_) {
5568 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
5569 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
5570 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
5571 case Token::SAR:
5572 // Use only the 5 least significant bits of the shift count.
5573 __ and_(r2, r2, Operand(0x1f));
5574 __ mov(r2, Operand(r3, ASR, r2));
5575 break;
5576 case Token::SHR:
5577 // Use only the 5 least significant bits of the shift count.
5578 __ and_(r2, r2, Operand(0x1f));
5579 __ mov(r2, Operand(r3, LSR, r2), SetCC);
5580 // SHR is special because it is required to produce a positive answer.
5581 // The code below for writing into heap numbers isn't capable of writing
5582 // the register as an unsigned int so we go to slow case if we hit this
5583 // case.
5584 __ b(mi, &slow);
5585 break;
5586 case Token::SHL:
5587 // Use only the 5 least significant bits of the shift count.
5588 __ and_(r2, r2, Operand(0x1f));
5589 __ mov(r2, Operand(r3, LSL, r2));
5590 break;
5591 default: UNREACHABLE();
5592 }
5593 // Check that the *signed* result fits in a Smi.
5594 __ add(r3, r2, Operand(0x40000000), SetCC);
5595 __ b(mi, &result_not_a_smi);
5596 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5597 __ Ret();
5598
5599 Label have_to_allocate, got_a_heap_number;
5600 __ bind(&result_not_a_smi);
5601 switch (mode_) {
5602 case OVERWRITE_RIGHT: {
5603 __ tst(r0, Operand(kSmiTagMask));
5604 __ b(eq, &have_to_allocate);
5605 __ mov(r5, Operand(r0));
5606 break;
5607 }
5608 case OVERWRITE_LEFT: {
5609 __ tst(r1, Operand(kSmiTagMask));
5610 __ b(eq, &have_to_allocate);
5611 __ mov(r5, Operand(r1));
5612 break;
5613 }
5614 case NO_OVERWRITE: {
5615 // Get a new heap number in r5. r6 and r7 are scratch.
5616 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5617 }
5618 default: break;
5619 }
5620 __ bind(&got_a_heap_number);
5621 // r2: Answer as signed int32.
5622 // r5: Heap number to write answer into.
5623
5624 // Nothing can go wrong now, so move the heap number to r0, which is the
5625 // result.
5626 __ mov(r0, Operand(r5));
5627
5628 // Tail call that writes the int32 in r2 to the heap number in r0, using
5629 // r3 as scratch. r0 is preserved and returned.
5630 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
5631 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
5632
5633 if (mode_ != NO_OVERWRITE) {
5634 __ bind(&have_to_allocate);
5635 // Get a new heap number in r5. r6 and r7 are scratch.
5636 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5637 __ jmp(&got_a_heap_number);
5638 }
5639
5640 // If all else failed then we go to the runtime system.
5641 __ bind(&slow);
5642 __ push(r1); // restore stack
5643 __ push(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00005644 switch (op_) {
5645 case Token::BIT_OR:
5646 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
5647 break;
5648 case Token::BIT_AND:
5649 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
5650 break;
5651 case Token::BIT_XOR:
5652 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
5653 break;
5654 case Token::SAR:
5655 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
5656 break;
5657 case Token::SHR:
5658 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
5659 break;
5660 case Token::SHL:
5661 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
5662 break;
5663 default:
5664 UNREACHABLE();
5665 }
5666}
5667
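// Sketch of the ECMAScript shift semantics applied in HandleNonSmiBitwiseOp
// above (and in the Smi fast path further down): only the five least
// significant bits of the shift count are used, and >>> produces an unsigned
// result, which is why the stub bails out when the top bit ends up set (such a
// value cannot be represented as a Smi and must become a heap number).
static inline int32_t JsShiftLeftForIllustration(int32_t value, uint32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 0x1f));
}
static inline uint32_t JsShiftRightLogicalForIllustration(int32_t value,
                                                          uint32_t count) {
  return static_cast<uint32_t>(value) >> (count & 0x1f);
}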
5668
5669// Can we multiply by x with max two shifts and an add.
5670// This answers yes to all integers from 2 to 10.
5671static bool IsEasyToMultiplyBy(int x) {
5672 if (x < 2) return false; // Avoid special cases.
5673 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
5674 if (IsPowerOf2(x)) return true; // Simple shift.
5675 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
5676 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
5677 return false;
5678}
5679
5680
5681// Can multiply by anything that IsEasyToMultiplyBy returns true for.
5682// Source and destination may be the same register. This routine does
5683// not set carry and overflow the way a mul instruction would.
5684static void MultiplyByKnownInt(MacroAssembler* masm,
5685 Register source,
5686 Register destination,
5687 int known_int) {
5688 if (IsPowerOf2(known_int)) {
5689 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
5690 } else if (PopCountLessThanEqual2(known_int)) {
5691 int first_bit = BitPosition(known_int);
5692 int second_bit = BitPosition(known_int ^ (1 << first_bit));
5693 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
5694 if (first_bit != 0) {
5695 __ mov(destination, Operand(destination, LSL, first_bit));
5696 }
5697 } else {
5698 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
5699 int the_bit = BitPosition(known_int + 1);
5700 __ rsb(destination, source, Operand(source, LSL, the_bit));
5701 }
5702}
5703
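// The three shapes MultiplyByKnownInt above emits, written out in plain C for
// illustration: a power of two is a single shift, a constant with two set bits
// is a shifted add, and an all-ones pattern such as 7 or 15 is (x << n) - x.
static inline int32_t TimesTenForIllustration(int32_t x) {
  uint32_t u = static_cast<uint32_t>(x);
  return static_cast<int32_t>((u + (u << 2)) << 1);  // 10 = (1 + 4) * 2.
}
static inline int32_t TimesSevenForIllustration(int32_t x) {
  uint32_t u = static_cast<uint32_t>(x);
  return static_cast<int32_t>((u << 3) - u);         // 7 = 8 - 1 (rsb above).
}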
5704
5705// This function (as opposed to MultiplyByKnownInt) takes the known int in a
5706 // This function (as opposed to MultiplyByKnownInt) takes the known int in
5707// a result that needs shifting.
5708static void MultiplyByKnownInt2(
5709 MacroAssembler* masm,
5710 Register result,
5711 Register source,
5712 Register known_int_register, // Smi tagged.
5713 int known_int,
5714 int* required_shift) { // Including Smi tag shift
5715 switch (known_int) {
5716 case 3:
5717 __ add(result, source, Operand(source, LSL, 1));
5718 *required_shift = 1;
5719 break;
5720 case 5:
5721 __ add(result, source, Operand(source, LSL, 2));
5722 *required_shift = 1;
5723 break;
5724 case 6:
5725 __ add(result, source, Operand(source, LSL, 1));
5726 *required_shift = 2;
5727 break;
5728 case 7:
5729 __ rsb(result, source, Operand(source, LSL, 3));
5730 *required_shift = 1;
5731 break;
5732 case 9:
5733 __ add(result, source, Operand(source, LSL, 3));
5734 *required_shift = 1;
5735 break;
5736 case 10:
5737 __ add(result, source, Operand(source, LSL, 2));
5738 *required_shift = 2;
5739 break;
5740 default:
5741 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
5742 __ mul(result, source, known_int_register);
5743 *required_shift = 0;
5744 }
5745}
5746
5747
Leon Clarkee46be812010-01-19 14:06:41 +00005748const char* GenericBinaryOpStub::GetName() {
5749 if (name_ != NULL) return name_;
5750 const int len = 100;
5751 name_ = Bootstrapper::AllocateAutoDeletedArray(len);
5752 if (name_ == NULL) return "OOM";
5753 const char* op_name = Token::Name(op_);
5754 const char* overwrite_name;
5755 switch (mode_) {
5756 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
5757 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
5758 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
5759 default: overwrite_name = "UnknownOverwrite"; break;
5760 }
5761
5762 OS::SNPrintF(Vector<char>(name_, len),
5763 "GenericBinaryOpStub_%s_%s%s",
5764 op_name,
5765 overwrite_name,
5766                specialized_on_rhs_ ? "_ConstantRhs" : "");
5767 return name_;
5768}
5769
5770
Steve Blocka7e24c12009-10-30 11:49:00 +00005771void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
5772 // r1 : x
5773 // r0 : y
5774 // result : r0
5775
5776 // All ops need to know whether we are dealing with two Smis. Set up r2 to
5777 // tell us that.
5778 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
5779
5780 switch (op_) {
5781 case Token::ADD: {
5782 Label not_smi;
5783 // Fast path.
5784 ASSERT(kSmiTag == 0); // Adjust code below.
5785 __ tst(r2, Operand(kSmiTagMask));
5786 __ b(ne, &not_smi);
5787 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
5788 // Return if no overflow.
5789 __ Ret(vc);
5790 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
5791
5792 HandleBinaryOpSlowCases(masm,
5793 &not_smi,
5794 Builtins::ADD,
5795 Token::ADD,
5796 mode_);
5797 break;
5798 }
5799
5800 case Token::SUB: {
5801 Label not_smi;
5802 // Fast path.
5803 ASSERT(kSmiTag == 0); // Adjust code below.
5804 __ tst(r2, Operand(kSmiTagMask));
5805 __ b(ne, &not_smi);
5806 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
5807 // Return if no overflow.
5808 __ Ret(vc);
5809 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
5810
5811 HandleBinaryOpSlowCases(masm,
5812 &not_smi,
5813 Builtins::SUB,
5814 Token::SUB,
5815 mode_);
5816 break;
5817 }
5818
5819 case Token::MUL: {
5820 Label not_smi, slow;
5821 ASSERT(kSmiTag == 0); // adjust code below
5822 __ tst(r2, Operand(kSmiTagMask));
5823 __ b(ne, &not_smi);
5824 // Remove tag from one operand (but keep sign), so that result is Smi.
5825 __ mov(ip, Operand(r0, ASR, kSmiTagSize));
5826 // Do multiplication
5827 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
5828 // Go slow on overflows (overflow bit is not set).
5829 __ mov(ip, Operand(r3, ASR, 31));
5830 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
5831 __ b(ne, &slow);
5832 // Go slow on zero result to handle -0.
5833 __ tst(r3, Operand(r3));
5834 __ mov(r0, Operand(r3), LeaveCC, ne);
5835 __ Ret(ne);
5836 // We need -0 if we were multiplying a negative number with 0 to get 0.
5837 // We know one of them was zero.
5838 __ add(r2, r0, Operand(r1), SetCC);
5839 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
5840 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
5841 // Slow case. We fall through here if we multiplied a negative number
5842 // with 0, because that would mean we should produce -0.
5843 __ bind(&slow);
5844
5845 HandleBinaryOpSlowCases(masm,
5846 &not_smi,
5847 Builtins::MUL,
5848 Token::MUL,
5849 mode_);
5850 break;
5851 }
5852
5853 case Token::DIV:
5854 case Token::MOD: {
5855 Label not_smi;
5856 if (specialized_on_rhs_) {
5857 Label smi_is_unsuitable;
5858 __ BranchOnNotSmi(r1, &not_smi);
5859 if (IsPowerOf2(constant_rhs_)) {
5860 if (op_ == Token::MOD) {
5861 __ and_(r0,
5862 r1,
5863 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
5864 SetCC);
5865 // We now have the answer, but if the input was negative we also
5866 // have the sign bit. Our work is done if the result is
5867 // positive or zero:
5868 __ Ret(pl);
5869 // A mod of a negative left hand side must return a negative number.
5870 // Unfortunately if the answer is 0 then we must return -0. And we
5871 // already optimistically trashed r0 so we may need to restore it.
5872 __ eor(r0, r0, Operand(0x80000000u), SetCC);
5873 // Next two instructions are conditional on the answer being -0.
5874 __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
5875 __ b(eq, &smi_is_unsuitable);
5876 // We need to subtract the dividend. Eg. -3 % 4 == -3.
5877 __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
5878 } else {
5879 ASSERT(op_ == Token::DIV);
5880 __ tst(r1,
5881 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
5882 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
5883 int shift = 0;
5884 int d = constant_rhs_;
5885 while ((d & 1) == 0) {
5886 d >>= 1;
5887 shift++;
5888 }
5889 __ mov(r0, Operand(r1, LSR, shift));
5890 __ bic(r0, r0, Operand(kSmiTagMask));
5891 }
5892 } else {
5893 // Not a power of 2.
5894 __ tst(r1, Operand(0x80000000u));
5895 __ b(ne, &smi_is_unsuitable);
5896 // Find a fixed point reciprocal of the divisor so we can divide by
5897 // multiplying.
5898 double divisor = 1.0 / constant_rhs_;
5899 int shift = 32;
5900 double scale = 4294967296.0; // 1 << 32.
5901 uint32_t mul;
5902 // Maximise the precision of the fixed point reciprocal.
5903 while (true) {
5904 mul = static_cast<uint32_t>(scale * divisor);
5905 if (mul >= 0x7fffffff) break;
5906 scale *= 2.0;
5907 shift++;
5908 }
5909 mul++;
5910 __ mov(r2, Operand(mul));
5911 __ umull(r3, r2, r2, r1);
5912 __ mov(r2, Operand(r2, LSR, shift - 31));
5913 // r2 is r1 / rhs. r2 is not Smi tagged.
5914 // r0 is still the known rhs. r0 is Smi tagged.
5915 // r1 is still the unknown lhs. r1 is Smi tagged.
5916 int required_r4_shift = 0; // Including the Smi tag shift of 1.
5917 // r4 = r2 * r0.
5918 MultiplyByKnownInt2(masm,
5919 r4,
5920 r2,
5921 r0,
5922 constant_rhs_,
5923 &required_r4_shift);
5924 // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
5925 if (op_ == Token::DIV) {
5926 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
5927 __ b(ne, &smi_is_unsuitable); // There was a remainder.
5928 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5929 } else {
5930 ASSERT(op_ == Token::MOD);
5931 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
5932 }
5933 }
5934 __ Ret();
5935 __ bind(&smi_is_unsuitable);
5936 } else {
5937 __ jmp(&not_smi);
5938 }
5939 HandleBinaryOpSlowCases(masm,
5940 &not_smi,
5941 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
5942 op_,
5943 mode_);
5944 break;
5945 }
5946
5947 case Token::BIT_OR:
5948 case Token::BIT_AND:
5949 case Token::BIT_XOR:
5950 case Token::SAR:
5951 case Token::SHR:
5952 case Token::SHL: {
5953 Label slow;
5954 ASSERT(kSmiTag == 0); // adjust code below
5955 __ tst(r2, Operand(kSmiTagMask));
5956 __ b(ne, &slow);
5957 switch (op_) {
5958 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
5959 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
5960 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
5961 case Token::SAR:
5962 // Remove tags from right operand.
5963 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5964 // Use only the 5 least significant bits of the shift count.
5965 __ and_(r2, r2, Operand(0x1f));
5966 __ mov(r0, Operand(r1, ASR, r2));
5967 // Smi tag result.
5968 __ bic(r0, r0, Operand(kSmiTagMask));
5969 break;
5970 case Token::SHR:
5971 // Remove tags from operands. We can't do this on a 31 bit number
5972 // because then the 0s get shifted into bit 30 instead of bit 31.
5973 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
5974 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5975 // Use only the 5 least significant bits of the shift count.
5976 __ and_(r2, r2, Operand(0x1f));
5977 __ mov(r3, Operand(r3, LSR, r2));
5978 // Unsigned shift is not allowed to produce a negative number, so
5979 // check the sign bit and the sign bit after Smi tagging.
5980 __ tst(r3, Operand(0xc0000000));
5981 __ b(ne, &slow);
5982 // Smi tag result.
5983 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
5984 break;
5985 case Token::SHL:
5986 // Remove tags from operands.
5987 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
5988 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5989 // Use only the 5 least significant bits of the shift count.
5990 __ and_(r2, r2, Operand(0x1f));
5991 __ mov(r3, Operand(r3, LSL, r2));
5992 // Check that the signed result fits in a Smi.
5993 __ add(r2, r3, Operand(0x40000000), SetCC);
5994 __ b(mi, &slow);
5995 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
5996 break;
5997 default: UNREACHABLE();
5998 }
5999 __ Ret();
6000 __ bind(&slow);
6001 HandleNonSmiBitwiseOp(masm);
6002 break;
6003 }
6004
6005 default: UNREACHABLE();
6006 }
6007 // This code should be unreachable.
6008 __ stop("Unreachable");
6009}
6010
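// Host-side sketch of the fixed-point reciprocal division used by the
// specialized-on-rhs DIV/MOD fast path above: pick the same (mul, shift) pair
// the stub computes at code-generation time, take the 64-bit product and keep
// the bits above 'shift'. The generated code applies this to the Smi-tagged
// value and folds the extra factor of two into the shift; over the 31-bit Smi
// range this reproduces n / d exactly.
static inline uint32_t DivideByKnownIntForIllustration(uint32_t n, uint32_t d) {
  double divisor = 1.0 / d;
  double scale = 4294967296.0;  // 1 << 32.
  int shift = 32;
  uint32_t mul;
  while (true) {
    mul = static_cast<uint32_t>(scale * divisor);
    if (mul >= 0x7fffffff) break;
    scale *= 2.0;
    shift++;
  }
  mul++;
  uint64_t product = static_cast<uint64_t>(mul) * n;
  return static_cast<uint32_t>(product >> shift);
}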
6011
6012void StackCheckStub::Generate(MacroAssembler* masm) {
6013 // Do tail-call to runtime routine. Runtime routines expect at least one
6014 // argument, so give it a Smi.
6015 __ mov(r0, Operand(Smi::FromInt(0)));
6016 __ push(r0);
6017 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
6018
6019 __ StubReturn(1);
6020}
6021
6022
Leon Clarkee46be812010-01-19 14:06:41 +00006023void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
6024 ASSERT(op_ == Token::SUB);
6025
Steve Blocka7e24c12009-10-30 11:49:00 +00006026 Label undo;
6027 Label slow;
6028 Label not_smi;
6029
6030 // Enter runtime system if the value is not a smi.
6031 __ tst(r0, Operand(kSmiTagMask));
6032 __ b(ne, &not_smi);
6033
6034 // Enter runtime system if the value of the expression is zero
6035 // to make sure that we switch between 0 and -0.
6036 __ cmp(r0, Operand(0));
6037 __ b(eq, &slow);
6038
6039 // The value of the expression is a smi that is not zero. Try
6040 // optimistic subtraction '0 - value'.
6041 __ rsb(r1, r0, Operand(0), SetCC);
6042 __ b(vs, &slow);
6043
6044 __ mov(r0, Operand(r1)); // Set r0 to result.
6045 __ StubReturn(1);
6046
6047 // Enter runtime system.
6048 __ bind(&slow);
6049 __ push(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00006050 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
6051
6052 __ bind(&not_smi);
6053 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
6054 __ b(ne, &slow);
6055 // r0 is a heap number. Get a new heap number in r1.
6056 if (overwrite_) {
6057 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6058 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
6059 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6060 } else {
6061 AllocateHeapNumber(masm, &slow, r1, r2, r3);
6062 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
6063 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6064 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
6065 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
6066 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
6067 __ mov(r0, Operand(r1));
6068 }
6069 __ StubReturn(1);
6070}
6071
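// Sketch of the heap-number negation above: flipping the sign bit in the
// exponent word negates an IEEE-754 double without touching the exponent or
// mantissa bits, which is why no arithmetic is needed.
static inline uint32_t FlipSignWordForIllustration(uint32_t exponent_word) {
  return exponent_word ^ 0x80000000u;  // HeapNumber::kSignMask.
}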
6072
6073int CEntryStub::MinorKey() {
6074 ASSERT(result_size_ <= 2);
6075 // Result returned in r0 or r0+r1 by default.
6076 return 0;
6077}
6078
6079
6080void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
6081 // r0 holds the exception.
6082
6083 // Adjust this code if not the case.
6084 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6085
6086 // Drop the sp to the top of the handler.
6087 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6088 __ ldr(sp, MemOperand(r3));
6089
6090 // Restore the next handler and frame pointer, discard handler state.
6091 ASSERT(StackHandlerConstants::kNextOffset == 0);
6092 __ pop(r2);
6093 __ str(r2, MemOperand(r3));
6094 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6095 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
6096
6097 // Before returning we restore the context from the frame pointer if
6098 // not NULL. The frame pointer is NULL in the exception handler of a
6099 // JS entry frame.
6100 __ cmp(fp, Operand(0));
6101 // Set cp to NULL if fp is NULL.
6102 __ mov(cp, Operand(0), LeaveCC, eq);
6103 // Restore cp otherwise.
6104 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6105#ifdef DEBUG
6106 if (FLAG_debug_code) {
6107 __ mov(lr, Operand(pc));
6108 }
6109#endif
6110 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6111 __ pop(pc);
6112}
6113
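// Illustrative layout of the stack handler record that GenerateThrowTOS above
// and GenerateThrowUncatchable below unwind, matching the asserted
// StackHandlerConstants offsets on this 32-bit port (the state slot at one
// word is inferred from the other three offsets and kSize == 4 words).
struct StackHandlerSketchForIllustration {
  uint32_t next;   // kNextOffset == 0: link to the previous handler.
  uint32_t state;  // kStateOffset: StackHandler::ENTRY or a try-handler state.
  uint32_t fp;     // kFPOffset == 2 * kPointerSize.
  uint32_t pc;     // kPCOffset == 3 * kPointerSize.
};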
6114
6115void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
6116 UncatchableExceptionType type) {
6117 // Adjust this code if not the case.
6118 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6119
6120 // Drop sp to the top stack handler.
6121 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6122 __ ldr(sp, MemOperand(r3));
6123
6124 // Unwind the handlers until the ENTRY handler is found.
6125 Label loop, done;
6126 __ bind(&loop);
6127 // Load the type of the current stack handler.
6128 const int kStateOffset = StackHandlerConstants::kStateOffset;
6129 __ ldr(r2, MemOperand(sp, kStateOffset));
6130 __ cmp(r2, Operand(StackHandler::ENTRY));
6131 __ b(eq, &done);
6132 // Fetch the next handler in the list.
6133 const int kNextOffset = StackHandlerConstants::kNextOffset;
6134 __ ldr(sp, MemOperand(sp, kNextOffset));
6135 __ jmp(&loop);
6136 __ bind(&done);
6137
  // Set the top handler address to the next handler, just past the current
  // ENTRY handler.
  ASSERT(StackHandlerConstants::kNextOffset == 0);
  __ pop(r2);
  __ str(r2, MemOperand(r3));

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(Top::k_external_caught_exception_address);
    __ mov(r0, Operand(false));
    __ mov(r2, Operand(external_caught));
    __ str(r0, MemOperand(r2));

    // Set pending exception and r0 to out of memory exception.
    Failure* out_of_memory = Failure::OutOfMemoryException();
    __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
    __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
    __ str(r0, MemOperand(r2));
  }

  // Stack layout at this point. See also StackHandlerConstants.
  // sp ->   state (ENTRY)
  //         fp
  //         lr

  // Discard handler state (r2 is not used) and restore frame pointer.
  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
  // Before returning we restore the context from the frame pointer if
  // not NULL. The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  __ cmp(fp, Operand(0));
  // Set cp to NULL if fp is NULL.
  __ mov(cp, Operand(0), LeaveCC, eq);
  // Restore cp otherwise.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif
  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  __ pop(pc);
}


void CEntryStub::GenerateCore(MacroAssembler* masm,
                              Label* throw_normal_exception,
                              Label* throw_termination_exception,
                              Label* throw_out_of_memory_exception,
                              ExitFrame::Mode mode,
                              bool do_gc,
                              bool always_allocate) {
  // r0: result parameter for PerformGC, if any
  // r4: number of arguments including receiver (C callee-saved)
  // r5: pointer to builtin function (C callee-saved)
  // r6: pointer to the first argument (C callee-saved)

  if (do_gc) {
    // Passing r0.
    ExternalReference gc_reference = ExternalReference::perform_gc_function();
    __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
  }

  ExternalReference scope_depth =
      ExternalReference::heap_always_allocate_scope_depth();
  if (always_allocate) {
    __ mov(r0, Operand(scope_depth));
    __ ldr(r1, MemOperand(r0));
    __ add(r1, r1, Operand(1));
    __ str(r1, MemOperand(r0));
  }

  // Call C built-in.
  // r0 = argc, r1 = argv
  __ mov(r0, Operand(r4));
  __ mov(r1, Operand(r6));

  // TODO(1242173): To let the GC traverse the return address of the exit
  // frames, we need to know where the return address is. Right now,
  // we push it on the stack to be able to find it again, but we never
  // restore from it in case of changes, which makes it impossible to
  // support moving the C entry code stub. This should be fixed, but currently
  // this is OK because the CEntryStub gets generated so early in the V8 boot
  // sequence that it never moves.
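  // On ARM, reading pc yields the address of the current instruction plus 8,
  // so the add below produces the address of the instruction just past the
  // following push and jump. That address is pushed for the GC to find and is
  // also where the called builtin returns to.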
  masm->add(lr, pc, Operand(4));  // compute return address: (pc + 8) + 4
  masm->push(lr);
  masm->Jump(r5);

  if (always_allocate) {
    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1,
    // though; they contain the result.
    __ mov(r2, Operand(scope_depth));
    __ ldr(r3, MemOperand(r2));
    __ sub(r3, r3, Operand(1));
    __ str(r3, MemOperand(r2));
  }

  // check for failure result
  Label failure_returned;
  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
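  // Failure objects carry a non-zero tag in their low bits; the ASSERT above
  // guarantees that tag + 1 has zero low bits, so adding 1 and testing
  // kFailureTagMask detects a failure without disturbing r0.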
  __ add(r2, r0, Operand(1));
  __ tst(r2, Operand(kFailureTagMask));
  __ b(eq, &failure_returned);

  // Exit C frame and return.
  // r0:r1: result
  // sp: stack pointer
  // fp: frame pointer
  __ LeaveExitFrame(mode);

  // check if we should retry or throw exception
  Label retry;
  __ bind(&failure_returned);
  ASSERT(Failure::RETRY_AFTER_GC == 0);
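  // The failure type field sits just above the tag bits, and RETRY_AFTER_GC
  // is type 0, so a clear type field means the allocation should be retried.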
  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
  __ b(eq, &retry);

  // Special handling of out of memory exceptions.
  Failure* out_of_memory = Failure::OutOfMemoryException();
  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
  __ b(eq, throw_out_of_memory_exception);

  // Retrieve the pending exception and clear the variable.
  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
  __ ldr(r3, MemOperand(ip));
  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
  __ ldr(r0, MemOperand(ip));
  __ str(r3, MemOperand(ip));

  // Special handling of termination exceptions, which are uncatchable
  // by JavaScript code.
  __ cmp(r0, Operand(Factory::termination_exception()));
  __ b(eq, throw_termination_exception);

  // Handle normal exception.
  __ jmp(throw_normal_exception);

  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
}


void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // r0: number of arguments including receiver
  // r1: pointer to builtin function
  // fp: frame pointer (restored after C call)
  // sp: stack pointer (restored as callee's sp after C call)
  // cp: current context (C callee-saved)

  // NOTE: Invocations of builtins may return failure objects
  // instead of a proper result. The builtin entry handles
  // this by performing a garbage collection and retrying the
  // builtin once.

  ExitFrame::Mode mode = is_debug_break
      ? ExitFrame::MODE_DEBUG
      : ExitFrame::MODE_NORMAL;

  // Enter the exit frame that transitions from JavaScript to C++.
  __ EnterExitFrame(mode);

  // r4: number of arguments (C callee-saved)
  // r5: pointer to builtin function (C callee-saved)
  // r6: pointer to first argument (C callee-saved)

  Label throw_normal_exception;
  Label throw_termination_exception;
  Label throw_out_of_memory_exception;

  // Call into the runtime system.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               mode,
               false,
               false);

  // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               mode,
               true,
               false);

  // Do full GC and retry runtime call one final time.
  Failure* failure = Failure::InternalError();
  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               mode,
               true,
               true);

  __ bind(&throw_out_of_memory_exception);
  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);

  __ bind(&throw_termination_exception);
  GenerateThrowUncatchable(masm, TERMINATION);

  __ bind(&throw_normal_exception);
  GenerateThrowTOS(masm);
}


void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // [sp+0]: argv

  Label invoke, exit;

  // Called from C, so do not pop argc and args on exit (preserve sp)
  // No need to save register-passed args
  // Save callee-saved registers (incl. cp and fp), sp, and lr
  __ stm(db_w, sp, kCalleeSaved | lr.bit());

  // Get address of argv, see stm above.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  __ add(r4, sp, Operand((kNumCalleeSaved + 1) * kPointerSize));
  __ ldr(r4, MemOperand(r4));  // argv

  // Push a frame with special values set up to mark it as an entry frame.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
  __ mov(r7, Operand(Smi::FromInt(marker)));
  __ mov(r6, Operand(Smi::FromInt(marker)));
  __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  __ ldr(r5, MemOperand(r5));
  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
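  // After this stm the stack holds, from sp upwards: the caller's c_entry_fp,
  // two copies of the frame marker smi, and the bad frame pointer (-1).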

  // Set up the frame pointer for the frame to be pushed.
  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Call a faked try-block that does the invoke.
  __ bl(&invoke);

  // Caught exception: Store result (exception) in the pending
  // exception field in the JSEnv and return a failure sentinel.
  // Coming in here the fp will be invalid because the PushTryHandler below
  // sets it to 0 to signal the existence of the JSEntry frame.
  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
  __ str(r0, MemOperand(ip));
  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
  __ b(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r0-r4; r5-r7 are available.
  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bl(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
  __ ldr(r5, MemOperand(ip));
  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
  __ str(r5, MemOperand(ip));

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Registers expected by Builtins::JSEntryTrampoline:
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  if (is_construct) {
    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::JSEntryTrampoline);
    __ mov(ip, Operand(entry));
  }
  __ ldr(ip, MemOperand(ip));  // deref address

  // Branch and link to JSEntryTrampoline. We don't use the double underscore
  // macro for the add instruction because we don't want the coverage tool
  // inserting instructions here after we read the pc.
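  // mov lr, pc sets lr to the instruction following the add (pc reads eight
  // bytes ahead on ARM), so the trampoline returns to the code below.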
  __ mov(lr, Operand(pc));
  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Unlink this frame from the handler chain. When reading the
  // address of the next handler, there is no need to use the address
  // displacement since the current stack pointer (sp) points directly
  // to the stack handler.
  __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
  __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
  __ str(r3, MemOperand(ip));
  // No need to restore registers
  __ add(sp, sp, Operand(StackHandlerConstants::kSize));


  __ bind(&exit);  // r0 holds result
  // Restore the top frame descriptors from the stack.
  __ pop(r3);
  __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  __ str(r3, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved registers and return.
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif
  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}


// This stub performs an instanceof, calling the builtin function if
// necessary. Uses r0 for the object and r1 for the function that it may
// be an instance of (these are fetched from the stack).
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Get the object - slow case for smis (we may need to throw an exception
  // depending on the rhs).
  Label slow, loop, is_instance, is_not_instance;
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
  __ BranchOnSmi(r0, &slow);

  // Check that the left hand is a JS object and put map in r3.
  __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &slow);
  __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
  __ b(gt, &slow);

  // Get the prototype of the function (r4 is result, r2 is scratch).
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
  __ TryGetFunctionPrototype(r1, r4, r2, &slow);

  // Check that the function prototype is a JS object.
  __ BranchOnSmi(r4, &slow);
  __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &slow);
  __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
  __ b(gt, &slow);

  // Register mapping: r3 is object map and r4 is function prototype.
  // Get prototype of object into r2.
  __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));

  // Loop through the prototype chain looking for the function prototype.
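  // The walk ends either at the function prototype (the object is an
  // instance) or at null, which terminates every prototype chain (it is not).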
  __ bind(&loop);
  __ cmp(r2, Operand(r4));
  __ b(eq, &is_instance);
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(r2, ip);
  __ b(eq, &is_not_instance);
  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
  __ jmp(&loop);

  __ bind(&is_instance);
  __ mov(r0, Operand(Smi::FromInt(0)));
  __ pop();
  __ pop();
  __ mov(pc, Operand(lr));  // Return.

  __ bind(&is_not_instance);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ pop();
  __ pop();
  __ mov(pc, Operand(lr));  // Return.

  // Slow-case. Tail call builtin.
  __ bind(&slow);
  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}


void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor);

  // Nothing to do: The formal number of parameters has already been
  // passed in register r0 by the calling function. Just return it.
  __ Jump(lr);

  // Arguments adaptor case: Read the arguments length from the
  // adaptor frame and return it.
  __ bind(&adaptor);
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ Jump(lr);
}


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The displacement is the offset of the last parameter (if any)
  // relative to the frame pointer.
  static const int kDisplacement =
      StandardFrameConstants::kCallerSPOffset - kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ BranchOnNotSmi(r1, &slow);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register r0. Use unsigned comparison to get negative
  // check for free.
  __ cmp(r1, r0);
  __ b(cs, &slow);

  // Read the argument from the stack and return it.
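  // Both r0 (the parameter count) and r1 (the key) are smi-encoded, so their
  // difference shifted left by kPointerSizeLog2 - kSmiTagSize is directly a
  // byte offset from the frame pointer.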
  __ sub(r3, r0, r1);
  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmp(r1, r0);
  __ b(cs, &slow);

  // Read the argument from the adaptor frame and return it.
  __ sub(r3, r0, r1);
  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ push(r1);
  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
}


void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &runtime);

  // Patch the arguments.length and the parameters pointer.
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ str(r0, MemOperand(sp, 0 * kPointerSize));
  __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ str(r3, MemOperand(sp, 1 * kPointerSize));

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
}


void CallFunctionStub::Generate(MacroAssembler* masm) {
  Label slow;

  // If the receiver might be a value (string, number or boolean), check for
  // this and box it if it is.
  if (ReceiverMightBeValue()) {
    // Get the receiver from the stack.
    // function, receiver [, arguments]
    Label receiver_is_value, receiver_is_js_object;
    __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));

    // Check if receiver is a smi (which is a number value).
    __ BranchOnSmi(r1, &receiver_is_value);

    // Check if the receiver is a valid JS object.
    __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
    __ b(ge, &receiver_is_js_object);

    // Call the runtime to box the value.
    __ bind(&receiver_is_value);
    __ EnterInternalFrame();
    __ push(r1);
    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
    __ LeaveInternalFrame();
    __ str(r0, MemOperand(sp, argc_ * kPointerSize));

    __ bind(&receiver_is_js_object);
  }

  // Get the function to call from the stack.
  // function, receiver [, arguments]
  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));

  // Check that the function is really a JavaScript function.
  // r1: pushed function (to be verified)
  __ BranchOnSmi(r1, &slow);
  // Get the map of the function object.
  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  // Fast-case: Invoke the function now.
  // r1: pushed function
  ParameterCount actual(argc_);
  __ InvokeFunction(r1, actual, JUMP_FUNCTION);

  // Slow-case: Non-function called.
  __ bind(&slow);
  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
  __ mov(r2, Operand(0));
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
          RelocInfo::CODE_TARGET);
}


const char* CompareStub::GetName() {
  switch (cc_) {
    case lt: return "CompareStub_LT";
    case gt: return "CompareStub_GT";
    case le: return "CompareStub_LE";
    case ge: return "CompareStub_GE";
    case ne: {
      if (strict_) {
        if (never_nan_nan_) {
          return "CompareStub_NE_STRICT_NO_NAN";
        } else {
          return "CompareStub_NE_STRICT";
        }
      } else {
        if (never_nan_nan_) {
          return "CompareStub_NE_NO_NAN";
        } else {
          return "CompareStub_NE";
        }
      }
    }
    case eq: {
      if (strict_) {
        if (never_nan_nan_) {
          return "CompareStub_EQ_STRICT_NO_NAN";
        } else {
          return "CompareStub_EQ_STRICT";
        }
      } else {
        if (never_nan_nan_) {
          return "CompareStub_EQ_NO_NAN";
        } else {
          return "CompareStub_EQ";
        }
      }
    }
    default: return "CompareStub";
  }
}


int CompareStub::MinorKey() {
  // Encode the three parameters in a unique 16-bit value.
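  // The condition code cc_ appears to occupy the top four bits of the value
  // (the ARM condition field), so shifting it right by 26 leaves the two low
  // bits of the key free for the never_nan_nan_ and strict_ flags.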
  ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
  int nnn_value = (never_nan_nan_ ? 2 : 0);
  if (cc_ != eq) nnn_value = 0;  // Avoid duplicate stubs.
  return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
}


#undef __

} }  // namespace v8::internal