// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"


namespace v8 {
namespace internal {

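// '__' is the conventional V8 shorthand for emitting instructions through
// this code generator's MacroAssembler (masm_) via ACCESS_MASM.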
#define __ ACCESS_MASM(masm_)


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);


// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order due to the stack.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      true_target_(NULL),
      false_target_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           JumpTarget* true_target,
                           JumpTarget* false_target)
    : owner_(owner),
      true_target_(true_target),
      false_target_(false_target),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// CodeGenerator implementation

CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
                             bool is_eval)
    : is_eval_(is_eval),
      script_(script),
      deferred_(8),
      masm_(new MacroAssembler(NULL, buffer_size)),
      scope_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      function_return_is_shadowed_(false) {
}


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::GenCode(FunctionLiteral* fun) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(fun);

  ZoneList<Statement*>* body = fun->body();

  // Initialize state.
  ASSERT(scope_ == NULL);
  scope_ = fun->scope();
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;
  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();
    frame_->Enter();
    // tos: code slot
#ifdef DEBUG
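    // In debug builds, --stop-at=<function name> plants a stop instruction at
    // function entry so execution can be halted here (e.g. under the
    // simulator) for inspection.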
    if (strlen(FLAG_stop_at) > 0 &&
        fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    // Allocate space for locals and initialize them.  This also checks
    // for stack overflow.
    frame_->AllocateStackSlots();
    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    VirtualFrame::SpilledScope spilled_scope;
    int heap_slots = scope_->num_heap_slots();
    if (heap_slots > 0) {
      // Allocate local context.
      // Get outer context and create a new context based on it.
      __ ldr(r0, frame_->Function());
      frame_->EmitPush(r0);
      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
        FastNewContextStub stub(heap_slots);
        frame_->CallStub(&stub, 1);
      } else {
        frame_->CallRuntime(Runtime::kNewContext, 1);
      }

#ifdef DEBUG
      JumpTarget verified_true;
      __ cmp(r0, Operand(cp));
      verified_true.Branch(eq);
      __ stop("NewContext: r0 is expected to be the same as cp");
      verified_true.Bind();
#endif
      // Update context local.
      __ str(cp, frame_->Context());
    }

    // TODO(1241774): Improve this code:
    // 1) only needed if we have a context
    // 2) no need to recompute context ptr every single time
    // 3) don't copy parameter operand code from SlotOperand!
    {
      Comment cmnt2(masm_, "[ copy context parameters into .context");

      // Note that iteration order is relevant here!  If we have the same
      // parameter twice (e.g., function (x, y, x)), and that parameter
      // needs to be copied into the context, it must be the last argument
      // passed to the parameter that needs to be copied.  This is a rare
      // case so we don't check for it; instead we rely on the copying
      // order: such a parameter is copied repeatedly into the same
      // context location and thus the last value is what is seen inside
      // the function.
      for (int i = 0; i < scope_->num_parameters(); i++) {
        Variable* par = scope_->parameter(i);
        Slot* slot = par->slot();
        if (slot != NULL && slot->type() == Slot::CONTEXT) {
          ASSERT(!scope_->is_global_scope());  // no parameters in global scope
          __ ldr(r1, frame_->ParameterAt(i));
          // Loads r2 with context; used below in RecordWrite.
          __ str(r1, SlotOperand(slot, r2));
          // Load the offset into r3.
          int slot_offset =
              FixedArray::kHeaderSize + slot->index() * kPointerSize;
          __ mov(r3, Operand(slot_offset));
          __ RecordWrite(r2, r3, r1);
        }
      }
    }

    // Store the arguments object.  This must happen after context
    // initialization because the arguments object may be stored in the
    // context.
    if (scope_->arguments() != NULL) {
      Comment cmnt(masm_, "[ allocate arguments object");
      ASSERT(scope_->arguments_shadow() != NULL);
      Variable* arguments = scope_->arguments()->var();
      Variable* shadow = scope_->arguments_shadow()->var();
      ASSERT(arguments != NULL && arguments->slot() != NULL);
      ASSERT(shadow != NULL && shadow->slot() != NULL);
      ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
      __ ldr(r2, frame_->Function());
      // The receiver is below the arguments, the return address, and the
      // frame pointer on the stack.
      const int kReceiverDisplacement = 2 + scope_->num_parameters();
      __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
      __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
      frame_->Adjust(3);
      __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
      frame_->CallStub(&stub, 3);
      frame_->EmitPush(r0);
      StoreToSlot(arguments->slot(), NOT_CONST_INIT);
      StoreToSlot(shadow->slot(), NOT_CONST_INIT);
      frame_->Drop();  // Value is no longer needed.
    }

    // Initialize ThisFunction reference if present.
    if (scope_->is_function_scope() && scope_->function() != NULL) {
      __ mov(ip, Operand(Factory::the_hole_value()));
      frame_->EmitPush(ip);
      StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
    }

    // Generate code to 'execute' declarations and initialize functions
    // (source elements).  In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope_->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope_->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state.  Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatementsAndSpill(body);
    }
  }

  // Generate the return sequence if necessary.
  if (has_valid_frame() || function_return_.is_linked()) {
    if (!function_return_.is_linked()) {
      CodeForReturnPosition(fun);
    }
    // exit
    // r0: result
    // sp: stack pointer
    // fp: frame pointer
    // cp: callee's context
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);

    function_return_.Bind();
    if (FLAG_trace) {
      // Push the return value on the stack as the parameter.
      // Runtime::TraceExit returns the parameter as it is.
      frame_->EmitPush(r0);
      frame_->CallRuntime(Runtime::kTraceExit, 1);
    }

    // Add a label for checking the size of the code used for returning.
    Label check_exit_codesize;
    masm_->bind(&check_exit_codesize);

    // Calculate the exact length of the return sequence and make sure that
    // the constant pool is not emitted inside of the return sequence.
    int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
    int return_sequence_length = Assembler::kJSReturnSequenceLength;
    if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
      // Additional mov instruction generated.
      return_sequence_length++;
    }
    masm_->BlockConstPoolFor(return_sequence_length);

    // Tear down the frame which will restore the caller's frame pointer and
    // the link register.
    frame_->Exit();

    // Here we use masm_-> instead of the __ macro to prevent the code
    // coverage tool from instrumenting it, as we rely on the code size here.
    masm_->add(sp, sp, Operand(sp_delta));
    masm_->Jump(lr);

    // Check that the size of the code used for returning matches what is
    // expected by the debugger.  The add instruction above is an addressing
    // mode 1 instruction where there are restrictions on which immediate
    // values can be encoded in the instruction and which immediate values
    // require the use of an additional instruction for moving the immediate
    // to a temporary register.
    ASSERT_EQ(return_sequence_length,
              masm_->InstructionsGeneratedSince(&check_exit_codesize));
  }

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
  scope_ = NULL;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now.  Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context.  However it is safe to dereference it,
      // because the function context of a function context is itself.  Before
      // deleting this mov we should try to create a counter-example first,
      // though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}

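// Computes the address of a context slot, walking the context chain from the
// current scope out to the slot's scope.  Where an intervening context could
// have acquired an extension object (for example through eval), the generated
// code checks that the extension is NULL and otherwise jumps to 'slow'.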
MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { CodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (eg, a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Jump();
    materialize_true.Bind();
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
      frame_->EmitPush(r0);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
      frame_->EmitPush(r0);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::LoadGlobal() {
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(r0, GlobalObject());
  frame_->EmitPush(r0);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(scratch,
         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(scratch);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  VirtualFrame::SpilledScope spilled_scope;
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValueAndSpill();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
    frame_->SpillAll();
  } else {
    // Anything else can be handled normally.
    LoadAndSpill(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    LoadAndSpill(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      LoadAndSpill(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    LoadAndSpill(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  VirtualFrame::SpilledScope spilled_scope;
  // Pop a reference from the stack while preserving TOS.
  Comment cmnt(masm_, "[ UnloadReference");
  int size = ref->size();
  if (size > 0) {
    frame_->EmitPop(r0);
    frame_->Drop(size);
    frame_->EmitPush(r0);
  }
  ref->set_unloaded();
}


// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  VirtualFrame::SpilledScope spilled_scope;
  // Note: The generated code snippet does not change stack variables.
  //       Only the condition code should be set.
  frame_->EmitPop(r0);

  // Fast case checks

  // Check if the value is 'false'.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is 'true'.
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r0, ip);
  true_target->Branch(eq);

  // Check if the value is 'undefined'.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is a smi.
  __ cmp(r0, Operand(Smi::FromInt(0)));
  false_target->Branch(eq);
  __ tst(r0, Operand(kSmiTagMask));
  true_target->Branch(eq);

  // Slow case: call the runtime.
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kToBool, 1);
  // Convert the result (r0) to a condition code.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);
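  // The runtime returns the true or false object; after the comparison
  // against 'false' above, 'ne' is set exactly when the value is true.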

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           int constant_rhs) {
  VirtualFrame::SpilledScope spilled_scope;
  // sp[0] : y
  // sp[1] : x
  // result : r0

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:  // fall through.
    case Token::SUB:  // fall through.
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      frame_->EmitPop(r0);  // r0 : y
      frame_->EmitPop(r1);  // r1 : x
      GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
      frame_->CallStub(&stub, 0);
      break;
    }

    case Token::COMMA:
      frame_->EmitPop(r0);
      // simply discard left value
      frame_->Drop();
      break;

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiOperation::Generate() {
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
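      // The inline code added the constant optimistically; undo that here so
      // that r1 ends up holding the left operand and r0 the right operand,
      // which is what GenericBinaryOpStub expects.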
      if (reversed_) {
        __ sub(r0, r0, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, r0, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, r0, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (reversed_) {
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ mov(r1, Operand(r0));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (!reversed_) {
        __ mov(r1, Operand(r0));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      } else {
        UNREACHABLE();  // Should have been handled in SmiOperation.
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
  __ CallStub(&stub);
}

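// Returns true if x has at most two bits set.  'x &= x - 1' clears the lowest
// set bit, so after one clearing step a value with at most two bits set has
// become zero or a power of two, which the second test detects.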
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
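// The argument must be non-zero; otherwise the loops below would not
// terminate.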
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  VirtualFrame::SpilledScope spilled_scope;
  // NOTE: This is an attempt to inline (a bit) more of the code for
  // some possible smi operations (like + and -) when (at least) one
  // of the operands is a literal smi.  With this optimization, the
  // performance of the system is increased by ~15%, and the generated
  // code size is increased by ~1% (measured on a combination of
  // different benchmarks).

  // sp[0] : operand

  int int_value = Smi::cast(*value)->value();

  JumpTarget exit;
  frame_->EmitPop(r0);

  bool something_to_inline = true;
  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);

      __ add(r0, r0, Operand(value), SetCC);
      deferred->Branch(vs);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);

      if (reversed) {
        __ rsb(r0, r0, Operand(value), SetCC);
      } else {
        __ sub(r0, r0, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      switch (op) {
        case Token::BIT_OR:  __ orr(r0, r0, Operand(value)); break;
        case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
        case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
        default: UNREACHABLE();
      }
      deferred->BindExit();
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
        break;
      }
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, shift_value, false, mode);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            __ mov(r2, Operand(r2, LSL, shift_value));
          }
          // check that the *unsigned* result fits in a smi
          __ add(r3, r2, Operand(0x40000000), SetCC);
          deferred->Branch(mi);
          break;
        }
        case Token::SHR: {
          // LSR by immediate 0 means shifting 32 bits.
          if (shift_value != 0) {
            __ mov(r2, Operand(r2, LSR, shift_value));
          }
          // check that the *unsigned* result fits in a smi
          // neither of the two high-order bits can be set:
          //  - 0x80000000: high bit would be lost when smi tagging
          //  - 0x40000000: this number would convert to negative when
          //    smi tagging
          // these two cases can only happen with shifts by 0 or 1 when
          // handed a valid smi
          __ and_(r3, r2, Operand(0xc0000000), SetCC);
          deferred->Branch(ne);
          break;
        }
        case Token::SAR: {
          if (shift_value != 0) {
            // ASR by immediate 0 means shifting 32 bits.
            __ mov(r2, Operand(r2, ASR, shift_value));
          }
          break;
        }
        default: UNREACHABLE();
      }
      __ mov(r0, Operand(r2, LSL, kSmiTagSize));
      deferred->BindExit();
      break;
    }

    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
        break;
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(r0, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
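      // At this point r0 is a non-negative smi and int_value is a power of
      // two, so the remainder is just the low bits of the untagged value;
      // the mask below keeps those bits plus the (zero) smi tag.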
      mask = (int_value << kSmiTagSize) - 1;
      __ and_(r0, r0, Operand(mask));
      deferred->BindExit();
      break;
    }

    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
        break;
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi.  It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
      __ tst(r0, Operand(mask));
      deferred->Branch(ne);
      MultiplyByKnownInt(masm_, r0, r0, int_value);
      deferred->BindExit();
      break;
    }

    default:
      something_to_inline = false;
      break;
  }

  if (!something_to_inline) {
    if (!reversed) {
      frame_->EmitPush(r0);
      __ mov(r0, Operand(value));
      frame_->EmitPush(r0);
      GenericBinaryOperation(op, mode, int_value);
    } else {
      __ mov(ip, Operand(value));
      frame_->EmitPush(ip);
      frame_->EmitPush(r0);
      GenericBinaryOperation(op, mode, kUnknownIntValue);
    }
  }

  exit.Bind();
}


void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  if (left != NULL) LoadAndSpill(left);
  if (right != NULL) LoadAndSpill(right);

  VirtualFrame::SpilledScope spilled_scope;
  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  JumpTarget exit;
  JumpTarget smi;
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    frame_->EmitPop(r1);
    frame_->EmitPop(r0);
  } else {
    frame_->EmitPop(r0);
    frame_->EmitPop(r1);
  }
  __ orr(r2, r0, Operand(r1));
  __ tst(r2, Operand(kSmiTagMask));
  smi.Branch(eq);

  // Perform non-smi comparison by stub.
  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
  // We call with 0 args because there are 0 on the stack.
  CompareStub stub(cc, strict);
  frame_->CallStub(&stub, 0);
  __ cmp(r0, Operand(0));
  exit.Jump();

  // Do smi comparisons by pointer comparison.
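  // Tagged smis compare correctly as raw word values: the tag occupies the
  // low bit(s), so the signed comparison of the tagged words gives the same
  // result as comparing the untagged integers.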
  smi.Bind();
  __ cmp(r1, Operand(r0));

  exit.Bind();
  cc_reg_ = cc;
}


// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      CallFunctionFlags flags,
                                      int position) {
  VirtualFrame::SpilledScope spilled_scope;
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    LoadAndSpill(args->at(i));
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.
  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
  CallFunctionStub call_function(arg_count, in_loop, flags);
  frame_->CallStub(&call_function, arg_count + 1);

  // Restore context and pop function from the stack.
  __ ldr(cp, frame_->Context());
  frame_->Drop();  // discard the TOS
}


void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(has_cc());
  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
  target->Branch(cc);
  cc_reg_ = al;
}


void CodeGenerator::CheckStack() {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ check stack");
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  // Put the lr setup instruction in the delay slot.  kInstrSize is added to
  // the implicit 8 byte offset that always applies to operations with pc and
  // gives a return address 12 bytes down.
  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
  masm_->cmp(sp, Operand(ip));
  StackCheckStub stub;
  // Call the stub if lower.
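  // The conditional mov below is only executed when sp is below the stack
  // limit; lr was already set up above, so the stub returns to the
  // instruction that follows the mov.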
  masm_->mov(pc,
             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
                     RelocInfo::CODE_TARGET),
             LeaveCC,
             lo);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
    VisitAndSpill(statements->at(i));
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  VisitStatementsAndSpill(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  VirtualFrame::SpilledScope spilled_scope;
  frame_->EmitPush(cp);
  __ mov(r0, Operand(pairs));
  frame_->EmitPush(r0);
  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
  frame_->EmitPush(r0);
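  // Three arguments are now on the stack: the context, the array of
  // name/value pairs, and a smi flagging whether the code is being compiled
  // for eval.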
1187 frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
1188 // The result is discarded.
1189}
1190
1191
1192void CodeGenerator::VisitDeclaration(Declaration* node) {
1193#ifdef DEBUG
1194 int original_height = frame_->height();
1195#endif
1196 VirtualFrame::SpilledScope spilled_scope;
1197 Comment cmnt(masm_, "[ Declaration");
1198 Variable* var = node->proxy()->var();
1199 ASSERT(var != NULL); // must have been resolved
1200 Slot* slot = var->slot();
1201
1202 // If it was not possible to allocate the variable at compile time,
1203 // we need to "declare" it at runtime to make sure it actually
1204 // exists in the local context.
1205 if (slot != NULL && slot->type() == Slot::LOOKUP) {
1206 // Variables with a "LOOKUP" slot were introduced as non-locals
1207 // during variable resolution and must have mode DYNAMIC.
1208 ASSERT(var->is_dynamic());
1209 // For now, just do a runtime call.
1210 frame_->EmitPush(cp);
1211 __ mov(r0, Operand(var->name()));
1212 frame_->EmitPush(r0);
1213 // Declaration nodes are always declared in only two modes.
1214 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1215 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
1216 __ mov(r0, Operand(Smi::FromInt(attr)));
1217 frame_->EmitPush(r0);
1218 // Push initial value, if any.
1219 // Note: For variables we must not push an initial value (such as
1220 // 'undefined') because we may have a (legal) redeclaration and we
1221 // must not destroy the current value.
1222 if (node->mode() == Variable::CONST) {
1223 __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
1224 frame_->EmitPush(r0);
1225 } else if (node->fun() != NULL) {
1226 LoadAndSpill(node->fun());
1227 } else {
1228 __ mov(r0, Operand(0)); // no initial value!
1229 frame_->EmitPush(r0);
1230 }
1231 frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1232 // Ignore the return value (declarations are statements).
1233 ASSERT(frame_->height() == original_height);
1234 return;
1235 }
1236
1237 ASSERT(!var->is_global());
1238
1239 // If we have a function or a constant, we need to initialize the variable.
1240 Expression* val = NULL;
1241 if (node->mode() == Variable::CONST) {
1242 val = new Literal(Factory::the_hole_value());
1243 } else {
1244 val = node->fun(); // NULL if we don't have a function
1245 }
1246
1247 if (val != NULL) {
1248 {
1249 // Set initial value.
1250 Reference target(this, node->proxy());
1251 LoadAndSpill(val);
1252 target.SetValue(NOT_CONST_INIT);
Steve Blocka7e24c12009-10-30 11:49:00 +00001253 }
1254 // Get rid of the assigned value (declarations are statements).
1255 frame_->Drop();
1256 }
1257 ASSERT(frame_->height() == original_height);
1258}
1259
1260
1261void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
1262#ifdef DEBUG
1263 int original_height = frame_->height();
1264#endif
1265 VirtualFrame::SpilledScope spilled_scope;
1266 Comment cmnt(masm_, "[ ExpressionStatement");
1267 CodeForStatementPosition(node);
1268 Expression* expression = node->expression();
1269 expression->MarkAsStatement();
1270 LoadAndSpill(expression);
1271 frame_->Drop();
1272 ASSERT(frame_->height() == original_height);
1273}
1274
1275
1276void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
1277#ifdef DEBUG
1278 int original_height = frame_->height();
1279#endif
1280 VirtualFrame::SpilledScope spilled_scope;
1281 Comment cmnt(masm_, "// EmptyStatement");
1282 CodeForStatementPosition(node);
1283 // nothing to do
1284 ASSERT(frame_->height() == original_height);
1285}
1286
1287
1288void CodeGenerator::VisitIfStatement(IfStatement* node) {
1289#ifdef DEBUG
1290 int original_height = frame_->height();
1291#endif
1292 VirtualFrame::SpilledScope spilled_scope;
1293 Comment cmnt(masm_, "[ IfStatement");
1294 // Generate different code depending on which parts of the if statement
1295 // are present or not.
1296 bool has_then_stm = node->HasThenStatement();
1297 bool has_else_stm = node->HasElseStatement();
1298
1299 CodeForStatementPosition(node);
1300
1301 JumpTarget exit;
1302 if (has_then_stm && has_else_stm) {
1303 Comment cmnt(masm_, "[ IfThenElse");
1304 JumpTarget then;
1305 JumpTarget else_;
1306 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001307 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001308 if (frame_ != NULL) {
1309 Branch(false, &else_);
1310 }
1311 // then
1312 if (frame_ != NULL || then.is_linked()) {
1313 then.Bind();
1314 VisitAndSpill(node->then_statement());
1315 }
1316 if (frame_ != NULL) {
1317 exit.Jump();
1318 }
1319 // else
1320 if (else_.is_linked()) {
1321 else_.Bind();
1322 VisitAndSpill(node->else_statement());
1323 }
1324
1325 } else if (has_then_stm) {
1326 Comment cmnt(masm_, "[ IfThen");
1327 ASSERT(!has_else_stm);
1328 JumpTarget then;
1329 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001330 LoadConditionAndSpill(node->condition(), &then, &exit, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001331 if (frame_ != NULL) {
1332 Branch(false, &exit);
1333 }
1334 // then
1335 if (frame_ != NULL || then.is_linked()) {
1336 then.Bind();
1337 VisitAndSpill(node->then_statement());
1338 }
1339
1340 } else if (has_else_stm) {
1341 Comment cmnt(masm_, "[ IfElse");
1342 ASSERT(!has_then_stm);
1343 JumpTarget else_;
1344 // if (!cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001345 LoadConditionAndSpill(node->condition(), &exit, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001346 if (frame_ != NULL) {
1347 Branch(true, &exit);
1348 }
1349 // else
1350 if (frame_ != NULL || else_.is_linked()) {
1351 else_.Bind();
1352 VisitAndSpill(node->else_statement());
1353 }
1354
1355 } else {
1356 Comment cmnt(masm_, "[ If");
1357 ASSERT(!has_then_stm && !has_else_stm);
1358 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001359 LoadConditionAndSpill(node->condition(), &exit, &exit, false);
Steve Blocka7e24c12009-10-30 11:49:00 +00001360 if (frame_ != NULL) {
1361 if (has_cc()) {
1362 cc_reg_ = al;
1363 } else {
1364 frame_->Drop();
1365 }
1366 }
1367 }
1368
1369 // end
1370 if (exit.is_linked()) {
1371 exit.Bind();
1372 }
1373 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1374}
1375
1376
1377void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
1378 VirtualFrame::SpilledScope spilled_scope;
1379 Comment cmnt(masm_, "[ ContinueStatement");
1380 CodeForStatementPosition(node);
1381 node->target()->continue_target()->Jump();
1382}
1383
1384
1385void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
1386 VirtualFrame::SpilledScope spilled_scope;
1387 Comment cmnt(masm_, "[ BreakStatement");
1388 CodeForStatementPosition(node);
1389 node->target()->break_target()->Jump();
1390}
1391
1392
1393void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
1394 VirtualFrame::SpilledScope spilled_scope;
1395 Comment cmnt(masm_, "[ ReturnStatement");
1396
1397 CodeForStatementPosition(node);
1398 LoadAndSpill(node->expression());
1399 if (function_return_is_shadowed_) {
1400 frame_->EmitPop(r0);
1401 function_return_.Jump();
1402 } else {
1403 // Pop the result from the frame and prepare the frame for
1404 // returning thus making it easier to merge.
1405 frame_->EmitPop(r0);
1406 frame_->PrepareForReturn();
1407
1408 function_return_.Jump();
1409 }
1410}
1411
1412
1413void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1414#ifdef DEBUG
1415 int original_height = frame_->height();
1416#endif
1417 VirtualFrame::SpilledScope spilled_scope;
1418 Comment cmnt(masm_, "[ WithEnterStatement");
1419 CodeForStatementPosition(node);
1420 LoadAndSpill(node->expression());
1421 if (node->is_catch_block()) {
1422 frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1423 } else {
1424 frame_->CallRuntime(Runtime::kPushContext, 1);
1425 }
1426#ifdef DEBUG
1427 JumpTarget verified_true;
1428 __ cmp(r0, Operand(cp));
1429 verified_true.Branch(eq);
1430 __ stop("PushContext: r0 is expected to be the same as cp");
1431 verified_true.Bind();
1432#endif
1433 // Update context local.
1434 __ str(cp, frame_->Context());
1435 ASSERT(frame_->height() == original_height);
1436}
1437
1438
1439void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1440#ifdef DEBUG
1441 int original_height = frame_->height();
1442#endif
1443 VirtualFrame::SpilledScope spilled_scope;
1444 Comment cmnt(masm_, "[ WithExitStatement");
1445 CodeForStatementPosition(node);
1446 // Pop context.
1447 __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
1448 // Update context local.
1449 __ str(cp, frame_->Context());
1450 ASSERT(frame_->height() == original_height);
1451}
1452
1453
1454void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
1455#ifdef DEBUG
1456 int original_height = frame_->height();
1457#endif
1458 VirtualFrame::SpilledScope spilled_scope;
1459 Comment cmnt(masm_, "[ SwitchStatement");
1460 CodeForStatementPosition(node);
1461 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1462
1463 LoadAndSpill(node->tag());
1464
1465 JumpTarget next_test;
1466 JumpTarget fall_through;
1467 JumpTarget default_entry;
1468 JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
1469 ZoneList<CaseClause*>* cases = node->cases();
1470 int length = cases->length();
1471 CaseClause* default_clause = NULL;
1472
1473 for (int i = 0; i < length; i++) {
1474 CaseClause* clause = cases->at(i);
1475 if (clause->is_default()) {
1476 // Remember the default clause and compile it at the end.
1477 default_clause = clause;
1478 continue;
1479 }
1480
1481 Comment cmnt(masm_, "[ Case clause");
1482 // Compile the test.
1483 next_test.Bind();
1484 next_test.Unuse();
1485 // Duplicate TOS.
1486 __ ldr(r0, frame_->Top());
1487 frame_->EmitPush(r0);
1488 Comparison(eq, NULL, clause->label(), true);
1489 Branch(false, &next_test);
1490
1491 // Before entering the body from the test, remove the switch value from
1492 // the stack.
1493 frame_->Drop();
1494
1495 // Label the body so that fall through is enabled.
1496 if (i > 0 && cases->at(i - 1)->is_default()) {
1497 default_exit.Bind();
1498 } else {
1499 fall_through.Bind();
1500 fall_through.Unuse();
1501 }
1502 VisitStatementsAndSpill(clause->statements());
1503
1504 // If control flow can fall through from the body, jump to the next body
1505 // or the end of the statement.
1506 if (frame_ != NULL) {
1507 if (i < length - 1 && cases->at(i + 1)->is_default()) {
1508 default_entry.Jump();
1509 } else {
1510 fall_through.Jump();
1511 }
1512 }
1513 }
1514
1515 // The final "test" removes the switch value.
1516 next_test.Bind();
1517 frame_->Drop();
1518
1519 // If there is a default clause, compile it.
1520 if (default_clause != NULL) {
1521 Comment cmnt(masm_, "[ Default clause");
1522 default_entry.Bind();
1523 VisitStatementsAndSpill(default_clause->statements());
1524 // If control flow can fall out of the default and there is a case after
1525 // it, jup to that case's body.
1526 if (frame_ != NULL && default_exit.is_bound()) {
1527 default_exit.Jump();
1528 }
1529 }
1530
1531 if (fall_through.is_linked()) {
1532 fall_through.Bind();
1533 }
1534
1535 if (node->break_target()->is_linked()) {
1536 node->break_target()->Bind();
1537 }
1538 node->break_target()->Unuse();
1539 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1540}
1541
1542
Steve Block3ce2e202009-11-05 08:53:23 +00001543void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001544#ifdef DEBUG
1545 int original_height = frame_->height();
1546#endif
1547 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001548 Comment cmnt(masm_, "[ DoWhileStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001549 CodeForStatementPosition(node);
1550 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
Steve Block3ce2e202009-11-05 08:53:23 +00001551 JumpTarget body(JumpTarget::BIDIRECTIONAL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001552
Steve Block3ce2e202009-11-05 08:53:23 +00001553 // Label the top of the loop for the backward CFG edge. If the test
1554 // is always true we can use the continue target, and if the test is
1555 // always false there is no need.
1556 ConditionAnalysis info = AnalyzeCondition(node->cond());
1557 switch (info) {
1558 case ALWAYS_TRUE:
Steve Blocka7e24c12009-10-30 11:49:00 +00001559 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1560 node->continue_target()->Bind();
Steve Block3ce2e202009-11-05 08:53:23 +00001561 break;
1562 case ALWAYS_FALSE:
1563 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1564 break;
1565 case DONT_KNOW:
1566 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1567 body.Bind();
1568 break;
1569 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001570
Steve Block3ce2e202009-11-05 08:53:23 +00001571 CheckStack(); // TODO(1222600): ignore if body contains calls.
1572 VisitAndSpill(node->body());
Steve Blocka7e24c12009-10-30 11:49:00 +00001573
Steve Blockd0582a62009-12-15 09:54:21 +00001574 // Compile the test.
Steve Block3ce2e202009-11-05 08:53:23 +00001575 switch (info) {
1576 case ALWAYS_TRUE:
1577 // If control can fall off the end of the body, jump back to the
1578 // top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001579 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001580 node->continue_target()->Jump();
Steve Blocka7e24c12009-10-30 11:49:00 +00001581 }
1582 break;
Steve Block3ce2e202009-11-05 08:53:23 +00001583 case ALWAYS_FALSE:
1584 // If we have a continue in the body, we only have to bind its
1585 // jump target.
1586 if (node->continue_target()->is_linked()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001587 node->continue_target()->Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001588 }
Steve Block3ce2e202009-11-05 08:53:23 +00001589 break;
1590 case DONT_KNOW:
1591 // We have to compile the test expression if it can be reached by
1592 // control flow falling out of the body or via continue.
1593 if (node->continue_target()->is_linked()) {
1594 node->continue_target()->Bind();
1595 }
1596 if (has_valid_frame()) {
Steve Blockd0582a62009-12-15 09:54:21 +00001597 Comment cmnt(masm_, "[ DoWhileCondition");
1598 CodeForDoWhileConditionPosition(node);
1599 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001600 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001601 // A invalid frame here indicates that control did not
1602 // fall out of the test expression.
1603 Branch(true, &body);
Steve Blocka7e24c12009-10-30 11:49:00 +00001604 }
1605 }
1606 break;
Steve Blocka7e24c12009-10-30 11:49:00 +00001607 }
1608
1609 if (node->break_target()->is_linked()) {
1610 node->break_target()->Bind();
1611 }
Steve Block3ce2e202009-11-05 08:53:23 +00001612 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1613}
1614
1615
1616void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1617#ifdef DEBUG
1618 int original_height = frame_->height();
1619#endif
1620 VirtualFrame::SpilledScope spilled_scope;
1621 Comment cmnt(masm_, "[ WhileStatement");
1622 CodeForStatementPosition(node);
1623
1624 // If the test is never true and has no side effects there is no need
1625 // to compile the test or body.
1626 ConditionAnalysis info = AnalyzeCondition(node->cond());
1627 if (info == ALWAYS_FALSE) return;
1628
1629 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1630
1631 // Label the top of the loop with the continue target for the backward
1632 // CFG edge.
1633 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1634 node->continue_target()->Bind();
1635
1636 if (info == DONT_KNOW) {
1637 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001638 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001639 if (has_valid_frame()) {
1640 // A NULL frame indicates that control did not fall out of the
1641 // test expression.
1642 Branch(false, node->break_target());
1643 }
1644 if (has_valid_frame() || body.is_linked()) {
1645 body.Bind();
1646 }
1647 }
1648
1649 if (has_valid_frame()) {
1650 CheckStack(); // TODO(1222600): ignore if body contains calls.
1651 VisitAndSpill(node->body());
1652
1653 // If control flow can fall out of the body, jump back to the top.
1654 if (has_valid_frame()) {
1655 node->continue_target()->Jump();
1656 }
1657 }
1658 if (node->break_target()->is_linked()) {
1659 node->break_target()->Bind();
1660 }
1661 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1662}
1663
1664
1665void CodeGenerator::VisitForStatement(ForStatement* node) {
1666#ifdef DEBUG
1667 int original_height = frame_->height();
1668#endif
1669 VirtualFrame::SpilledScope spilled_scope;
1670 Comment cmnt(masm_, "[ ForStatement");
1671 CodeForStatementPosition(node);
1672 if (node->init() != NULL) {
1673 VisitAndSpill(node->init());
1674 }
1675
1676 // If the test is never true there is no need to compile the test or
1677 // body.
1678 ConditionAnalysis info = AnalyzeCondition(node->cond());
1679 if (info == ALWAYS_FALSE) return;
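  // JavaScript example: 'for (var i = 0; false; i++) { f(); }' -- only
  // the initializer above is compiled; the test and body are skipped.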
1680
1681 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1682
1683 // If there is no update statement, label the top of the loop with the
1684 // continue target, otherwise with the loop target.
1685 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1686 if (node->next() == NULL) {
1687 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1688 node->continue_target()->Bind();
1689 } else {
1690 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1691 loop.Bind();
1692 }
1693
1694 // If the test is always true, there is no need to compile it.
1695 if (info == DONT_KNOW) {
1696 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001697 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001698 if (has_valid_frame()) {
1699 Branch(false, node->break_target());
1700 }
1701 if (has_valid_frame() || body.is_linked()) {
1702 body.Bind();
1703 }
1704 }
1705
1706 if (has_valid_frame()) {
1707 CheckStack(); // TODO(1222600): ignore if body contains calls.
1708 VisitAndSpill(node->body());
1709
1710 if (node->next() == NULL) {
1711 // If there is no update statement and control flow can fall out
1712 // of the loop, jump directly to the continue label.
1713 if (has_valid_frame()) {
1714 node->continue_target()->Jump();
1715 }
1716 } else {
1717 // If there is an update statement and control flow can reach it
1718 // via falling out of the body of the loop or continuing, we
1719 // compile the update statement.
1720 if (node->continue_target()->is_linked()) {
1721 node->continue_target()->Bind();
1722 }
1723 if (has_valid_frame()) {
1724 // Record the source position of the statement, as this code, which
1725 // comes after the code for the body, actually belongs to the loop
1726 // statement and not to the body.
1727 CodeForStatementPosition(node);
1728 VisitAndSpill(node->next());
1729 loop.Jump();
1730 }
1731 }
1732 }
1733 if (node->break_target()->is_linked()) {
1734 node->break_target()->Bind();
1735 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001736 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1737}
1738
1739
1740void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1741#ifdef DEBUG
1742 int original_height = frame_->height();
1743#endif
1744 VirtualFrame::SpilledScope spilled_scope;
1745 Comment cmnt(masm_, "[ ForInStatement");
1746 CodeForStatementPosition(node);
1747
1748 JumpTarget primitive;
1749 JumpTarget jsobject;
1750 JumpTarget fixed_array;
1751 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1752 JumpTarget end_del_check;
1753 JumpTarget exit;
1754
1755 // Get the object to enumerate over (converted to JSObject).
1756 LoadAndSpill(node->enumerable());
1757
1758 // Both SpiderMonkey and kjs ignore null and undefined in contrast
1759 // to the specification; section 12.6.4 mandates a call to ToObject.
1760 frame_->EmitPop(r0);
1761 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
1762 __ cmp(r0, ip);
1763 exit.Branch(eq);
1764 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1765 __ cmp(r0, ip);
1766 exit.Branch(eq);
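  // JavaScript example: 'for (var p in null) { f(); }' performs no
  // iterations, so we exit here without setting up the loop state.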
1767
1768 // Stack layout in body:
1769 // [iteration counter (Smi)]
1770 // [length of array]
1771 // [FixedArray]
1772 // [Map or 0]
1773 // [Object]
1774
1775 // Check if the enumerable is already a JSObject.
1776 __ tst(r0, Operand(kSmiTagMask));
1777 primitive.Branch(eq);
1778 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
1779 jsobject.Branch(hs);
1780
1781 primitive.Bind();
1782 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00001783 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00001784
1785 jsobject.Bind();
1786 // Get the set of properties (as a FixedArray or Map).
Steve Blockd0582a62009-12-15 09:54:21 +00001787 // r0: value to be iterated over
1788 frame_->EmitPush(r0); // Push the object being iterated over.
1789
1790 // Check cache validity in generated code. This is a fast case for
1791 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
1792 // guarantee cache validity, call the runtime system to check cache
1793 // validity or get the property names in a fixed array.
1794 JumpTarget call_runtime;
1795 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1796 JumpTarget check_prototype;
1797 JumpTarget use_cache;
1798 __ mov(r1, Operand(r0));
1799 loop.Bind();
1800 // Check that there are no elements.
1801 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
1802 __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
1803 __ cmp(r2, r4);
1804 call_runtime.Branch(ne);
1805 // Check that instance descriptors are not empty so that we can
1806 // check for an enum cache. Leave the map in r3 for the subsequent
1807 // prototype load.
1808 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
1809 __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
1810 __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
1811 __ cmp(r2, ip);
1812 call_runtime.Branch(eq);
1813 // Check that there is an enum cache in the non-empty instance
1814 // descriptors. This is the case if the next enumeration index
1815 // field does not contain a smi.
1816 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
1817 __ tst(r2, Operand(kSmiTagMask));
1818 call_runtime.Branch(eq);
1819 // For all objects but the receiver, check that the cache is empty.
1820 // r4: empty fixed array root.
1821 __ cmp(r1, r0);
1822 check_prototype.Branch(eq);
1823 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
1824 __ cmp(r2, r4);
1825 call_runtime.Branch(ne);
1826 check_prototype.Bind();
1827 // Load the prototype from the map and loop if non-null.
1828 __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
1829 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1830 __ cmp(r1, ip);
1831 loop.Branch(ne);
1832 // The enum cache is valid. Load the map of the object being
1833 // iterated over and use the cache for the iteration.
1834 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
1835 use_cache.Jump();
1836
1837 call_runtime.Bind();
1838 // Call the runtime to get the property names for the object.
1839 frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
Steve Blocka7e24c12009-10-30 11:49:00 +00001840 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1841
Steve Blockd0582a62009-12-15 09:54:21 +00001842 // If we got a map from the runtime call, we can do a fast
1843 // modification check. Otherwise, we got a fixed array, and we have
1844 // to do a slow check.
1845 // r0: map or fixed array (result from call to
1846 // Runtime::kGetPropertyNamesFast)
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 __ mov(r2, Operand(r0));
1848 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
1849 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
1850 __ cmp(r1, ip);
1851 fixed_array.Branch(ne);
1852
Steve Blockd0582a62009-12-15 09:54:21 +00001853 use_cache.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001854 // Get enum cache
Steve Blockd0582a62009-12-15 09:54:21 +00001855 // r0: map (either the result of a call to
1856 // Runtime::kGetPropertyNamesFast or a map fetched directly from
1857 // the object)
Steve Blocka7e24c12009-10-30 11:49:00 +00001858 __ mov(r1, Operand(r0));
1859 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
1860 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
1861 __ ldr(r2,
1862 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
1863
1864 frame_->EmitPush(r0); // map
1865 frame_->EmitPush(r2); // enum cache bridge cache
1866 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
1867 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1868 frame_->EmitPush(r0);
1869 __ mov(r0, Operand(Smi::FromInt(0)));
1870 frame_->EmitPush(r0);
1871 entry.Jump();
1872
1873 fixed_array.Bind();
1874 __ mov(r1, Operand(Smi::FromInt(0)));
1875 frame_->EmitPush(r1); // insert 0 in place of Map
1876 frame_->EmitPush(r0);
1877
1878 // Push the length of the array and the initial index onto the stack.
1879 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
1880 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1881 frame_->EmitPush(r0);
1882 __ mov(r0, Operand(Smi::FromInt(0))); // init index
1883 frame_->EmitPush(r0);
1884
1885 // Condition.
1886 entry.Bind();
1887 // sp[0] : index
1888 // sp[1] : array/enum cache length
1889 // sp[2] : array or enum cache
1890 // sp[3] : 0 or map
1891 // sp[4] : enumerable
1892 // Grab the current frame's height for the break and continue
1893 // targets only after all the state is pushed on the frame.
1894 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1895 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1896
1897 __ ldr(r0, frame_->ElementAt(0)); // load the current count
1898 __ ldr(r1, frame_->ElementAt(1)); // load the length
1899 __ cmp(r0, Operand(r1)); // compare to the array length
1900 node->break_target()->Branch(hs);
1901
1902 __ ldr(r0, frame_->ElementAt(0));
1903
1904 // Get the i'th entry of the array.
1905 __ ldr(r2, frame_->ElementAt(2));
1906 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1907 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1908
1909 // Get Map or 0.
1910 __ ldr(r2, frame_->ElementAt(3));
1911 // Check if this (still) matches the map of the enumerable.
1912 // If not, we have to filter the key.
1913 __ ldr(r1, frame_->ElementAt(4));
1914 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
1915 __ cmp(r1, Operand(r2));
1916 end_del_check.Branch(eq);
1917
1918 // Convert the entry to a string (or null if it isn't a property anymore).
1919 __ ldr(r0, frame_->ElementAt(4)); // push enumerable
1920 frame_->EmitPush(r0);
1921 frame_->EmitPush(r3); // push entry
Steve Blockd0582a62009-12-15 09:54:21 +00001922 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001923 __ mov(r3, Operand(r0));
1924
1925 // If the property has been removed while iterating, we just skip it.
1926 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1927 __ cmp(r3, ip);
1928 node->continue_target()->Branch(eq);
1929
1930 end_del_check.Bind();
1931 // Store the entry in the 'each' expression and take another spin in the
1932 // loop. r3: i'th entry of the enum cache (or string thereof)
1933 frame_->EmitPush(r3); // push entry
1934 { Reference each(this, node->each());
1935 if (!each.is_illegal()) {
1936 if (each.size() > 0) {
1937 __ ldr(r0, frame_->ElementAt(each.size()));
1938 frame_->EmitPush(r0);
Leon Clarked91b9f72010-01-27 17:25:45 +00001939 each.SetValue(NOT_CONST_INIT);
1940 frame_->Drop(2);
1941 } else {
1942 // If the reference was to a slot we rely on the convenient property
1943 // that it doesn't matter whether a value (eg, r3 pushed above) is
1944 // right on top of or right underneath a zero-sized reference.
1945 each.SetValue(NOT_CONST_INIT);
1946 frame_->Drop();
Steve Blocka7e24c12009-10-30 11:49:00 +00001947 }
1948 }
1949 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001950 // Body.
1951 CheckStack(); // TODO(1222600): ignore if body contains calls.
1952 VisitAndSpill(node->body());
1953
1954 // Next. Reestablish a spilled frame in case we are coming here via
1955 // a continue in the body.
1956 node->continue_target()->Bind();
1957 frame_->SpillAll();
1958 frame_->EmitPop(r0);
1959 __ add(r0, r0, Operand(Smi::FromInt(1)));
1960 frame_->EmitPush(r0);
1961 entry.Jump();
1962
1963 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1964 // any frame.
1965 node->break_target()->Bind();
1966 frame_->Drop(5);
1967
1968 // Exit.
1969 exit.Bind();
1970 node->continue_target()->Unuse();
1971 node->break_target()->Unuse();
1972 ASSERT(frame_->height() == original_height);
1973}
1974
1975
Steve Block3ce2e202009-11-05 08:53:23 +00001976void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001977#ifdef DEBUG
1978 int original_height = frame_->height();
1979#endif
1980 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001981 Comment cmnt(masm_, "[ TryCatchStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001982 CodeForStatementPosition(node);
1983
1984 JumpTarget try_block;
1985 JumpTarget exit;
1986
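  // JavaScript example: 'try { f(); } catch (e) { g(e); }'
  // The catch code is emitted directly after the call below; when an
  // exception unwinds to this handler, control lands there with the
  // exception object in r0.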
1987 try_block.Call();
1988 // --- Catch block ---
1989 frame_->EmitPush(r0);
1990
1991 // Store the caught exception in the catch variable.
Leon Clarkee46be812010-01-19 14:06:41 +00001992 Variable* catch_var = node->catch_var()->var();
1993 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
1994 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
Steve Blocka7e24c12009-10-30 11:49:00 +00001995
1996 // Remove the exception from the stack.
1997 frame_->Drop();
1998
1999 VisitStatementsAndSpill(node->catch_block()->statements());
2000 if (frame_ != NULL) {
2001 exit.Jump();
2002 }
2003
2004
2005 // --- Try block ---
2006 try_block.Bind();
2007
2008 frame_->PushTryHandler(TRY_CATCH_HANDLER);
2009 int handler_height = frame_->height();
2010
2011 // Shadow the labels for all escapes from the try block, including
2012 // returns. During shadowing, the original label is hidden as the
2013 // LabelShadow and operations on the original actually affect the
2014 // shadowing label.
2015 //
2016 // We should probably try to unify the escaping labels and the return
2017 // label.
2018 int nof_escapes = node->escaping_targets()->length();
2019 List<ShadowTarget*> shadows(1 + nof_escapes);
2020
2021 // Add the shadow target for the function return.
2022 static const int kReturnShadowIndex = 0;
2023 shadows.Add(new ShadowTarget(&function_return_));
2024 bool function_return_was_shadowed = function_return_is_shadowed_;
2025 function_return_is_shadowed_ = true;
2026 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2027
2028 // Add the remaining shadow targets.
2029 for (int i = 0; i < nof_escapes; i++) {
2030 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2031 }
2032
2033 // Generate code for the statements in the try block.
2034 VisitStatementsAndSpill(node->try_block()->statements());
2035
2036 // Stop the introduced shadowing and count the number of required unlinks.
2037 // After shadowing stops, the original labels are unshadowed and the
2038 // LabelShadows represent the formerly shadowing labels.
2039 bool has_unlinks = false;
2040 for (int i = 0; i < shadows.length(); i++) {
2041 shadows[i]->StopShadowing();
2042 has_unlinks = has_unlinks || shadows[i]->is_linked();
2043 }
2044 function_return_is_shadowed_ = function_return_was_shadowed;
2045
2046 // Get an external reference to the handler address.
2047 ExternalReference handler_address(Top::k_handler_address);
2048
2049 // If we can fall off the end of the try block, unlink from try chain.
2050 if (has_valid_frame()) {
2051 // The next handler address is on top of the frame. Unlink from
2052 // the handler list and drop the rest of this handler from the
2053 // frame.
2054 ASSERT(StackHandlerConstants::kNextOffset == 0);
2055 frame_->EmitPop(r1);
2056 __ mov(r3, Operand(handler_address));
2057 __ str(r1, MemOperand(r3));
2058 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2059 if (has_unlinks) {
2060 exit.Jump();
2061 }
2062 }
2063
2064 // Generate unlink code for the (formerly) shadowing labels that have been
2065 // jumped to. Deallocate each shadow target.
2066 for (int i = 0; i < shadows.length(); i++) {
2067 if (shadows[i]->is_linked()) {
2068 // Unlink from the try chain.
2069 shadows[i]->Bind();
2070 // Because we can be jumping here (to spilled code) from unspilled
2071 // code, we need to reestablish a spilled frame at this block.
2072 frame_->SpillAll();
2073
2074 // Reload sp from the top handler, because some statements that we
2075 // break from (eg, for...in) may have left stuff on the stack.
2076 __ mov(r3, Operand(handler_address));
2077 __ ldr(sp, MemOperand(r3));
2078 frame_->Forget(frame_->height() - handler_height);
2079
2080 ASSERT(StackHandlerConstants::kNextOffset == 0);
2081 frame_->EmitPop(r1);
2082 __ str(r1, MemOperand(r3));
2083 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2084
2085 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2086 frame_->PrepareForReturn();
2087 }
2088 shadows[i]->other_target()->Jump();
2089 }
2090 }
2091
2092 exit.Bind();
2093 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2094}
2095
2096
Steve Block3ce2e202009-11-05 08:53:23 +00002097void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002098#ifdef DEBUG
2099 int original_height = frame_->height();
2100#endif
2101 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00002102 Comment cmnt(masm_, "[ TryFinallyStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002103 CodeForStatementPosition(node);
2104
2105 // State: Used to keep track of the reason for entering the finally
2106 // block. Should probably be extended to hold information for
2107 // break/continue from within the try block.
2108 enum { FALLING, THROWING, JUMPING };
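  // JavaScript example: 'try { return f(); } finally { g(); }' -- the
  // return is routed through the finally block with the state set to
  // JUMPING (plus the index of the shadowed return target) and the
  // return value kept on the stack.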
2109
2110 JumpTarget try_block;
2111 JumpTarget finally_block;
2112
2113 try_block.Call();
2114
2115 frame_->EmitPush(r0); // save exception object on the stack
2116 // In case of thrown exceptions, this is where we continue.
2117 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2118 finally_block.Jump();
2119
2120 // --- Try block ---
2121 try_block.Bind();
2122
2123 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2124 int handler_height = frame_->height();
2125
2126 // Shadow the labels for all escapes from the try block, including
2127 // returns. Shadowing hides the original label as the LabelShadow and
2128 // operations on the original actually affect the shadowing label.
2129 //
2130 // We should probably try to unify the escaping labels and the return
2131 // label.
2132 int nof_escapes = node->escaping_targets()->length();
2133 List<ShadowTarget*> shadows(1 + nof_escapes);
2134
2135 // Add the shadow target for the function return.
2136 static const int kReturnShadowIndex = 0;
2137 shadows.Add(new ShadowTarget(&function_return_));
2138 bool function_return_was_shadowed = function_return_is_shadowed_;
2139 function_return_is_shadowed_ = true;
2140 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2141
2142 // Add the remaining shadow targets.
2143 for (int i = 0; i < nof_escapes; i++) {
2144 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2145 }
2146
2147 // Generate code for the statements in the try block.
2148 VisitStatementsAndSpill(node->try_block()->statements());
2149
2150 // Stop the introduced shadowing and count the number of required unlinks.
2151 // After shadowing stops, the original labels are unshadowed and the
2152 // LabelShadows represent the formerly shadowing labels.
2153 int nof_unlinks = 0;
2154 for (int i = 0; i < shadows.length(); i++) {
2155 shadows[i]->StopShadowing();
2156 if (shadows[i]->is_linked()) nof_unlinks++;
2157 }
2158 function_return_is_shadowed_ = function_return_was_shadowed;
2159
2160 // Get an external reference to the handler address.
2161 ExternalReference handler_address(Top::k_handler_address);
2162
2163 // If we can fall off the end of the try block, unlink from the try
2164 // chain and set the state on the frame to FALLING.
2165 if (has_valid_frame()) {
2166 // The next handler address is on top of the frame.
2167 ASSERT(StackHandlerConstants::kNextOffset == 0);
2168 frame_->EmitPop(r1);
2169 __ mov(r3, Operand(handler_address));
2170 __ str(r1, MemOperand(r3));
2171 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2172
2173 // Fake a top of stack value (unneeded when FALLING) and set the
2174 // state in r2, then jump around the unlink blocks if any.
2175 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2176 frame_->EmitPush(r0);
2177 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2178 if (nof_unlinks > 0) {
2179 finally_block.Jump();
2180 }
2181 }
2182
2183 // Generate code to unlink and set the state for the (formerly)
2184 // shadowing targets that have been jumped to.
2185 for (int i = 0; i < shadows.length(); i++) {
2186 if (shadows[i]->is_linked()) {
2187 // If we have come from the shadowed return, the return value is
2188 // in (a non-refcounted reference to) r0. We must preserve it
2189 // until it is pushed.
2190 //
2191 // Because we can be jumping here (to spilled code) from
2192 // unspilled code, we need to reestablish a spilled frame at
2193 // this block.
2194 shadows[i]->Bind();
2195 frame_->SpillAll();
2196
2197 // Reload sp from the top handler, because some statements that
2198 // we break from (eg, for...in) may have left stuff on the
2199 // stack.
2200 __ mov(r3, Operand(handler_address));
2201 __ ldr(sp, MemOperand(r3));
2202 frame_->Forget(frame_->height() - handler_height);
2203
2204 // Unlink this handler and drop it from the frame. The next
2205 // handler address is currently on top of the frame.
2206 ASSERT(StackHandlerConstants::kNextOffset == 0);
2207 frame_->EmitPop(r1);
2208 __ str(r1, MemOperand(r3));
2209 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2210
2211 if (i == kReturnShadowIndex) {
2212 // If this label shadowed the function return, materialize the
2213 // return value on the stack.
2214 frame_->EmitPush(r0);
2215 } else {
2216 // Fake TOS for targets that shadowed breaks and continues.
2217 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2218 frame_->EmitPush(r0);
2219 }
2220 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2221 if (--nof_unlinks > 0) {
2222 // If this is not the last unlink block, jump around the next.
2223 finally_block.Jump();
2224 }
2225 }
2226 }
2227
2228 // --- Finally block ---
2229 finally_block.Bind();
2230
2231 // Push the state on the stack.
2232 frame_->EmitPush(r2);
2233
2234 // We keep two elements on the stack - the (possibly faked) result
2235 // and the state - while evaluating the finally block.
2236 //
2237 // Generate code for the statements in the finally block.
2238 VisitStatementsAndSpill(node->finally_block()->statements());
2239
2240 if (has_valid_frame()) {
2241 // Restore state and return value or faked TOS.
2242 frame_->EmitPop(r2);
2243 frame_->EmitPop(r0);
2244 }
2245
2246 // Generate code to jump to the right destination for all used
2247 // formerly shadowing targets. Deallocate each shadow target.
2248 for (int i = 0; i < shadows.length(); i++) {
2249 if (has_valid_frame() && shadows[i]->is_bound()) {
2250 JumpTarget* original = shadows[i]->other_target();
2251 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2252 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2253 JumpTarget skip;
2254 skip.Branch(ne);
2255 frame_->PrepareForReturn();
2256 original->Jump();
2257 skip.Bind();
2258 } else {
2259 original->Branch(eq);
2260 }
2261 }
2262 }
2263
2264 if (has_valid_frame()) {
2265 // Check if we need to rethrow the exception.
2266 JumpTarget exit;
2267 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2268 exit.Branch(ne);
2269
2270 // Rethrow exception.
2271 frame_->EmitPush(r0);
2272 frame_->CallRuntime(Runtime::kReThrow, 1);
2273
2274 // Done.
2275 exit.Bind();
2276 }
2277 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2278}
2279
2280
2281void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2282#ifdef DEBUG
2283 int original_height = frame_->height();
2284#endif
2285 VirtualFrame::SpilledScope spilled_scope;
2286 Comment cmnt(masm_, "[ DebuggerStatement");
2287 CodeForStatementPosition(node);
2288#ifdef ENABLE_DEBUGGER_SUPPORT
2289 frame_->CallRuntime(Runtime::kDebugBreak, 0);
2290#endif
2291 // Ignore the return value.
2292 ASSERT(frame_->height() == original_height);
2293}
2294
2295
2296void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2297 VirtualFrame::SpilledScope spilled_scope;
2298 ASSERT(boilerplate->IsBoilerplate());
2299
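  // The boilerplate holds the compiled code for a function literal.
  // Instantiating it creates a closure bound to the current context,
  // using the fast stub below when no literals need cloning and the
  // runtime otherwise.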
Steve Block3ce2e202009-11-05 08:53:23 +00002300 __ mov(r0, Operand(boilerplate));
Leon Clarkee46be812010-01-19 14:06:41 +00002301 // Use the fast case closure allocation code that allocates in new
2302 // space for nested functions that don't need cloning of literals.
2303 if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
2304 FastNewClosureStub stub;
2305 frame_->EmitPush(r0);
2306 frame_->CallStub(&stub, 1);
2307 frame_->EmitPush(r0);
2308 } else {
2309 // Create a new closure.
2310 frame_->EmitPush(cp);
2311 frame_->EmitPush(r0);
2312 frame_->CallRuntime(Runtime::kNewClosure, 2);
2313 frame_->EmitPush(r0);
2314 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002315}
2316
2317
2318void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2319#ifdef DEBUG
2320 int original_height = frame_->height();
2321#endif
2322 VirtualFrame::SpilledScope spilled_scope;
2323 Comment cmnt(masm_, "[ FunctionLiteral");
2324
2325 // Build the function boilerplate and instantiate it.
Steve Blockd0582a62009-12-15 09:54:21 +00002326 Handle<JSFunction> boilerplate =
2327 Compiler::BuildBoilerplate(node, script_, this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002328 // Check for stack-overflow exception.
2329 if (HasStackOverflow()) {
2330 ASSERT(frame_->height() == original_height);
2331 return;
2332 }
2333 InstantiateBoilerplate(boilerplate);
2334 ASSERT(frame_->height() == original_height + 1);
2335}
2336
2337
2338void CodeGenerator::VisitFunctionBoilerplateLiteral(
2339 FunctionBoilerplateLiteral* node) {
2340#ifdef DEBUG
2341 int original_height = frame_->height();
2342#endif
2343 VirtualFrame::SpilledScope spilled_scope;
2344 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2345 InstantiateBoilerplate(node->boilerplate());
2346 ASSERT(frame_->height() == original_height + 1);
2347}
2348
2349
2350void CodeGenerator::VisitConditional(Conditional* node) {
2351#ifdef DEBUG
2352 int original_height = frame_->height();
2353#endif
2354 VirtualFrame::SpilledScope spilled_scope;
2355 Comment cmnt(masm_, "[ Conditional");
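  // JavaScript example: 'cond ? a : b' -- the condition is loaded as
  // control flow, and a branch that can never be reached produces no
  // code.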
2356 JumpTarget then;
2357 JumpTarget else_;
Steve Blockd0582a62009-12-15 09:54:21 +00002358 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002359 if (has_valid_frame()) {
2360 Branch(false, &else_);
2361 }
2362 if (has_valid_frame() || then.is_linked()) {
2363 then.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002364 LoadAndSpill(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002365 }
2366 if (else_.is_linked()) {
2367 JumpTarget exit;
2368 if (has_valid_frame()) exit.Jump();
2369 else_.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002370 LoadAndSpill(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002371 if (exit.is_linked()) exit.Bind();
2372 }
2373 ASSERT(frame_->height() == original_height + 1);
2374}
2375
2376
2377void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
2378 VirtualFrame::SpilledScope spilled_scope;
2379 if (slot->type() == Slot::LOOKUP) {
2380 ASSERT(slot->var()->is_dynamic());
2381
2382 JumpTarget slow;
2383 JumpTarget done;
2384
2385 // Generate fast-case code for variables that might be shadowed by
2386 // eval-introduced variables. Eval is used a lot without
2387 // introducing variables. In those cases, we do not want to
2388 // perform a runtime call for all variables in the scope
2389 // containing the eval.
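  // JavaScript example: 'function f(s) { eval(s); return x; }' -- here
  // x may be shadowed by a variable introduced by the eval call, so the
  // fast paths below first check that no context extensions exist
  // before loading x directly.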
2390 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
2391 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
2392 // If there was no control flow to slow, we can exit early.
2393 if (!slow.is_linked()) {
2394 frame_->EmitPush(r0);
2395 return;
2396 }
2397
2398 done.Jump();
2399
2400 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
2401 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
2402 // Only generate the fast case for locals that rewrite to slots.
2403 // This rules out argument loads.
2404 if (potential_slot != NULL) {
2405 __ ldr(r0,
2406 ContextSlotOperandCheckExtensions(potential_slot,
2407 r1,
2408 r2,
2409 &slow));
2410 if (potential_slot->var()->mode() == Variable::CONST) {
2411 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2412 __ cmp(r0, ip);
2413 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2414 }
2415 // There is always control flow to slow from
2416 // ContextSlotOperandCheckExtensions so we have to jump around
2417 // it.
2418 done.Jump();
2419 }
2420 }
2421
2422 slow.Bind();
2423 frame_->EmitPush(cp);
2424 __ mov(r0, Operand(slot->var()->name()));
2425 frame_->EmitPush(r0);
2426
2427 if (typeof_state == INSIDE_TYPEOF) {
2428 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2429 } else {
2430 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2431 }
2432
2433 done.Bind();
2434 frame_->EmitPush(r0);
2435
2436 } else {
Steve Blocka7e24c12009-10-30 11:49:00 +00002437 // Special handling for locals allocated in registers.
2438 __ ldr(r0, SlotOperand(slot, r2));
2439 frame_->EmitPush(r0);
2440 if (slot->var()->mode() == Variable::CONST) {
2441 // Const slots may contain 'the hole' value (the constant hasn't been
2442 // initialized yet) which needs to be converted into the 'undefined'
2443 // value.
2444 Comment cmnt(masm_, "[ Unhole const");
2445 frame_->EmitPop(r0);
2446 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2447 __ cmp(r0, ip);
2448 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2449 frame_->EmitPush(r0);
2450 }
2451 }
2452}
2453
2454
Leon Clarkee46be812010-01-19 14:06:41 +00002455void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
2456 ASSERT(slot != NULL);
2457 if (slot->type() == Slot::LOOKUP) {
2458 ASSERT(slot->var()->is_dynamic());
2459
2460 // For now, just do a runtime call.
2461 frame_->EmitPush(cp);
2462 __ mov(r0, Operand(slot->var()->name()));
2463 frame_->EmitPush(r0);
2464
2465 if (init_state == CONST_INIT) {
2466 // Same as the case for a normal store, but ignores the attribute
2467 // (e.g. READ_ONLY) of the context slot so that we can initialize
2468 // const properties (introduced via eval("const foo = (some
2469 // expr);")). Also, uses the current function context instead of
2470 // the top context.
2471 //
2472 // Note that we must declare the foo upon entry of eval(), via a
2473 // context slot declaration, but we cannot initialize it at the
2474 // same time, because the const declaration may be at the end of
2475 // the eval code (sigh...) and the const variable may have been
2476 // used before (where its value is 'undefined'). Thus, we can only
2477 // do the initialization when we actually encounter the expression
2478 // and when the expression operands are defined and valid, and
2479 // thus we need the split into 2 operations: declaration of the
2480 // context slot followed by initialization.
2481 frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
2482 } else {
2483 frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
2484 }
2485 // Storing a variable must keep the (new) value on the expression
2486 // stack. This is necessary for compiling assignment expressions.
2487 frame_->EmitPush(r0);
2488
2489 } else {
2490 ASSERT(!slot->var()->is_dynamic());
2491
2492 JumpTarget exit;
2493 if (init_state == CONST_INIT) {
2494 ASSERT(slot->var()->mode() == Variable::CONST);
2495 // Only the first const initialization must be executed (the slot
2496 // still contains 'the hole' value). When the assignment is
2497 // executed, the code is identical to a normal store (see below).
2498 Comment cmnt(masm_, "[ Init const");
2499 __ ldr(r2, SlotOperand(slot, r2));
2500 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2501 __ cmp(r2, ip);
2502 exit.Branch(ne);
2503 }
2504
2505 // We must execute the store. Storing a variable must keep the
2506 // (new) value on the stack. This is necessary for compiling
2507 // assignment expressions.
2508 //
2509 // Note: We will reach here even with slot->var()->mode() ==
2510 // Variable::CONST because of const declarations which will
2511 // initialize consts to 'the hole' value and by doing so, end up
2512 // calling this code. r2 may be loaded with context; used below in
2513 // RecordWrite.
2514 frame_->EmitPop(r0);
2515 __ str(r0, SlotOperand(slot, r2));
2516 frame_->EmitPush(r0);
2517 if (slot->type() == Slot::CONTEXT) {
2518 // Skip write barrier if the written value is a smi.
2519 __ tst(r0, Operand(kSmiTagMask));
2520 exit.Branch(eq);
2521 // r2 is loaded with context when calling SlotOperand above.
2522 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
2523 __ mov(r3, Operand(offset));
2524 __ RecordWrite(r2, r3, r1);
2525 }
2526 // If we definitely did not jump over the assignment, we do not need
2527 // to bind the exit label. Doing so can defeat peephole
2528 // optimization.
2529 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
2530 exit.Bind();
2531 }
2532 }
2533}
2534
2535
Steve Blocka7e24c12009-10-30 11:49:00 +00002536void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
2537 TypeofState typeof_state,
2538 Register tmp,
2539 Register tmp2,
2540 JumpTarget* slow) {
2541 // Check that no extension objects have been created by calls to
2542 // eval from the current scope to the global scope.
2543 Register context = cp;
2544 Scope* s = scope();
2545 while (s != NULL) {
2546 if (s->num_heap_slots() > 0) {
2547 if (s->calls_eval()) {
2548 // Check that extension is NULL.
2549 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
2550 __ tst(tmp2, tmp2);
2551 slow->Branch(ne);
2552 }
2553 // Load next context in chain.
2554 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
2555 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2556 context = tmp;
2557 }
2558 // If no outer scope calls eval, we do not need to check more
2559 // context extensions.
2560 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2561 s = s->outer_scope();
2562 }
2563
2564 if (s->is_eval_scope()) {
2565 Label next, fast;
2566 if (!context.is(tmp)) {
2567 __ mov(tmp, Operand(context));
2568 }
2569 __ bind(&next);
2570 // Terminate at global context.
2571 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2572 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
2573 __ cmp(tmp2, ip);
2574 __ b(eq, &fast);
2575 // Check that extension is NULL.
2576 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2577 __ tst(tmp2, tmp2);
2578 slow->Branch(ne);
2579 // Load next context in chain.
2580 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
2581 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2582 __ b(&next);
2583 __ bind(&fast);
2584 }
2585
2586 // All extension objects were empty and it is safe to use a global
2587 // load IC call.
2588 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
2589 // Load the global object.
2590 LoadGlobal();
2591 // Setup the name register.
2592 Result name(r2);
2593 __ mov(r2, Operand(slot->var()->name()));
2594 // Call IC stub.
2595 if (typeof_state == INSIDE_TYPEOF) {
2596 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
2597 } else {
2598 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
2599 }
2600
2601 // Drop the global object. The result is in r0.
2602 frame_->Drop();
2603}
2604
2605
2606void CodeGenerator::VisitSlot(Slot* node) {
2607#ifdef DEBUG
2608 int original_height = frame_->height();
2609#endif
2610 VirtualFrame::SpilledScope spilled_scope;
2611 Comment cmnt(masm_, "[ Slot");
Steve Blockd0582a62009-12-15 09:54:21 +00002612 LoadFromSlot(node, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00002613 ASSERT(frame_->height() == original_height + 1);
2614}
2615
2616
2617void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2618#ifdef DEBUG
2619 int original_height = frame_->height();
2620#endif
2621 VirtualFrame::SpilledScope spilled_scope;
2622 Comment cmnt(masm_, "[ VariableProxy");
2623
2624 Variable* var = node->var();
2625 Expression* expr = var->rewrite();
2626 if (expr != NULL) {
2627 Visit(expr);
2628 } else {
2629 ASSERT(var->is_global());
2630 Reference ref(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002631 ref.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002632 }
2633 ASSERT(frame_->height() == original_height + 1);
2634}
2635
2636
2637void CodeGenerator::VisitLiteral(Literal* node) {
2638#ifdef DEBUG
2639 int original_height = frame_->height();
2640#endif
2641 VirtualFrame::SpilledScope spilled_scope;
2642 Comment cmnt(masm_, "[ Literal");
2643 __ mov(r0, Operand(node->handle()));
2644 frame_->EmitPush(r0);
2645 ASSERT(frame_->height() == original_height + 1);
2646}
2647
2648
2649void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2650#ifdef DEBUG
2651 int original_height = frame_->height();
2652#endif
2653 VirtualFrame::SpilledScope spilled_scope;
2654 Comment cmnt(masm_, "[ RegExp Literal");
2655
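  // JavaScript example: '/ab+c/gi' -- the regexp object is materialized
  // by the runtime on first execution and reused from the function's
  // literal array afterwards; the undefined check below skips the
  // runtime call in that case.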
2656 // Retrieve the literal array and check the allocated entry.
2657
2658 // Load the function of this activation.
2659 __ ldr(r1, frame_->Function());
2660
2661 // Load the literals array of the function.
2662 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2663
2664 // Load the literal at the ast saved index.
2665 int literal_offset =
2666 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2667 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2668
2669 JumpTarget done;
2670 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2671 __ cmp(r2, ip);
2672 done.Branch(ne);
2673
2674 // If the entry is undefined we call the runtime system to compute
2675 // the literal.
2676 frame_->EmitPush(r1); // literal array (0)
2677 __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
2678 frame_->EmitPush(r0); // literal index (1)
2679 __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
2680 frame_->EmitPush(r0);
2681 __ mov(r0, Operand(node->flags())); // RegExp flags (3)
2682 frame_->EmitPush(r0);
2683 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2684 __ mov(r2, Operand(r0));
2685
2686 done.Bind();
2687 // Push the literal.
2688 frame_->EmitPush(r2);
2689 ASSERT(frame_->height() == original_height + 1);
2690}
2691
2692
Steve Blocka7e24c12009-10-30 11:49:00 +00002693void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2694#ifdef DEBUG
2695 int original_height = frame_->height();
2696#endif
2697 VirtualFrame::SpilledScope spilled_scope;
2698 Comment cmnt(masm_, "[ ObjectLiteral");
2699
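  // JavaScript example: 'var o = { a: 1, b: g(), get c() { return 3; } }'
  // -- the constant property a is already set by the runtime call below;
  // the computed property b and the getter c are set by the per-property
  // code emitted in the loop that follows.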
Steve Blocka7e24c12009-10-30 11:49:00 +00002700 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00002701 __ ldr(r2, frame_->Function());
2702 // Literal array.
2703 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
2704 // Literal index.
2705 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
2706 // Constant properties.
2707 __ mov(r0, Operand(node->constant_properties()));
2708 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
2709 if (node->depth() > 1) {
2710 frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
2711 } else {
2712 frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002713 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002714 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00002715 // r0: created object literal
Steve Blocka7e24c12009-10-30 11:49:00 +00002716
2717 for (int i = 0; i < node->properties()->length(); i++) {
2718 ObjectLiteral::Property* property = node->properties()->at(i);
2719 Literal* key = property->key();
2720 Expression* value = property->value();
2721 switch (property->kind()) {
2722 case ObjectLiteral::Property::CONSTANT:
2723 break;
2724 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2725 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2726 // else fall through
2727 case ObjectLiteral::Property::COMPUTED: // fall through
2728 case ObjectLiteral::Property::PROTOTYPE: {
2729 frame_->EmitPush(r0); // dup the result
2730 LoadAndSpill(key);
2731 LoadAndSpill(value);
2732 frame_->CallRuntime(Runtime::kSetProperty, 3);
2733 // restore r0
2734 __ ldr(r0, frame_->Top());
2735 break;
2736 }
2737 case ObjectLiteral::Property::SETTER: {
2738 frame_->EmitPush(r0);
2739 LoadAndSpill(key);
2740 __ mov(r0, Operand(Smi::FromInt(1)));
2741 frame_->EmitPush(r0);
2742 LoadAndSpill(value);
2743 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2744 __ ldr(r0, frame_->Top());
2745 break;
2746 }
2747 case ObjectLiteral::Property::GETTER: {
2748 frame_->EmitPush(r0);
2749 LoadAndSpill(key);
2750 __ mov(r0, Operand(Smi::FromInt(0)));
2751 frame_->EmitPush(r0);
2752 LoadAndSpill(value);
2753 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2754 __ ldr(r0, frame_->Top());
2755 break;
2756 }
2757 }
2758 }
2759 ASSERT(frame_->height() == original_height + 1);
2760}
2761
2762
Steve Blocka7e24c12009-10-30 11:49:00 +00002763void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2764#ifdef DEBUG
2765 int original_height = frame_->height();
2766#endif
2767 VirtualFrame::SpilledScope spilled_scope;
2768 Comment cmnt(masm_, "[ ArrayLiteral");
2769
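  // JavaScript example: 'var a = [1, g(), 3]' -- the literal elements 1
  // and 3 are already in place after the runtime call below; only the
  // computed element g() is stored explicitly by the loop that follows.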
Steve Blocka7e24c12009-10-30 11:49:00 +00002770 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00002771 __ ldr(r2, frame_->Function());
2772 // Literals array.
2773 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
2774 // Literal index.
2775 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
2776 // Constant elements.
2777 __ mov(r0, Operand(node->constant_elements()));
2778 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
2779 if (node->depth() > 1) {
2780 frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
2781 } else {
2782 frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002783 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002784 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00002785 // r0: created array literal
Steve Blocka7e24c12009-10-30 11:49:00 +00002786
2787 // Generate code to set the elements in the array that are not
2788 // literals.
2789 for (int i = 0; i < node->values()->length(); i++) {
2790 Expression* value = node->values()->at(i);
2791
2792 // If value is a literal the property value is already set in the
2793 // boilerplate object.
2794 if (value->AsLiteral() != NULL) continue;
2795 // If value is a materialized literal the property value is already set
2796 // in the boilerplate object if it is simple.
2797 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2798
2799 // The property must be set by generated code.
2800 LoadAndSpill(value);
2801 frame_->EmitPop(r0);
2802
2803 // Fetch the array literal.
2804 __ ldr(r1, frame_->Top());
2805 // Get the elements array.
2806 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
2807
2808 // Write to the indexed properties array.
2809 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2810 __ str(r0, FieldMemOperand(r1, offset));
2811
2812 // Update the write barrier for the array address.
2813 __ mov(r3, Operand(offset));
2814 __ RecordWrite(r1, r3, r2);
2815 }
2816 ASSERT(frame_->height() == original_height + 1);
2817}
2818
2819
2820void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2821#ifdef DEBUG
2822 int original_height = frame_->height();
2823#endif
2824 VirtualFrame::SpilledScope spilled_scope;
2825 // Call runtime routine to allocate the catch extension object and
2826 // assign the exception value to the catch variable.
2827 Comment cmnt(masm_, "[ CatchExtensionObject");
2828 LoadAndSpill(node->key());
2829 LoadAndSpill(node->value());
2830 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2831 frame_->EmitPush(r0);
2832 ASSERT(frame_->height() == original_height + 1);
2833}
2834
2835
2836void CodeGenerator::VisitAssignment(Assignment* node) {
2837#ifdef DEBUG
2838 int original_height = frame_->height();
2839#endif
2840 VirtualFrame::SpilledScope spilled_scope;
2841 Comment cmnt(masm_, "[ Assignment");
2842
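  // JavaScript example: 'x = f()' is a simple assignment; 'x += 1' is a
  // compound assignment, for which the old value of x is loaded first
  // and combined with the right-hand side before being stored back.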
Leon Clarked91b9f72010-01-27 17:25:45 +00002843 { Reference target(this, node->target(), node->is_compound());
Steve Blocka7e24c12009-10-30 11:49:00 +00002844 if (target.is_illegal()) {
2845 // Fool the virtual frame into thinking that we left the assignment's
2846 // value on the frame.
2847 __ mov(r0, Operand(Smi::FromInt(0)));
2848 frame_->EmitPush(r0);
2849 ASSERT(frame_->height() == original_height + 1);
2850 return;
2851 }
2852
2853 if (node->op() == Token::ASSIGN ||
2854 node->op() == Token::INIT_VAR ||
2855 node->op() == Token::INIT_CONST) {
2856 LoadAndSpill(node->value());
2857
Leon Clarked91b9f72010-01-27 17:25:45 +00002858 } else { // Assignment is a compound assignment.
Steve Blocka7e24c12009-10-30 11:49:00 +00002859 // Get the old value of the lhs.
Steve Blockd0582a62009-12-15 09:54:21 +00002860 target.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002861 Literal* literal = node->value()->AsLiteral();
2862 bool overwrite =
2863 (node->value()->AsBinaryOperation() != NULL &&
2864 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2865 if (literal != NULL && literal->handle()->IsSmi()) {
2866 SmiOperation(node->binary_op(),
2867 literal->handle(),
2868 false,
2869 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2870 frame_->EmitPush(r0);
2871
2872 } else {
2873 LoadAndSpill(node->value());
2874 GenericBinaryOperation(node->binary_op(),
2875 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2876 frame_->EmitPush(r0);
2877 }
2878 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002879 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2880 if (var != NULL &&
2881 (var->mode() == Variable::CONST) &&
2882 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2883 // Assignment ignored - leave the value on the stack.
Leon Clarked91b9f72010-01-27 17:25:45 +00002884 UnloadReference(&target);
Steve Blocka7e24c12009-10-30 11:49:00 +00002885 } else {
2886 CodeForSourcePosition(node->position());
2887 if (node->op() == Token::INIT_CONST) {
2888 // Dynamic constant initializations must use the function context
2889 // and initialize the actual constant declared. Dynamic variable
2890 // initializations are simply assignments and use SetValue.
2891 target.SetValue(CONST_INIT);
2892 } else {
2893 target.SetValue(NOT_CONST_INIT);
2894 }
2895 }
2896 }
2897 ASSERT(frame_->height() == original_height + 1);
2898}
2899
2900
2901void CodeGenerator::VisitThrow(Throw* node) {
2902#ifdef DEBUG
2903 int original_height = frame_->height();
2904#endif
2905 VirtualFrame::SpilledScope spilled_scope;
2906 Comment cmnt(masm_, "[ Throw");
2907
2908 LoadAndSpill(node->exception());
2909 CodeForSourcePosition(node->position());
2910 frame_->CallRuntime(Runtime::kThrow, 1);
2911 frame_->EmitPush(r0);
2912 ASSERT(frame_->height() == original_height + 1);
2913}
2914
2915
2916void CodeGenerator::VisitProperty(Property* node) {
2917#ifdef DEBUG
2918 int original_height = frame_->height();
2919#endif
2920 VirtualFrame::SpilledScope spilled_scope;
2921 Comment cmnt(masm_, "[ Property");
2922
2923 { Reference property(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002924 property.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002925 }
2926 ASSERT(frame_->height() == original_height + 1);
2927}
2928
2929
2930void CodeGenerator::VisitCall(Call* node) {
2931#ifdef DEBUG
2932 int original_height = frame_->height();
2933#endif
2934 VirtualFrame::SpilledScope spilled_scope;
2935 Comment cmnt(masm_, "[ Call");
2936
2937 Expression* function = node->expression();
2938 ZoneList<Expression*>* args = node->arguments();
2939
2940 // Standard function call.
2941 // Check if the function is a variable or a property.
2942 Variable* var = function->AsVariableProxy()->AsVariable();
2943 Property* property = function->AsProperty();
2944
2945 // ------------------------------------------------------------------------
2946 // Fast-case: Use inline caching.
2947 // ---
2948 // According to ECMA-262, section 11.2.3, page 44, the function to call
2949 // must be resolved after the arguments have been evaluated. The IC code
2950 // automatically handles this by loading the arguments before the function
2951 // is resolved in cache misses (this also holds for megamorphic calls).
2952 // ------------------------------------------------------------------------
2953
2954 if (var != NULL && var->is_possibly_eval()) {
2955 // ----------------------------------
2956 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2957 // ----------------------------------
2958
2959 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2960 // resolve the function we need to call and the receiver of the
2961 // call. Then we call the resolved function using the given
2962 // arguments.
2963 // Prepare stack for call to resolved function.
2964 LoadAndSpill(function);
2965 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2966 frame_->EmitPush(r2); // Slot for receiver
2967 int arg_count = args->length();
2968 for (int i = 0; i < arg_count; i++) {
2969 LoadAndSpill(args->at(i));
2970 }
2971
2972 // Prepare stack for call to ResolvePossiblyDirectEval.
2973 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
2974 frame_->EmitPush(r1);
2975 if (arg_count > 0) {
2976 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
2977 frame_->EmitPush(r1);
2978 } else {
2979 frame_->EmitPush(r2);
2980 }
2981
Leon Clarkee46be812010-01-19 14:06:41 +00002982 // Push the receiver.
2983 __ ldr(r1, frame_->Receiver());
2984 frame_->EmitPush(r1);
2985
Steve Blocka7e24c12009-10-30 11:49:00 +00002986 // Resolve the call.
Leon Clarkee46be812010-01-19 14:06:41 +00002987 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002988
2989 // Touch up stack with the right values for the function and the receiver.
Leon Clarkee46be812010-01-19 14:06:41 +00002990 __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00002991 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
2992
2993 // Call the function.
2994 CodeForSourcePosition(node->position());
2995
2996 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Leon Clarkee46be812010-01-19 14:06:41 +00002997 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002998 frame_->CallStub(&call_function, arg_count + 1);
2999
3000 __ ldr(cp, frame_->Context());
3001 // Remove the function from the stack.
3002 frame_->Drop();
3003 frame_->EmitPush(r0);
3004
3005 } else if (var != NULL && !var->is_this() && var->is_global()) {
3006 // ----------------------------------
3007 // JavaScript example: 'foo(1, 2, 3)' // foo is global
3008 // ----------------------------------
3009
3010 // Push the name of the function and the receiver onto the stack.
3011 __ mov(r0, Operand(var->name()));
3012 frame_->EmitPush(r0);
3013
3014 // Pass the global object as the receiver and let the IC stub
3015 // patch the stack to use the global proxy as 'this' in the
3016 // invoked function.
3017 LoadGlobal();
3018
3019 // Load the arguments.
3020 int arg_count = args->length();
3021 for (int i = 0; i < arg_count; i++) {
3022 LoadAndSpill(args->at(i));
3023 }
3024
3025 // Setup the receiver register and call the IC initialization code.
3026 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3027 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3028 CodeForSourcePosition(node->position());
3029 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3030 arg_count + 1);
3031 __ ldr(cp, frame_->Context());
3032 // Remove the function from the stack.
3033 frame_->Drop();
3034 frame_->EmitPush(r0);
3035
3036 } else if (var != NULL && var->slot() != NULL &&
3037 var->slot()->type() == Slot::LOOKUP) {
3038 // ----------------------------------
3039 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
3040 // ----------------------------------
3041
3042 // Load the function
3043 frame_->EmitPush(cp);
3044 __ mov(r0, Operand(var->name()));
3045 frame_->EmitPush(r0);
3046 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3047 // r0: slot value; r1: receiver
3048
3049 // Load the receiver.
3050 frame_->EmitPush(r0); // function
3051 frame_->EmitPush(r1); // receiver
3052
3053 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003054 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003055 frame_->EmitPush(r0);
3056
3057 } else if (property != NULL) {
3058 // Check if the key is a literal string.
3059 Literal* literal = property->key()->AsLiteral();
3060
3061 if (literal != NULL && literal->handle()->IsSymbol()) {
3062 // ------------------------------------------------------------------
3063 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
3064 // ------------------------------------------------------------------
3065
3066 // Push the name of the function and the receiver onto the stack.
3067 __ mov(r0, Operand(literal->handle()));
3068 frame_->EmitPush(r0);
3069 LoadAndSpill(property->obj());
3070
3071 // Load the arguments.
3072 int arg_count = args->length();
3073 for (int i = 0; i < arg_count; i++) {
3074 LoadAndSpill(args->at(i));
3075 }
3076
3077 // Set the receiver register and call the IC initialization code.
3078 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3079 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3080 CodeForSourcePosition(node->position());
3081 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3082 __ ldr(cp, frame_->Context());
3083
3084 // Remove the function from the stack.
3085 frame_->Drop();
3086
3087 frame_->EmitPush(r0); // push the result after removing the function from the stack
3088
3089 } else {
3090 // -------------------------------------------
3091 // JavaScript example: 'array[index](1, 2, 3)'
3092 // -------------------------------------------
3093
Leon Clarked91b9f72010-01-27 17:25:45 +00003094 LoadAndSpill(property->obj());
3095 LoadAndSpill(property->key());
3096 EmitKeyedLoad(false);
3097 frame_->Drop(); // key
3098 // Put the function below the receiver.
Steve Blocka7e24c12009-10-30 11:49:00 +00003099 if (property->is_synthetic()) {
Leon Clarked91b9f72010-01-27 17:25:45 +00003100 // Use the global receiver.
3101 frame_->Drop();
3102 frame_->EmitPush(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003103 LoadGlobalReceiver(r0);
3104 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00003105 frame_->EmitPop(r1); // receiver
3106 frame_->EmitPush(r0); // function
3107 frame_->EmitPush(r1); // receiver
Steve Blocka7e24c12009-10-30 11:49:00 +00003108 }
3109
3110 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003111 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003112 frame_->EmitPush(r0);
3113 }
3114
3115 } else {
3116 // ----------------------------------
3117 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
3118 // ----------------------------------
3119
3120 // Load the function.
3121 LoadAndSpill(function);
3122
3123 // Pass the global proxy as the receiver.
3124 LoadGlobalReceiver(r0);
3125
3126 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003127 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003128 frame_->EmitPush(r0);
3129 }
3130 ASSERT(frame_->height() == original_height + 1);
3131}
3132
3133
3134void CodeGenerator::VisitCallNew(CallNew* node) {
3135#ifdef DEBUG
3136 int original_height = frame_->height();
3137#endif
3138 VirtualFrame::SpilledScope spilled_scope;
3139 Comment cmnt(masm_, "[ CallNew");
3140
3141 // According to ECMA-262, section 11.2.2, page 44, the function
3142 // expression in new calls must be evaluated before the
3143 // arguments. This is different from ordinary calls, where the
3144 // actual function to call is resolved after the arguments have been
3145 // evaluated.
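  // For example, in 'new f(g())' the value of f is fetched before g() is
  // evaluated, whereas in the plain call 'f(g())' the function value is only
  // fetched after g() has run.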
3146
3147 // Compute function to call and use the global object as the
3148 // receiver. There is no need to use the global proxy here because
3149 // it will always be replaced with a newly allocated object.
3150 LoadAndSpill(node->expression());
3151 LoadGlobal();
3152
3153 // Push the arguments ("left-to-right") on the stack.
3154 ZoneList<Expression*>* args = node->arguments();
3155 int arg_count = args->length();
3156 for (int i = 0; i < arg_count; i++) {
3157 LoadAndSpill(args->at(i));
3158 }
3159
3160 // r0: the number of arguments.
3161 Result num_args(r0);
3162 __ mov(r0, Operand(arg_count));
3163
3164 // Load the function into r1 as per calling convention.
3165 Result function(r1);
3166 __ ldr(r1, frame_->ElementAt(arg_count + 1));
3167
3168 // Call the construct call builtin that handles allocation and
3169 // constructor invocation.
3170 CodeForSourcePosition(node->position());
3171 Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
3172 frame_->CallCodeObject(ic,
3173 RelocInfo::CONSTRUCT_CALL,
3174 &num_args,
3175 &function,
3176 arg_count + 1);
3177
3178 // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
3179 __ str(r0, frame_->Top());
3180 ASSERT(frame_->height() == original_height + 1);
3181}
3182
3183
3184void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
3185 VirtualFrame::SpilledScope spilled_scope;
3186 ASSERT(args->length() == 1);
3187 JumpTarget leave, null, function, non_function_constructor;
3188
3189 // Load the object into r0.
3190 LoadAndSpill(args->at(0));
3191 frame_->EmitPop(r0);
3192
3193 // If the object is a smi, we return null.
3194 __ tst(r0, Operand(kSmiTagMask));
3195 null.Branch(eq);
3196
3197 // Check that the object is a JS object but take special care of JS
3198 // functions to make sure they have 'Function' as their class.
3199 __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
3200 null.Branch(lt);
3201
3202 // As long as JS_FUNCTION_TYPE is the last instance type and it is
3203 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
3204 // LAST_JS_OBJECT_TYPE.
3205 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
3206 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
3207 __ cmp(r1, Operand(JS_FUNCTION_TYPE));
3208 function.Branch(eq);
3209
3210 // Check if the constructor in the map is a function.
3211 __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
3212 __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
3213 non_function_constructor.Branch(ne);
3214
3215 // The r0 register now contains the constructor function. Grab the
3216 // instance class name from there.
3217 __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
3218 __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
3219 frame_->EmitPush(r0);
3220 leave.Jump();
3221
3222 // Functions have class 'Function'.
3223 function.Bind();
3224 __ mov(r0, Operand(Factory::function_class_symbol()));
3225 frame_->EmitPush(r0);
3226 leave.Jump();
3227
3228 // Objects with a non-function constructor have class 'Object'.
3229 non_function_constructor.Bind();
3230 __ mov(r0, Operand(Factory::Object_symbol()));
3231 frame_->EmitPush(r0);
3232 leave.Jump();
3233
3234 // Non-JS objects have class null.
3235 null.Bind();
3236 __ LoadRoot(r0, Heap::kNullValueRootIndex);
3237 frame_->EmitPush(r0);
3238
3239 // All done.
3240 leave.Bind();
3241}
3242
3243
3244void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
3245 VirtualFrame::SpilledScope spilled_scope;
3246 ASSERT(args->length() == 1);
3247 JumpTarget leave;
3248 LoadAndSpill(args->at(0));
3249 frame_->EmitPop(r0); // r0 contains object.
3250 // if (object->IsSmi()) return the object.
3251 __ tst(r0, Operand(kSmiTagMask));
3252 leave.Branch(eq);
3253 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3254 __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
3255 leave.Branch(ne);
3256 // Load the value.
3257 __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
3258 leave.Bind();
3259 frame_->EmitPush(r0);
3260}
3261
3262
3263void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
3264 VirtualFrame::SpilledScope spilled_scope;
3265 ASSERT(args->length() == 2);
3266 JumpTarget leave;
3267 LoadAndSpill(args->at(0)); // Load the object.
3268 LoadAndSpill(args->at(1)); // Load the value.
3269 frame_->EmitPop(r0); // r0 contains value
3270 frame_->EmitPop(r1); // r1 contains object
3271 // if (object->IsSmi()) return object.
3272 __ tst(r1, Operand(kSmiTagMask));
3273 leave.Branch(eq);
3274 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3275 __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
3276 leave.Branch(ne);
3277 // Store the value.
3278 __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
3279 // Update the write barrier.
3280 __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
3281 __ RecordWrite(r1, r2, r3);
3282 // Leave.
3283 leave.Bind();
3284 frame_->EmitPush(r0);
3285}
3286
3287
3288void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3289 VirtualFrame::SpilledScope spilled_scope;
3290 ASSERT(args->length() == 1);
3291 LoadAndSpill(args->at(0));
3292 frame_->EmitPop(r0);
3293 __ tst(r0, Operand(kSmiTagMask));
3294 cc_reg_ = eq;
3295}
3296
3297
3298void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3299 VirtualFrame::SpilledScope spilled_scope;
3300 // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
3301 ASSERT_EQ(args->length(), 3);
3302#ifdef ENABLE_LOGGING_AND_PROFILING
3303 if (ShouldGenerateLog(args->at(0))) {
3304 LoadAndSpill(args->at(1));
3305 LoadAndSpill(args->at(2));
3306 __ CallRuntime(Runtime::kLog, 2);
3307 }
3308#endif
3309 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3310 frame_->EmitPush(r0);
3311}
3312
3313
3314void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3315 VirtualFrame::SpilledScope spilled_scope;
3316 ASSERT(args->length() == 1);
3317 LoadAndSpill(args->at(0));
3318 frame_->EmitPop(r0);
3319 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3320 cc_reg_ = eq;
3321}
3322
3323
3324// This generates inlined code for a charCodeAt() call on flat strings, and
3325// otherwise returns undefined in order to trigger the slow case,
3326// Runtime_StringCharCodeAt.
3327void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3328 VirtualFrame::SpilledScope spilled_scope;
3329 ASSERT(args->length() == 2);
Steve Blockd0582a62009-12-15 09:54:21 +00003330 Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
3331
3332 LoadAndSpill(args->at(0));
3333 LoadAndSpill(args->at(1));
3334 frame_->EmitPop(r0); // Index.
3335 frame_->EmitPop(r1); // String.
3336
3337 Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
3338
3339 __ tst(r1, Operand(kSmiTagMask));
3340 __ b(eq, &slow); // The 'string' was a Smi.
3341
3342 ASSERT(kSmiTag == 0);
3343 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3344 __ b(ne, &slow); // The index was negative or not a Smi.
3345
3346 __ bind(&try_again_with_new_string);
3347 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
3348 __ b(ge, &slow);
3349
3350 // Now r2 has the string type.
3351 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
3352 // Now r3 has the length of the string. Compare with the index.
3353 __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
3354 __ b(le, &slow);
3355
3356 // Here we know the index is in range. Check that string is sequential.
3357 ASSERT_EQ(0, kSeqStringTag);
3358 __ tst(r2, Operand(kStringRepresentationMask));
3359 __ b(ne, &not_a_flat_string);
3360
3361 // Check whether it is an ASCII string.
3362 ASSERT_EQ(0, kTwoByteStringTag);
3363 __ tst(r2, Operand(kStringEncodingMask));
3364 __ b(ne, &ascii_string);
3365
3366 // 2-byte string. We can add without shifting since the Smi tag size is the
3367 // log2 of the number of bytes in a two-byte character.
3368 ASSERT_EQ(1, kSmiTagSize);
3369 ASSERT_EQ(0, kSmiShiftSize);
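  // For example, the smi encoding of index 3 is the bit pattern 6, which is
  // exactly the byte offset of character 3 in a two-byte string, so the raw
  // smi in r0 can be added to the string pointer directly.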
3370 __ add(r1, r1, Operand(r0));
3371 __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
3372 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
3373 __ jmp(&end);
3374
3375 __ bind(&ascii_string);
3376 __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
3377 __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
3378 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
3379 __ jmp(&end);
3380
3381 __ bind(&not_a_flat_string);
3382 __ and_(r2, r2, Operand(kStringRepresentationMask));
3383 __ cmp(r2, Operand(kConsStringTag));
3384 __ b(ne, &slow);
3385
3386 // ConsString.
3387 // Check that the right hand side is the empty string (i.e. this is really a
3388 // flat string in a cons string). If that is not the case we would rather go
3389 // to the runtime system now, to flatten the string.
3390 __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
3391 __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
3392 __ cmp(r2, Operand(r3));
3393 __ b(ne, &slow);
3394
3395 // Get the first of the two strings.
3396 __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
3397 __ jmp(&try_again_with_new_string);
3398
3399 __ bind(&slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00003400 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
Steve Blockd0582a62009-12-15 09:54:21 +00003401
3402 __ bind(&end);
Steve Blocka7e24c12009-10-30 11:49:00 +00003403 frame_->EmitPush(r0);
3404}
3405
3406
3407void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3408 VirtualFrame::SpilledScope spilled_scope;
3409 ASSERT(args->length() == 1);
3410 LoadAndSpill(args->at(0));
3411 JumpTarget answer;
3412 // We need the CC bits to come out as not_equal in the case where the
3413 // object is a smi. This can't be done with the usual test opcode so
3414 // we use XOR to get the right CC bits.
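  // Since kSmiTag is 0, (r0 & kSmiTagMask) is 0 for a smi; XORing it with
  // kSmiTagMask then yields a non-zero value, so the flags are 'ne' and the
  // 'eq' answer below comes out false for smis.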
3415 frame_->EmitPop(r0);
3416 __ and_(r1, r0, Operand(kSmiTagMask));
3417 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
3418 answer.Branch(ne);
3419 // It is a heap object - get the map. Check if the object is a JS array.
3420 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
3421 answer.Bind();
3422 cc_reg_ = eq;
3423}
3424
3425
Steve Blockd0582a62009-12-15 09:54:21 +00003426void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
3427 // This generates a fast version of:
3428 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
3429 VirtualFrame::SpilledScope spilled_scope;
3430 ASSERT(args->length() == 1);
3431 LoadAndSpill(args->at(0));
3432 frame_->EmitPop(r1);
3433 __ tst(r1, Operand(kSmiTagMask));
3434 false_target()->Branch(eq);
3435
3436 __ LoadRoot(ip, Heap::kNullValueRootIndex);
3437 __ cmp(r1, ip);
3438 true_target()->Branch(eq);
3439
3440 Register map_reg = r2;
3441 __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
3442 // Undetectable objects behave like undefined when tested with typeof.
3443 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
3444 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
3445 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
3446 false_target()->Branch(eq);
3447
3448 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
3449 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
3450 false_target()->Branch(lt);
3451 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
3452 cc_reg_ = le;
3453}
3454
3455
3456void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
3457 // This generates a fast version of:
3458 // (%_ClassOf(arg) === 'Function')
3459 VirtualFrame::SpilledScope spilled_scope;
3460 ASSERT(args->length() == 1);
3461 LoadAndSpill(args->at(0));
3462 frame_->EmitPop(r0);
3463 __ tst(r0, Operand(kSmiTagMask));
3464 false_target()->Branch(eq);
3465 Register map_reg = r2;
3466 __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
3467 cc_reg_ = eq;
3468}
3469
3470
Leon Clarked91b9f72010-01-27 17:25:45 +00003471void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
3472 VirtualFrame::SpilledScope spilled_scope;
3473 ASSERT(args->length() == 1);
3474 LoadAndSpill(args->at(0));
3475 frame_->EmitPop(r0);
3476 __ tst(r0, Operand(kSmiTagMask));
3477 false_target()->Branch(eq);
3478 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
3479 __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
3480 __ tst(r1, Operand(1 << Map::kIsUndetectable));
3481 cc_reg_ = ne;
3482}
3483
3484
Steve Blocka7e24c12009-10-30 11:49:00 +00003485void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3486 VirtualFrame::SpilledScope spilled_scope;
3487 ASSERT(args->length() == 0);
3488
3489 // Get the frame pointer for the calling frame.
3490 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3491
3492 // Skip the arguments adaptor frame if it exists.
3493 Label check_frame_marker;
3494 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
3495 __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3496 __ b(ne, &check_frame_marker);
3497 __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
3498
3499 // Check the marker in the calling frame.
3500 __ bind(&check_frame_marker);
3501 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
3502 __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3503 cc_reg_ = eq;
3504}
3505
3506
3507void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3508 VirtualFrame::SpilledScope spilled_scope;
3509 ASSERT(args->length() == 0);
3510
3511 // Seed the result with the formal parameters count, which will be used
3512 // in case no arguments adaptor frame is found below the current frame.
3513 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
3514
3515 // Call the shared stub to get to the arguments.length.
3516 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3517 frame_->CallStub(&stub, 0);
3518 frame_->EmitPush(r0);
3519}
3520
3521
3522void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3523 VirtualFrame::SpilledScope spilled_scope;
3524 ASSERT(args->length() == 1);
3525
3526 // Satisfy contract with ArgumentsAccessStub:
3527 // Load the key into r1 and the formal parameters count into r0.
3528 LoadAndSpill(args->at(0));
3529 frame_->EmitPop(r1);
3530 __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
3531
3532 // Call the shared stub to get to arguments[key].
3533 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3534 frame_->CallStub(&stub, 0);
3535 frame_->EmitPush(r0);
3536}
3537
3538
3539void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3540 VirtualFrame::SpilledScope spilled_scope;
3541 ASSERT(args->length() == 0);
3542 __ Call(ExternalReference::random_positive_smi_function().address(),
3543 RelocInfo::RUNTIME_ENTRY);
3544 frame_->EmitPush(r0);
3545}
3546
3547
Steve Blockd0582a62009-12-15 09:54:21 +00003548void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
3549 ASSERT_EQ(2, args->length());
3550
3551 Load(args->at(0));
3552 Load(args->at(1));
3553
3554 frame_->CallRuntime(Runtime::kStringAdd, 2);
3555 frame_->EmitPush(r0);
3556}
3557
3558
Leon Clarkee46be812010-01-19 14:06:41 +00003559void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
3560 ASSERT_EQ(3, args->length());
3561
3562 Load(args->at(0));
3563 Load(args->at(1));
3564 Load(args->at(2));
3565
3566 frame_->CallRuntime(Runtime::kSubString, 3);
3567 frame_->EmitPush(r0);
3568}
3569
3570
3571void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
3572 ASSERT_EQ(2, args->length());
3573
3574 Load(args->at(0));
3575 Load(args->at(1));
3576
Leon Clarked91b9f72010-01-27 17:25:45 +00003577 StringCompareStub stub;
3578 frame_->CallStub(&stub, 2);
Leon Clarkee46be812010-01-19 14:06:41 +00003579 frame_->EmitPush(r0);
3580}
3581
3582
3583void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
3584 ASSERT_EQ(4, args->length());
3585
3586 Load(args->at(0));
3587 Load(args->at(1));
3588 Load(args->at(2));
3589 Load(args->at(3));
3590
3591 frame_->CallRuntime(Runtime::kRegExpExec, 4);
3592 frame_->EmitPush(r0);
3593}
3594
3595
Steve Blocka7e24c12009-10-30 11:49:00 +00003596void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3597 VirtualFrame::SpilledScope spilled_scope;
3598 ASSERT(args->length() == 2);
3599
3600 // Load the two objects into registers and perform the comparison.
3601 LoadAndSpill(args->at(0));
3602 LoadAndSpill(args->at(1));
3603 frame_->EmitPop(r0);
3604 frame_->EmitPop(r1);
3605 __ cmp(r0, Operand(r1));
3606 cc_reg_ = eq;
3607}
3608
3609
3610void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
3611#ifdef DEBUG
3612 int original_height = frame_->height();
3613#endif
3614 VirtualFrame::SpilledScope spilled_scope;
3615 if (CheckForInlineRuntimeCall(node)) {
3616 ASSERT((has_cc() && frame_->height() == original_height) ||
3617 (!has_cc() && frame_->height() == original_height + 1));
3618 return;
3619 }
3620
3621 ZoneList<Expression*>* args = node->arguments();
3622 Comment cmnt(masm_, "[ CallRuntime");
3623 Runtime::Function* function = node->function();
3624
3625 if (function == NULL) {
3626 // Prepare stack for calling JS runtime function.
3627 __ mov(r0, Operand(node->name()));
3628 frame_->EmitPush(r0);
3629 // Push the builtins object found in the current global object.
3630 __ ldr(r1, GlobalObject());
3631 __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
3632 frame_->EmitPush(r0);
3633 }
3634
3635 // Push the arguments ("left-to-right").
3636 int arg_count = args->length();
3637 for (int i = 0; i < arg_count; i++) {
3638 LoadAndSpill(args->at(i));
3639 }
3640
3641 if (function == NULL) {
3642 // Call the JS runtime function.
3643 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3644 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3645 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3646 __ ldr(cp, frame_->Context());
3647 frame_->Drop();
3648 frame_->EmitPush(r0);
3649 } else {
3650 // Call the C runtime function.
3651 frame_->CallRuntime(function, arg_count);
3652 frame_->EmitPush(r0);
3653 }
3654 ASSERT(frame_->height() == original_height + 1);
3655}
3656
3657
3658void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
3659#ifdef DEBUG
3660 int original_height = frame_->height();
3661#endif
3662 VirtualFrame::SpilledScope spilled_scope;
3663 Comment cmnt(masm_, "[ UnaryOperation");
3664
3665 Token::Value op = node->op();
3666
3667 if (op == Token::NOT) {
3668 LoadConditionAndSpill(node->expression(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003669 false_target(),
3670 true_target(),
3671 true);
3672 // LoadCondition may (and usually does) leave a test and branch to
3673 // be emitted by the caller. In that case, negate the condition.
3674 if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
3675
3676 } else if (op == Token::DELETE) {
3677 Property* property = node->expression()->AsProperty();
3678 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3679 if (property != NULL) {
3680 LoadAndSpill(property->obj());
3681 LoadAndSpill(property->key());
Steve Blockd0582a62009-12-15 09:54:21 +00003682 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003683
3684 } else if (variable != NULL) {
3685 Slot* slot = variable->slot();
3686 if (variable->is_global()) {
3687 LoadGlobal();
3688 __ mov(r0, Operand(variable->name()));
3689 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003690 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003691
3692 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3693 // Look up the context holding the named variable.
3694 frame_->EmitPush(cp);
3695 __ mov(r0, Operand(variable->name()));
3696 frame_->EmitPush(r0);
3697 frame_->CallRuntime(Runtime::kLookupContext, 2);
3698 // r0: context
3699 frame_->EmitPush(r0);
3700 __ mov(r0, Operand(variable->name()));
3701 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003702 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003703
3704 } else {
3705 // Default: Deleting a non-global variable that was not dynamically
3706 // introduced yields false.
3707 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
3708 }
3709
3710 } else {
3711 // Default: Result of deleting expressions is true.
3712 LoadAndSpill(node->expression()); // may have side-effects
3713 frame_->Drop();
3714 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
3715 }
3716 frame_->EmitPush(r0);
3717
3718 } else if (op == Token::TYPEOF) {
3719 // Special case for loading the typeof expression; see comment on
3720 // LoadTypeofExpression().
3721 LoadTypeofExpression(node->expression());
3722 frame_->CallRuntime(Runtime::kTypeof, 1);
3723 frame_->EmitPush(r0); // r0 has result
3724
3725 } else {
3726 LoadAndSpill(node->expression());
3727 frame_->EmitPop(r0);
3728 switch (op) {
3729 case Token::NOT:
3730 case Token::DELETE:
3731 case Token::TYPEOF:
3732 UNREACHABLE(); // handled above
3733 break;
3734
3735 case Token::SUB: {
3736 bool overwrite =
3737 (node->expression()->AsBinaryOperation() != NULL &&
3738 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Leon Clarkee46be812010-01-19 14:06:41 +00003739 GenericUnaryOpStub stub(Token::SUB, overwrite);
Steve Blocka7e24c12009-10-30 11:49:00 +00003740 frame_->CallStub(&stub, 0);
3741 break;
3742 }
3743
3744 case Token::BIT_NOT: {
3745 // smi check
3746 JumpTarget smi_label;
3747 JumpTarget continue_label;
3748 __ tst(r0, Operand(kSmiTagMask));
3749 smi_label.Branch(eq);
3750
3751 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003752 frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003753
3754 continue_label.Jump();
3755 smi_label.Bind();
3756 __ mvn(r0, Operand(r0));
3757 __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
3758 continue_label.Bind();
3759 break;
3760 }
3761
3762 case Token::VOID:
3763 // Since the stack top is cached in r0, popping and then
3764 // pushing a value can be done by just writing to r0.
3765 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3766 break;
3767
3768 case Token::ADD: {
3769 // Smi check.
3770 JumpTarget continue_label;
3771 __ tst(r0, Operand(kSmiTagMask));
3772 continue_label.Branch(eq);
3773 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003774 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003775 continue_label.Bind();
3776 break;
3777 }
3778 default:
3779 UNREACHABLE();
3780 }
3781 frame_->EmitPush(r0); // r0 has result
3782 }
3783 ASSERT(!has_valid_frame() ||
3784 (has_cc() && frame_->height() == original_height) ||
3785 (!has_cc() && frame_->height() == original_height + 1));
3786}
3787
3788
3789void CodeGenerator::VisitCountOperation(CountOperation* node) {
3790#ifdef DEBUG
3791 int original_height = frame_->height();
3792#endif
3793 VirtualFrame::SpilledScope spilled_scope;
3794 Comment cmnt(masm_, "[ CountOperation");
3795
3796 bool is_postfix = node->is_postfix();
3797 bool is_increment = node->op() == Token::INC;
3798
3799 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3800 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3801
3802 // Postfix: Make room for the result.
3803 if (is_postfix) {
3804 __ mov(r0, Operand(0));
3805 frame_->EmitPush(r0);
3806 }
3807
Leon Clarked91b9f72010-01-27 17:25:45 +00003808 // A constant reference is never stored to, so it does not need to be
3809 // treated as a compound assignment reference.
3810 { Reference target(this, node->expression(), !is_const);
Steve Blocka7e24c12009-10-30 11:49:00 +00003811 if (target.is_illegal()) {
3812 // Spoof the virtual frame to have the expected height (one higher
3813 // than on entry).
3814 if (!is_postfix) {
3815 __ mov(r0, Operand(Smi::FromInt(0)));
3816 frame_->EmitPush(r0);
3817 }
3818 ASSERT(frame_->height() == original_height + 1);
3819 return;
3820 }
Steve Blockd0582a62009-12-15 09:54:21 +00003821 target.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00003822 frame_->EmitPop(r0);
3823
3824 JumpTarget slow;
3825 JumpTarget exit;
3826
3827 // Load the value (1) into register r1.
3828 __ mov(r1, Operand(Smi::FromInt(1)));
3829
3830 // Check for smi operand.
3831 __ tst(r0, Operand(kSmiTagMask));
3832 slow.Branch(ne);
3833
3834 // Postfix: Store the old value as the result.
3835 if (is_postfix) {
3836 __ str(r0, frame_->ElementAt(target.size()));
3837 }
3838
3839 // Perform optimistic increment/decrement.
3840 if (is_increment) {
3841 __ add(r0, r0, Operand(r1), SetCC);
3842 } else {
3843 __ sub(r0, r0, Operand(r1), SetCC);
3844 }
3845
3846 // If the increment/decrement didn't overflow, we're done.
3847 exit.Branch(vc);
3848
3849 // Revert optimistic increment/decrement.
3850 if (is_increment) {
3851 __ sub(r0, r0, Operand(r1));
3852 } else {
3853 __ add(r0, r0, Operand(r1));
3854 }
3855
3856 // Slow case: Convert to number.
3857 slow.Bind();
3858 {
3859 // Convert the operand to a number.
3860 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003861 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003862 }
3863 if (is_postfix) {
3864 // Postfix: store to result (on the stack).
3865 __ str(r0, frame_->ElementAt(target.size()));
3866 }
3867
3868 // Compute the new value.
3869 __ mov(r1, Operand(Smi::FromInt(1)));
3870 frame_->EmitPush(r0);
3871 frame_->EmitPush(r1);
3872 if (is_increment) {
3873 frame_->CallRuntime(Runtime::kNumberAdd, 2);
3874 } else {
3875 frame_->CallRuntime(Runtime::kNumberSub, 2);
3876 }
3877
3878 // Store the new value in the target if not const.
3879 exit.Bind();
3880 frame_->EmitPush(r0);
3881 if (!is_const) target.SetValue(NOT_CONST_INIT);
3882 }
3883
3884 // Postfix: Discard the new value and use the old.
3885 if (is_postfix) frame_->EmitPop(r0);
3886 ASSERT(frame_->height() == original_height + 1);
3887}
3888
3889
3890void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3891#ifdef DEBUG
3892 int original_height = frame_->height();
3893#endif
3894 VirtualFrame::SpilledScope spilled_scope;
3895 Comment cmnt(masm_, "[ BinaryOperation");
3896 Token::Value op = node->op();
3897
3898 // According to ECMA-262 section 11.11, page 58, the binary logical
3899 // operators must yield the result of one of the two expressions
3900 // before any ToBoolean() conversions. This means that the value
3901 // produced by a && or || operator is not necessarily a boolean.
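  // For example, '0 || "fallback"' yields the string "fallback" and
  // '1 && 0' yields the number 0, not a boolean.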
3902
3903 // NOTE: If the left hand side produces a materialized value (not in
3904 // the CC register), we force the right hand side to do the
3905 // same. This is necessary because we may have to branch to the exit
3906 // after evaluating the left hand side (due to the shortcut
3907 // semantics), but the compiler must (statically) know if the result
3908 // of compiling the binary operation is materialized or not.
3909
3910 if (op == Token::AND) {
3911 JumpTarget is_true;
3912 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003913 &is_true,
3914 false_target(),
3915 false);
3916 if (has_valid_frame() && !has_cc()) {
3917 // The left-hand side result is on top of the virtual frame.
3918 JumpTarget pop_and_continue;
3919 JumpTarget exit;
3920
3921 __ ldr(r0, frame_->Top()); // Duplicate the stack top.
3922 frame_->EmitPush(r0);
3923 // Avoid popping the result if it converts to 'false' using the
3924 // standard ToBoolean() conversion as described in ECMA-262,
3925 // section 9.2, page 30.
3926 ToBoolean(&pop_and_continue, &exit);
3927 Branch(false, &exit);
3928
3929 // Pop the result of evaluating the first part.
3930 pop_and_continue.Bind();
3931 frame_->EmitPop(r0);
3932
3933 // Evaluate right side expression.
3934 is_true.Bind();
3935 LoadAndSpill(node->right());
3936
3937 // Exit (always with a materialized value).
3938 exit.Bind();
3939 } else if (has_cc() || is_true.is_linked()) {
3940 // The left-hand side is either (a) partially compiled to
3941 // control flow with a final branch left to emit or (b) fully
3942 // compiled to control flow and possibly true.
3943 if (has_cc()) {
3944 Branch(false, false_target());
3945 }
3946 is_true.Bind();
3947 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003948 true_target(),
3949 false_target(),
3950 false);
3951 } else {
3952 // Nothing to do.
3953 ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
3954 }
3955
3956 } else if (op == Token::OR) {
3957 JumpTarget is_false;
3958 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003959 true_target(),
3960 &is_false,
3961 false);
3962 if (has_valid_frame() && !has_cc()) {
3963 // The left-hand side result is on top of the virtual frame.
3964 JumpTarget pop_and_continue;
3965 JumpTarget exit;
3966
3967 __ ldr(r0, frame_->Top());
3968 frame_->EmitPush(r0);
3969 // Avoid popping the result if it converts to 'true' using the
3970 // standard ToBoolean() conversion as described in ECMA-262,
3971 // section 9.2, page 30.
3972 ToBoolean(&exit, &pop_and_continue);
3973 Branch(true, &exit);
3974
3975 // Pop the result of evaluating the first part.
3976 pop_and_continue.Bind();
3977 frame_->EmitPop(r0);
3978
3979 // Evaluate right side expression.
3980 is_false.Bind();
3981 LoadAndSpill(node->right());
3982
3983 // Exit (always with a materialized value).
3984 exit.Bind();
3985 } else if (has_cc() || is_false.is_linked()) {
3986 // The left-hand side is either (a) partially compiled to
3987 // control flow with a final branch left to emit or (b) fully
3988 // compiled to control flow and possibly false.
3989 if (has_cc()) {
3990 Branch(true, true_target());
3991 }
3992 is_false.Bind();
3993 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003994 true_target(),
3995 false_target(),
3996 false);
3997 } else {
3998 // Nothing to do.
3999 ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
4000 }
4001
4002 } else {
4003 // Optimize for the case where (at least) one of the expressions
4004 // is a literal small integer.
4005 Literal* lliteral = node->left()->AsLiteral();
4006 Literal* rliteral = node->right()->AsLiteral();
4007 // NOTE: The code below assumes that the slow cases (calls to runtime)
4008 // never return a constant/immutable object.
4009 bool overwrite_left =
4010 (node->left()->AsBinaryOperation() != NULL &&
4011 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
4012 bool overwrite_right =
4013 (node->right()->AsBinaryOperation() != NULL &&
4014 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
4015
4016 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
4017 LoadAndSpill(node->left());
4018 SmiOperation(node->op(),
4019 rliteral->handle(),
4020 false,
4021 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
4022
4023 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
4024 LoadAndSpill(node->right());
4025 SmiOperation(node->op(),
4026 lliteral->handle(),
4027 true,
4028 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
4029
4030 } else {
4031 OverwriteMode overwrite_mode = NO_OVERWRITE;
4032 if (overwrite_left) {
4033 overwrite_mode = OVERWRITE_LEFT;
4034 } else if (overwrite_right) {
4035 overwrite_mode = OVERWRITE_RIGHT;
4036 }
4037 LoadAndSpill(node->left());
4038 LoadAndSpill(node->right());
4039 GenericBinaryOperation(node->op(), overwrite_mode);
4040 }
4041 frame_->EmitPush(r0);
4042 }
4043 ASSERT(!has_valid_frame() ||
4044 (has_cc() && frame_->height() == original_height) ||
4045 (!has_cc() && frame_->height() == original_height + 1));
4046}
4047
4048
4049void CodeGenerator::VisitThisFunction(ThisFunction* node) {
4050#ifdef DEBUG
4051 int original_height = frame_->height();
4052#endif
4053 VirtualFrame::SpilledScope spilled_scope;
4054 __ ldr(r0, frame_->Function());
4055 frame_->EmitPush(r0);
4056 ASSERT(frame_->height() == original_height + 1);
4057}
4058
4059
4060void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
4061#ifdef DEBUG
4062 int original_height = frame_->height();
4063#endif
4064 VirtualFrame::SpilledScope spilled_scope;
4065 Comment cmnt(masm_, "[ CompareOperation");
4066
4067 // Get the expressions from the node.
4068 Expression* left = node->left();
4069 Expression* right = node->right();
4070 Token::Value op = node->op();
4071
4072 // To make null checks efficient, we check if either left or right is the
4073 // literal 'null'. If so, we optimize the code by inlining a null check
4074 // instead of calling the (very) general runtime routine for checking
4075 // equality.
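  // The inlined code makes 'x == null' true exactly when x is null, undefined
  // or an undetectable object, while 'x === null' is only true for null itself.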
4076 if (op == Token::EQ || op == Token::EQ_STRICT) {
4077 bool left_is_null =
4078 left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
4079 bool right_is_null =
4080 right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
4081 // The 'null' value can only be equal to 'null' or 'undefined'.
4082 if (left_is_null || right_is_null) {
4083 LoadAndSpill(left_is_null ? right : left);
4084 frame_->EmitPop(r0);
4085 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4086 __ cmp(r0, ip);
4087
4088 // The 'null' value is only equal to 'undefined' if using non-strict
4089 // comparisons.
4090 if (op != Token::EQ_STRICT) {
4091 true_target()->Branch(eq);
4092
4093 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4094 __ cmp(r0, Operand(ip));
4095 true_target()->Branch(eq);
4096
4097 __ tst(r0, Operand(kSmiTagMask));
4098 false_target()->Branch(eq);
4099
4100 // It can be an undetectable object.
4101 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
4102 __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
4103 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
4104 __ cmp(r0, Operand(1 << Map::kIsUndetectable));
4105 }
4106
4107 cc_reg_ = eq;
4108 ASSERT(has_cc() && frame_->height() == original_height);
4109 return;
4110 }
4111 }
4112
4113 // To make typeof testing for natives implemented in JavaScript really
4114 // efficient, we generate special code for expressions of the form:
4115 // 'typeof <expression> == <string>'.
4116 UnaryOperation* operation = left->AsUnaryOperation();
4117 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
4118 (operation != NULL && operation->op() == Token::TYPEOF) &&
4119 (right->AsLiteral() != NULL &&
4120 right->AsLiteral()->handle()->IsString())) {
4121 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
4122
4123 // Load the operand, move it to register r1.
4124 LoadTypeofExpression(operation->expression());
4125 frame_->EmitPop(r1);
4126
4127 if (check->Equals(Heap::number_symbol())) {
4128 __ tst(r1, Operand(kSmiTagMask));
4129 true_target()->Branch(eq);
4130 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4131 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4132 __ cmp(r1, ip);
4133 cc_reg_ = eq;
4134
4135 } else if (check->Equals(Heap::string_symbol())) {
4136 __ tst(r1, Operand(kSmiTagMask));
4137 false_target()->Branch(eq);
4138
4139 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4140
4141 // It can be an undetectable string object.
4142 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
4143 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
4144 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
4145 false_target()->Branch(eq);
4146
4147 __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
4148 __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
4149 cc_reg_ = lt;
4150
4151 } else if (check->Equals(Heap::boolean_symbol())) {
4152 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4153 __ cmp(r1, ip);
4154 true_target()->Branch(eq);
4155 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4156 __ cmp(r1, ip);
4157 cc_reg_ = eq;
4158
4159 } else if (check->Equals(Heap::undefined_symbol())) {
4160 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4161 __ cmp(r1, ip);
4162 true_target()->Branch(eq);
4163
4164 __ tst(r1, Operand(kSmiTagMask));
4165 false_target()->Branch(eq);
4166
4167 // It can be an undetectable object.
4168 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4169 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
4170 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
4171 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
4172
4173 cc_reg_ = eq;
4174
4175 } else if (check->Equals(Heap::function_symbol())) {
4176 __ tst(r1, Operand(kSmiTagMask));
4177 false_target()->Branch(eq);
Steve Blockd0582a62009-12-15 09:54:21 +00004178 Register map_reg = r2;
4179 __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
4180 true_target()->Branch(eq);
4181 // Regular expressions are callable so typeof == 'function'.
4182 __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004183 cc_reg_ = eq;
4184
4185 } else if (check->Equals(Heap::object_symbol())) {
4186 __ tst(r1, Operand(kSmiTagMask));
4187 false_target()->Branch(eq);
4188
Steve Blocka7e24c12009-10-30 11:49:00 +00004189 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4190 __ cmp(r1, ip);
4191 true_target()->Branch(eq);
4192
Steve Blockd0582a62009-12-15 09:54:21 +00004193 Register map_reg = r2;
4194 __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
4195 false_target()->Branch(eq);
4196
Steve Blocka7e24c12009-10-30 11:49:00 +00004197 // It can be an undetectable object.
Steve Blockd0582a62009-12-15 09:54:21 +00004198 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00004199 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
4200 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
4201 false_target()->Branch(eq);
4202
Steve Blockd0582a62009-12-15 09:54:21 +00004203 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
4204 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00004205 false_target()->Branch(lt);
Steve Blockd0582a62009-12-15 09:54:21 +00004206 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00004207 cc_reg_ = le;
4208
4209 } else {
4210 // Uncommon case: typeof testing against a string literal that is
4211 // never returned from the typeof operator.
4212 false_target()->Jump();
4213 }
4214 ASSERT(!has_valid_frame() ||
4215 (has_cc() && frame_->height() == original_height));
4216 return;
4217 }
4218
4219 switch (op) {
4220 case Token::EQ:
4221 Comparison(eq, left, right, false);
4222 break;
4223
4224 case Token::LT:
4225 Comparison(lt, left, right);
4226 break;
4227
4228 case Token::GT:
4229 Comparison(gt, left, right);
4230 break;
4231
4232 case Token::LTE:
4233 Comparison(le, left, right);
4234 break;
4235
4236 case Token::GTE:
4237 Comparison(ge, left, right);
4238 break;
4239
4240 case Token::EQ_STRICT:
4241 Comparison(eq, left, right, true);
4242 break;
4243
4244 case Token::IN: {
4245 LoadAndSpill(left);
4246 LoadAndSpill(right);
Steve Blockd0582a62009-12-15 09:54:21 +00004247 frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004248 frame_->EmitPush(r0);
4249 break;
4250 }
4251
4252 case Token::INSTANCEOF: {
4253 LoadAndSpill(left);
4254 LoadAndSpill(right);
4255 InstanceofStub stub;
4256 frame_->CallStub(&stub, 2);
4257 // At this point if instanceof succeeded then r0 == 0.
4258 __ tst(r0, Operand(r0));
4259 cc_reg_ = eq;
4260 break;
4261 }
4262
4263 default:
4264 UNREACHABLE();
4265 }
4266 ASSERT((has_cc() && frame_->height() == original_height) ||
4267 (!has_cc() && frame_->height() == original_height + 1));
4268}
4269
4270
Leon Clarked91b9f72010-01-27 17:25:45 +00004271void CodeGenerator::EmitKeyedLoad(bool is_global) {
4272 Comment cmnt(masm_, "[ Load from keyed Property");
4273 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
4274 RelocInfo::Mode rmode = is_global
4275 ? RelocInfo::CODE_TARGET_CONTEXT
4276 : RelocInfo::CODE_TARGET;
4277 frame_->CallCodeObject(ic, rmode, 0);
4278}
4279
4280
Steve Blocka7e24c12009-10-30 11:49:00 +00004281#ifdef DEBUG
4282bool CodeGenerator::HasValidEntryRegisters() { return true; }
4283#endif
4284
4285
4286#undef __
4287#define __ ACCESS_MASM(masm)
4288
4289
4290Handle<String> Reference::GetName() {
4291 ASSERT(type_ == NAMED);
4292 Property* property = expression_->AsProperty();
4293 if (property == NULL) {
4294 // Global variable reference treated as a named property reference.
4295 VariableProxy* proxy = expression_->AsVariableProxy();
4296 ASSERT(proxy->AsVariable() != NULL);
4297 ASSERT(proxy->AsVariable()->is_global());
4298 return proxy->name();
4299 } else {
4300 Literal* raw_name = property->key()->AsLiteral();
4301 ASSERT(raw_name != NULL);
4302 return Handle<String>(String::cast(*raw_name->handle()));
4303 }
4304}
4305
4306
Steve Blockd0582a62009-12-15 09:54:21 +00004307void Reference::GetValue() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004308 ASSERT(cgen_->HasValidEntryRegisters());
4309 ASSERT(!is_illegal());
4310 ASSERT(!cgen_->has_cc());
4311 MacroAssembler* masm = cgen_->masm();
4312 Property* property = expression_->AsProperty();
4313 if (property != NULL) {
4314 cgen_->CodeForSourcePosition(property->position());
4315 }
4316
4317 switch (type_) {
4318 case SLOT: {
4319 Comment cmnt(masm, "[ Load from Slot");
4320 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
4321 ASSERT(slot != NULL);
Steve Blockd0582a62009-12-15 09:54:21 +00004322 cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00004323 break;
4324 }
4325
4326 case NAMED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00004327 VirtualFrame* frame = cgen_->frame();
4328 Comment cmnt(masm, "[ Load from named Property");
4329 Handle<String> name(GetName());
4330 Variable* var = expression_->AsVariableProxy()->AsVariable();
4331 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
4332 // Setup the name register.
4333 Result name_reg(r2);
4334 __ mov(r2, Operand(name));
4335 ASSERT(var == NULL || var->is_global());
4336 RelocInfo::Mode rmode = (var == NULL)
4337 ? RelocInfo::CODE_TARGET
4338 : RelocInfo::CODE_TARGET_CONTEXT;
4339 frame->CallCodeObject(ic, rmode, &name_reg, 0);
4340 frame->EmitPush(r0);
4341 break;
4342 }
4343
4344 case KEYED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00004345 // TODO(181): Implement inlined version of array indexing once
4346 // loop nesting is properly tracked on ARM.
Steve Blocka7e24c12009-10-30 11:49:00 +00004347 ASSERT(property != NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00004348 Variable* var = expression_->AsVariableProxy()->AsVariable();
4349 ASSERT(var == NULL || var->is_global());
Leon Clarked91b9f72010-01-27 17:25:45 +00004350 cgen_->EmitKeyedLoad(var != NULL);
4351 cgen_->frame()->EmitPush(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00004352 break;
4353 }
4354
4355 default:
4356 UNREACHABLE();
4357 }
Leon Clarked91b9f72010-01-27 17:25:45 +00004358
4359 if (!persist_after_get_) {
4360 cgen_->UnloadReference(this);
4361 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004362}
4363
4364
4365void Reference::SetValue(InitState init_state) {
4366 ASSERT(!is_illegal());
4367 ASSERT(!cgen_->has_cc());
4368 MacroAssembler* masm = cgen_->masm();
4369 VirtualFrame* frame = cgen_->frame();
4370 Property* property = expression_->AsProperty();
4371 if (property != NULL) {
4372 cgen_->CodeForSourcePosition(property->position());
4373 }
4374
4375 switch (type_) {
4376 case SLOT: {
4377 Comment cmnt(masm, "[ Store to Slot");
4378 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
Leon Clarkee46be812010-01-19 14:06:41 +00004379 cgen_->StoreToSlot(slot, init_state);
Steve Blocka7e24c12009-10-30 11:49:00 +00004380 break;
4381 }
4382
4383 case NAMED: {
4384 Comment cmnt(masm, "[ Store to named Property");
4385 // Call the appropriate IC code.
4386 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
4387 Handle<String> name(GetName());
4388
4389 Result value(r0);
4390 frame->EmitPop(r0);
4391
4392 // Setup the name register.
4393 Result property_name(r2);
4394 __ mov(r2, Operand(name));
4395 frame->CallCodeObject(ic,
4396 RelocInfo::CODE_TARGET,
4397 &value,
4398 &property_name,
4399 0);
4400 frame->EmitPush(r0);
4401 break;
4402 }
4403
4404 case KEYED: {
4405 Comment cmnt(masm, "[ Store to keyed Property");
4406 Property* property = expression_->AsProperty();
4407 ASSERT(property != NULL);
4408 cgen_->CodeForSourcePosition(property->position());
4409
4410 // Call IC code.
4411 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
4412 // TODO(1222589): Make the IC grab the values from the stack.
4413 Result value(r0);
4414 frame->EmitPop(r0); // value
4415 frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
4416 frame->EmitPush(r0);
4417 break;
4418 }
4419
4420 default:
4421 UNREACHABLE();
4422 }
Leon Clarked91b9f72010-01-27 17:25:45 +00004423 cgen_->UnloadReference(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00004424}
4425
4426
Leon Clarkee46be812010-01-19 14:06:41 +00004427void FastNewClosureStub::Generate(MacroAssembler* masm) {
4428 // Clone the boilerplate in new space. Set the context to the
4429 // current context in cp.
4430 Label gc;
4431
4432 // Pop the boilerplate function from the stack.
4433 __ pop(r3);
4434
4435 // Attempt to allocate new JSFunction in new space.
4436 __ AllocateInNewSpace(JSFunction::kSize / kPointerSize,
4437 r0,
4438 r1,
4439 r2,
4440 &gc,
4441 TAG_OBJECT);
4442
4443 // Compute the function map in the current global context and set that
4444 // as the map of the allocated object.
4445 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4446 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
4447 __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
4448 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4449
4450 // Clone the rest of the boilerplate fields. We don't have to update
4451 // the write barrier because the allocated object is in new space.
4452 for (int offset = kPointerSize;
4453 offset < JSFunction::kSize;
4454 offset += kPointerSize) {
4455 if (offset == JSFunction::kContextOffset) {
4456 __ str(cp, FieldMemOperand(r0, offset));
4457 } else {
4458 __ ldr(r1, FieldMemOperand(r3, offset));
4459 __ str(r1, FieldMemOperand(r0, offset));
4460 }
4461 }
4462
4463 // Return result. The argument boilerplate has been popped already.
4464 __ Ret();
4465
4466 // Create a new closure through the slower runtime call.
4467 __ bind(&gc);
4468 __ push(cp);
4469 __ push(r3);
4470 __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
4471}
4472
4473
4474void FastNewContextStub::Generate(MacroAssembler* masm) {
4475 // Try to allocate the context in new space.
4476 Label gc;
4477 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
4478
4479 // Attempt to allocate the context in new space.
4480 __ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize),
4481 r0,
4482 r1,
4483 r2,
4484 &gc,
4485 TAG_OBJECT);
4486
4487 // Load the function from the stack.
4488 __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
4489
4490 // Setup the object header.
4491 __ LoadRoot(r2, Heap::kContextMapRootIndex);
4492 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4493 __ mov(r2, Operand(length));
4494 __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
4495
4496 // Setup the fixed slots.
4497 __ mov(r1, Operand(Smi::FromInt(0)));
4498 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
4499 __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
4500 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4501 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
4502
4503 // Copy the global object from the surrounding context.
4504 __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4505 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
4506
4507 // Initialize the rest of the slots to undefined.
4508 __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
4509 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
4510 __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
4511 }
4512
4513 // Remove the on-stack argument and return.
4514 __ mov(cp, r0);
4515 __ pop();
4516 __ Ret();
4517
4518 // Need to collect. Call into runtime system.
4519 __ bind(&gc);
4520 __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
4521}
4522
4523
Steve Blocka7e24c12009-10-30 11:49:00 +00004524// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
4525// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
4526// (31 instead of 32).
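// The pre-ARM5 fallback below is a branchless binary search: it tests the top
// 16, 8, 4, 2 and finally 1 bits in turn; whenever they are all zero it adds
// that amount to the count and, except in the last step, shifts the word up
// by the same amount (using conditional execution on 'eq').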
4527static void CountLeadingZeros(
4528 MacroAssembler* masm,
4529 Register source,
4530 Register scratch,
4531 Register zeros) {
4532#ifdef CAN_USE_ARMV5_INSTRUCTIONS
4533 __ clz(zeros, source); // This instruction is only supported after ARM5.
4534#else
4535 __ mov(zeros, Operand(0));
4536 __ mov(scratch, source);
4537 // Top 16.
4538 __ tst(scratch, Operand(0xffff0000));
4539 __ add(zeros, zeros, Operand(16), LeaveCC, eq);
4540 __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
4541 // Top 8.
4542 __ tst(scratch, Operand(0xff000000));
4543 __ add(zeros, zeros, Operand(8), LeaveCC, eq);
4544 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
4545 // Top 4.
4546 __ tst(scratch, Operand(0xf0000000));
4547 __ add(zeros, zeros, Operand(4), LeaveCC, eq);
4548 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
4549 // Top 2.
4550 __ tst(scratch, Operand(0xc0000000));
4551 __ add(zeros, zeros, Operand(2), LeaveCC, eq);
4552 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
4553 // Top bit.
4554 __ tst(scratch, Operand(0x80000000u));
4555 __ add(zeros, zeros, Operand(1), LeaveCC, eq);
4556#endif
4557}
4558
4559
4560// Takes a Smi and converts to an IEEE 64 bit floating point value in two
4561// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
4562// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
4563// scratch register. Destroys the source register. No GC occurs during this
4564// stub so you don't have to set up the frame.
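// For example, the Smi 1 is converted to the exponent word 0x3FF00000 and the
// mantissa word 0x00000000 (IEEE 1.0); -1 produces the same words except for
// the sign bit (0xBFF00000).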
4565class ConvertToDoubleStub : public CodeStub {
4566 public:
4567 ConvertToDoubleStub(Register result_reg_1,
4568 Register result_reg_2,
4569 Register source_reg,
4570 Register scratch_reg)
4571 : result1_(result_reg_1),
4572 result2_(result_reg_2),
4573 source_(source_reg),
4574 zeros_(scratch_reg) { }
4575
4576 private:
4577 Register result1_;
4578 Register result2_;
4579 Register source_;
4580 Register zeros_;
4581
4582 // Minor key encoding in 16 bits.
4583 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4584 class OpBits: public BitField<Token::Value, 2, 14> {};
4585
4586 Major MajorKey() { return ConvertToDouble; }
4587 int MinorKey() {
4588 // Encode the parameters in a unique 16 bit value.
4589 return result1_.code() +
4590 (result2_.code() << 4) +
4591 (source_.code() << 8) +
4592 (zeros_.code() << 12);
4593 }
4594
4595 void Generate(MacroAssembler* masm);
4596
4597 const char* GetName() { return "ConvertToDoubleStub"; }
4598
4599#ifdef DEBUG
4600 void Print() { PrintF("ConvertToDoubleStub\n"); }
4601#endif
4602};
4603
4604
4605void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
4606#ifndef BIG_ENDIAN_FLOATING_POINT
4607 Register exponent = result1_;
4608 Register mantissa = result2_;
4609#else
4610 Register exponent = result2_;
4611 Register mantissa = result1_;
4612#endif
4613 Label not_special;
4614 // Convert from Smi to integer.
4615 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
4616 // Move sign bit from source to destination. This works because the sign bit
4617 // in the exponent word of the double has the same position and polarity as
4618 // the 2's complement sign bit in a Smi.
4619 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4620 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
4621 // Subtract from 0 if source was negative.
4622 __ rsb(source_, source_, Operand(0), LeaveCC, ne);
4623 __ cmp(source_, Operand(1));
4624 __ b(gt, &not_special);
4625
4626 // We have -1, 0 or 1, which we treat specially.
4627 __ cmp(source_, Operand(0));
4628 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
4629 static const uint32_t exponent_word_for_1 =
4630 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
4631 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
4632 // 1, 0 and -1 all have 0 for the second word.
4633 __ mov(mantissa, Operand(0));
4634 __ Ret();
4635
4636 __ bind(&not_special);
4637 // Count leading zeros. Uses result2 for a scratch register on pre-ARM5.
4638 // Gets the wrong answer for 0, but we already checked for that case above.
4639 CountLeadingZeros(masm, source_, mantissa, zeros_);
4640 // Compute exponent and or it into the exponent register.
4641 // We use result2 as a scratch register here.
4642 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
4643 __ orr(exponent,
4644 exponent,
4645 Operand(mantissa, LSL, HeapNumber::kExponentShift));
4646 // Shift up the source chopping the top bit off.
4647 __ add(zeros_, zeros_, Operand(1));
4648 // This wouldn't work for 1.0 or -1.0, as the shift amount would be 32, which gives 0.
4649 __ mov(source_, Operand(source_, LSL, zeros_));
4650 // Compute lower part of fraction (last 12 bits).
4651 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
4652 // And the top (top 20 bits).
4653 __ orr(exponent,
4654 exponent,
4655 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
4656 __ Ret();
4657}
4658
4659
4660// This stub can convert a signed int32 to a heap number (double). It does
4661// not work for int32s that are in Smi range! No GC occurs during this stub
4662// so you don't have to set up the frame.
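// On 32-bit targets smis cover the range [-2^30, 2^30 - 1], so any int32 that
// reaches this stub has an absolute value of at least 2^30; its binary
// exponent is therefore exactly 30, except for the most negative int32, which
// Generate() treats as a special case.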
4663class WriteInt32ToHeapNumberStub : public CodeStub {
4664 public:
4665 WriteInt32ToHeapNumberStub(Register the_int,
4666 Register the_heap_number,
4667 Register scratch)
4668 : the_int_(the_int),
4669 the_heap_number_(the_heap_number),
4670 scratch_(scratch) { }
4671
4672 private:
4673 Register the_int_;
4674 Register the_heap_number_;
4675 Register scratch_;
4676
4677 // Minor key encoding in 16 bits.
4678 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4679 class OpBits: public BitField<Token::Value, 2, 14> {};
4680
4681 Major MajorKey() { return WriteInt32ToHeapNumber; }
4682 int MinorKey() {
4683 // Encode the parameters in a unique 16 bit value.
4684 return the_int_.code() +
4685 (the_heap_number_.code() << 4) +
4686 (scratch_.code() << 8);
4687 }
4688
4689 void Generate(MacroAssembler* masm);
4690
4691 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
4692
4693#ifdef DEBUG
4694 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
4695#endif
4696};
4697
4698
4699// See comment for class.
Steve Blockd0582a62009-12-15 09:54:21 +00004700void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004701 Label max_negative_int;
4702 // the_int_ has the answer which is a signed int32 but not a Smi.
4703 // We test for the special value that has a different exponent. This test
4704 // has the neat side effect of setting the flags according to the sign.
4705 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4706 __ cmp(the_int_, Operand(0x80000000u));
4707 __ b(eq, &max_negative_int);
4708 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
4709 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
4710 uint32_t non_smi_exponent =
4711 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
4712 __ mov(scratch_, Operand(non_smi_exponent));
4713 // Set the sign bit in scratch_ if the value was negative.
4714 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
4715 // Subtract from 0 if the value was negative.
4716 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
4717 // We should be masking the implicit first digit of the mantissa away here,
4718 // but it just ends up combining harmlessly with the last digit of the
4719 // exponent that happens to be 1. The sign bit is 0 so we shift by 10 to get
4720 // the most significant 1 to hit the last bit of the 12-bit sign and exponent.
4721 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
4722 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
4723 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
4724 __ str(scratch_, FieldMemOperand(the_heap_number_,
4725 HeapNumber::kExponentOffset));
4726 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
4727 __ str(scratch_, FieldMemOperand(the_heap_number_,
4728 HeapNumber::kMantissaOffset));
4729 __ Ret();
4730
4731 __ bind(&max_negative_int);
4732 // The max negative int32 is stored as a positive number in the mantissa of
4733 // a double because it uses a sign bit instead of using two's complement.
4734 // The actual mantissa bits stored are all 0 because the implicit most
4735 // significant 1 bit is not stored.
4736 non_smi_exponent += 1 << HeapNumber::kExponentShift;
4737 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
4738 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
4739 __ mov(ip, Operand(0));
4740 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
4741 __ Ret();
4742}
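
// Illustrative sketch only (not part of the stub above): the same encoding in
// plain C++. Every int32 outside Smi range has magnitude >= 2^30, so the
// biased exponent is always 1023 + 30 and only the mantissa changes; INT_MIN
// gets the separate exponent-31 encoding, mirroring max_negative_int. The
// helper name and signature are made up for this sketch.
static inline void SketchWriteInt32AsDouble(int32_t value,
                                            uint32_t* hi, uint32_t* lo) {
  uint32_t sign = 0;
  uint32_t magnitude = static_cast<uint32_t>(value);
  if (value < 0) {
    sign = 0x80000000u;
    magnitude = 0u - magnitude;                    // rsb from 0, as above.
  }
  if (magnitude == 0x80000000u) {                  // The max negative int32.
    *hi = sign | ((1023u + 31u) << 20);            // 1.0 * 2^31, negative.
    *lo = 0;
    return;
  }
  const uint32_t exponent = (1023u + 30u) << 20;   // Biased exponent of 30.
  // Bit 30 of the magnitude is the implicit 1; shifting right by 10 lands it
  // on the exponent's lowest bit, which is already 1, so it merges harmlessly.
  *hi = sign | exponent | (magnitude >> 10);
  *lo = magnitude << 22;
}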
4743
4744
4745// Handle the case where the lhs and rhs are the same object.
4746// Equality is almost reflexive (everything but NaN), so this is a test
4747// for "identity and not NaN".
4748static void EmitIdenticalObjectComparison(MacroAssembler* masm,
4749 Label* slow,
Leon Clarkee46be812010-01-19 14:06:41 +00004750 Condition cc,
4751 bool never_nan_nan) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004752 Label not_identical;
Leon Clarkee46be812010-01-19 14:06:41 +00004753 Label heap_number, return_equal;
4754 Register exp_mask_reg = r5;
Steve Blocka7e24c12009-10-30 11:49:00 +00004755 __ cmp(r0, Operand(r1));
4756 __ b(ne, &not_identical);
4757
Leon Clarkee46be812010-01-19 14:06:41 +00004758 // The two objects are identical. If we know that one of them isn't NaN then
4759 // we now know they test equal.
4760 if (cc != eq || !never_nan_nan) {
4761 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004762
Leon Clarkee46be812010-01-19 14:06:41 +00004763 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
4764 // so we do the second best thing - test it ourselves.
4765 // They are both equal and they are not both Smis so both of them are not
4766 // Smis. If it's not a heap number, then return equal.
4767 if (cc == lt || cc == gt) {
4768 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004769 __ b(ge, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00004770 } else {
4771 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4772 __ b(eq, &heap_number);
4773 // Comparing JS objects with <=, >= is complicated.
4774 if (cc != eq) {
4775 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
4776 __ b(ge, slow);
4777 // Normally here we fall through to return_equal, but undefined is
4778 // special: (undefined == undefined) == true, but
4779 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
4780 if (cc == le || cc == ge) {
4781 __ cmp(r4, Operand(ODDBALL_TYPE));
4782 __ b(ne, &return_equal);
4783 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4784 __ cmp(r0, Operand(r2));
4785 __ b(ne, &return_equal);
4786 if (cc == le) {
4787 // undefined <= undefined should fail.
4788 __ mov(r0, Operand(GREATER));
4789 } else {
4790 // undefined >= undefined should fail.
4791 __ mov(r0, Operand(LESS));
4792 }
4793 __ mov(pc, Operand(lr)); // Return.
Steve Blockd0582a62009-12-15 09:54:21 +00004794 }
Steve Blockd0582a62009-12-15 09:54:21 +00004795 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004796 }
4797 }
Leon Clarkee46be812010-01-19 14:06:41 +00004798
Steve Blocka7e24c12009-10-30 11:49:00 +00004799 __ bind(&return_equal);
4800 if (cc == lt) {
4801 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
4802 } else if (cc == gt) {
4803 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
4804 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00004805 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
Steve Blocka7e24c12009-10-30 11:49:00 +00004806 }
4807 __ mov(pc, Operand(lr)); // Return.
4808
Leon Clarkee46be812010-01-19 14:06:41 +00004809 if (cc != eq || !never_nan_nan) {
4810 // For less and greater we don't have to check for NaN since the result of
4811 // x < x is false regardless. For the others here is some code to check
4812 // for NaN.
4813 if (cc != lt && cc != gt) {
4814 __ bind(&heap_number);
4815 // It is a heap number, so return non-equal if it's NaN and equal if it's
4816 // not NaN.
Steve Blocka7e24c12009-10-30 11:49:00 +00004817
Leon Clarkee46be812010-01-19 14:06:41 +00004818 // The representation of NaN values has all exponent bits (52..62) set,
4819 // and not all mantissa bits (0..51) clear.
4820 // Read top bits of double representation (second word of value).
4821 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
4822 // Test that exponent bits are all set.
4823 __ and_(r3, r2, Operand(exp_mask_reg));
4824 __ cmp(r3, Operand(exp_mask_reg));
4825 __ b(ne, &return_equal);
4826
4827 // Shift out flag and all exponent bits, retaining only mantissa.
4828 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
4829 // Or with all low-bits of mantissa.
4830 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
4831 __ orr(r0, r3, Operand(r2), SetCC);
4832 // For equal we already have the right value in r0: Return zero (equal)
4833 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
4834 // not (it's a NaN). For <= and >= we need to load r0 with the failing
4835 // value if it's a NaN.
4836 if (cc != eq) {
4837 // All-zero means Infinity means equal.
4838 __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
4839 if (cc == le) {
4840 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
4841 } else {
4842 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
4843 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004844 }
Leon Clarkee46be812010-01-19 14:06:41 +00004845 __ mov(pc, Operand(lr)); // Return.
Steve Blocka7e24c12009-10-30 11:49:00 +00004846 }
Leon Clarkee46be812010-01-19 14:06:41 +00004847 // No fall through here.
Steve Blocka7e24c12009-10-30 11:49:00 +00004848 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004849
4850 __ bind(&not_identical);
4851}
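
// Illustrative sketch only: the "failure value" convention used above. The
// caller tests the returned integer against zero with the original condition,
// so for NaN (and for undefined compared with <= or >=) the code loads a
// value on the wrong side of zero, which makes every relational test fail.
// The helper below is a made-up summary of that rule, not code from the stub.
static inline int SketchFailingCompareResult(bool is_less_or_less_equal) {
  const int kSketchLess = -1;
  const int kSketchGreater = 1;
  // For < and <= return something "greater"; for > and >= return something
  // "less". Either way 'result cc 0' is false afterwards.
  return is_less_or_less_equal ? kSketchGreater : kSketchLess;
}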
4852
4853
4854// See comment at call site.
4855static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Leon Clarkee46be812010-01-19 14:06:41 +00004856 Label* lhs_not_nan,
Steve Blocka7e24c12009-10-30 11:49:00 +00004857 Label* slow,
4858 bool strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004859 Label rhs_is_smi;
Steve Blocka7e24c12009-10-30 11:49:00 +00004860 __ tst(r0, Operand(kSmiTagMask));
Leon Clarked91b9f72010-01-27 17:25:45 +00004861 __ b(eq, &rhs_is_smi);
Steve Blocka7e24c12009-10-30 11:49:00 +00004862
Leon Clarked91b9f72010-01-27 17:25:45 +00004863 // Lhs is a Smi. Check whether the rhs is a heap number.
Steve Blocka7e24c12009-10-30 11:49:00 +00004864 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4865 if (strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004866 // If rhs is not a number and lhs is a Smi then strict equality cannot
Steve Blocka7e24c12009-10-30 11:49:00 +00004867 // succeed. Return non-equal (r0 is already not zero)
4868 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4869 } else {
4870 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4871 // the runtime.
4872 __ b(ne, slow);
4873 }
4874
Leon Clarked91b9f72010-01-27 17:25:45 +00004875 // Lhs (r1) is a smi, rhs (r0) is a number.
Steve Blockd0582a62009-12-15 09:54:21 +00004876 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004877 // Convert lhs to a double in d7.
Steve Blockd0582a62009-12-15 09:54:21 +00004878 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00004879 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
4880 __ vmov(s15, r7);
4881 __ vcvt(d7, s15);
4882 // Load the double from rhs, tagged HeapNumber r0, to d6.
4883 __ sub(r7, r0, Operand(kHeapObjectTag));
4884 __ vldr(d6, r7, HeapNumber::kValueOffset);
Steve Blockd0582a62009-12-15 09:54:21 +00004885 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00004886 __ push(lr);
4887 // Convert lhs to a double in r2, r3.
Steve Blockd0582a62009-12-15 09:54:21 +00004888 __ mov(r7, Operand(r1));
4889 ConvertToDoubleStub stub1(r3, r2, r7, r6);
4890 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
Leon Clarked91b9f72010-01-27 17:25:45 +00004891 // Load rhs to a double in r0, r1.
4892 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4893 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
4894 __ pop(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00004895 }
4896
Steve Blocka7e24c12009-10-30 11:49:00 +00004897 // We now have both loaded as doubles but we can skip the lhs nan check
Leon Clarked91b9f72010-01-27 17:25:45 +00004898 // since it's a smi.
Leon Clarkee46be812010-01-19 14:06:41 +00004899 __ jmp(lhs_not_nan);
Steve Blocka7e24c12009-10-30 11:49:00 +00004900
Leon Clarked91b9f72010-01-27 17:25:45 +00004901 __ bind(&rhs_is_smi);
4902 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
Steve Blocka7e24c12009-10-30 11:49:00 +00004903 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
4904 if (strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004905 // If lhs is not a number and rhs is a smi then strict equality cannot
Steve Blocka7e24c12009-10-30 11:49:00 +00004906 // succeed. Return non-equal.
4907 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
4908 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4909 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00004910 // Smi compared non-strictly with a non-smi non-heap-number. Call
Steve Blocka7e24c12009-10-30 11:49:00 +00004911 // the runtime.
4912 __ b(ne, slow);
4913 }
4914
Leon Clarked91b9f72010-01-27 17:25:45 +00004915 // Rhs (r0) is a smi, lhs (r1) is a heap number.
Steve Blockd0582a62009-12-15 09:54:21 +00004916 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004917 // Convert rhs to a double in d6.
Steve Blockd0582a62009-12-15 09:54:21 +00004918 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00004919 // Load the double from lhs, tagged HeapNumber r1, to d7.
4920 __ sub(r7, r1, Operand(kHeapObjectTag));
4921 __ vldr(d7, r7, HeapNumber::kValueOffset);
4922 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
4923 __ vmov(s13, r7);
4924 __ vcvt(d6, s13);
Steve Blockd0582a62009-12-15 09:54:21 +00004925 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00004926 __ push(lr);
4927 // Load lhs to a double in r2, r3.
4928 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4929 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4930 // Convert rhs to a double in r0, r1.
Steve Blockd0582a62009-12-15 09:54:21 +00004931 __ mov(r7, Operand(r0));
4932 ConvertToDoubleStub stub2(r1, r0, r7, r6);
4933 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
Leon Clarked91b9f72010-01-27 17:25:45 +00004934 __ pop(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00004935 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004936 // Fall through to both_loaded_as_doubles.
4937}
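
// Illustrative sketch only: what the Smi-to-double conversion above computes.
// On this 32-bit port a Smi stores its payload in the upper 31 bits with a
// zero tag bit, so an arithmetic shift right by kSmiTagSize (1) recovers the
// signed value, which vcvt (or ConvertToDoubleStub) then widens to a double.
// The helper name is made up for this sketch.
static inline double SketchSmiToDouble(int32_t tagged_smi) {
  int32_t untagged = tagged_smi >> 1;     // ASR kSmiTagSize.
  return static_cast<double>(untagged);
}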
4938
4939
Leon Clarkee46be812010-01-19 14:06:41 +00004940void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004941 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00004942 Register rhs_exponent = exp_first ? r0 : r1;
4943 Register lhs_exponent = exp_first ? r2 : r3;
4944 Register rhs_mantissa = exp_first ? r1 : r0;
4945 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00004946 Label one_is_nan, neither_is_nan;
Leon Clarkee46be812010-01-19 14:06:41 +00004947 Label lhs_not_nan_exp_mask_is_loaded;
Steve Blocka7e24c12009-10-30 11:49:00 +00004948
4949 Register exp_mask_reg = r5;
4950
4951 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004952 __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
4953 __ cmp(r4, Operand(exp_mask_reg));
Leon Clarkee46be812010-01-19 14:06:41 +00004954 __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
Steve Blocka7e24c12009-10-30 11:49:00 +00004955 __ mov(r4,
4956 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4957 SetCC);
4958 __ b(ne, &one_is_nan);
4959 __ cmp(lhs_mantissa, Operand(0));
Leon Clarkee46be812010-01-19 14:06:41 +00004960 __ b(ne, &one_is_nan);
4961
4962 __ bind(lhs_not_nan);
4963 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4964 __ bind(&lhs_not_nan_exp_mask_is_loaded);
4965 __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
4966 __ cmp(r4, Operand(exp_mask_reg));
4967 __ b(ne, &neither_is_nan);
4968 __ mov(r4,
4969 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4970 SetCC);
4971 __ b(ne, &one_is_nan);
4972 __ cmp(rhs_mantissa, Operand(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00004973 __ b(eq, &neither_is_nan);
4974
4975 __ bind(&one_is_nan);
4976 // NaN comparisons always fail.
4977 // Load whatever we need in r0 to make the comparison fail.
4978 if (cc == lt || cc == le) {
4979 __ mov(r0, Operand(GREATER));
4980 } else {
4981 __ mov(r0, Operand(LESS));
4982 }
4983 __ mov(pc, Operand(lr)); // Return.
4984
4985 __ bind(&neither_is_nan);
4986}
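
// Illustrative sketch only: the NaN test above expressed on the two raw words
// of a double. A value is NaN exactly when all eleven exponent bits are set
// and the 52-bit mantissa is not all zero (all zero would be an infinity).
// The helper name is made up for this sketch.
static inline bool SketchIsNaN(uint32_t exponent_word, uint32_t mantissa_word) {
  const uint32_t kSketchExponentMask = 0x7FF00000u;            // Bits 30..20.
  if ((exponent_word & kSketchExponentMask) != kSketchExponentMask) {
    return false;                                  // Exponent not all ones.
  }
  uint32_t top_mantissa_bits = exponent_word << 12;  // Drop sign and exponent.
  return (top_mantissa_bits | mantissa_word) != 0;
}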
4987
4988
4989// See comment at call site.
4990static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
4991 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00004992 Register rhs_exponent = exp_first ? r0 : r1;
4993 Register lhs_exponent = exp_first ? r2 : r3;
4994 Register rhs_mantissa = exp_first ? r1 : r0;
4995 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00004996
4997 // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
4998 if (cc == eq) {
4999 // Doubles are not equal unless they have the same bit pattern.
5000 // Exception: 0 and -0.
Leon Clarkee46be812010-01-19 14:06:41 +00005001 __ cmp(rhs_mantissa, Operand(lhs_mantissa));
5002 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
Steve Blocka7e24c12009-10-30 11:49:00 +00005003 // Return non-zero if the numbers are unequal.
5004 __ mov(pc, Operand(lr), LeaveCC, ne);
5005
Leon Clarkee46be812010-01-19 14:06:41 +00005006 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00005007 // If exponents are equal then return 0.
5008 __ mov(pc, Operand(lr), LeaveCC, eq);
5009
5010 // Exponents are unequal. The only way we can return that the numbers
5011 // are equal is if one is -0 and the other is 0. We already dealt
5012 // with the case where both are -0 or both are 0.
5013 // We start by seeing if the mantissas (that are equal) or the bottom
5014 // 31 bits of the lhs exponent are non-zero. If so we return not
5015 // equal.
Leon Clarkee46be812010-01-19 14:06:41 +00005016 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00005017 __ mov(r0, Operand(r4), LeaveCC, ne);
5018 __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
5019 // Now they are equal if and only if the rhs exponent is zero in its
5020 // low 31 bits.
Leon Clarkee46be812010-01-19 14:06:41 +00005021 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00005022 __ mov(pc, Operand(lr));
5023 } else {
5024 // Call a native function to do a comparison between two non-NaNs.
5025 // Call C routine that may not cause GC or other trouble.
5026 __ mov(r5, Operand(ExternalReference::compare_doubles()));
5027 __ Jump(r5); // Tail call.
5028 }
5029}
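
// Illustrative sketch only: the equality rule implemented above for two
// non-NaN doubles given as raw words. Equal bit patterns are equal numbers;
// the only other equal pair is +0 and -0, which differ in nothing but the
// sign bit. The helper name is made up for this sketch.
static inline bool SketchNonNanDoublesEqual(uint32_t lhs_exp, uint32_t lhs_man,
                                            uint32_t rhs_exp, uint32_t rhs_man) {
  if (lhs_man == rhs_man && lhs_exp == rhs_exp) return true;   // Same pattern.
  // Remaining equal case: both are zeros of either sign.
  return lhs_man == 0 && rhs_man == 0 &&
         (lhs_exp << 1) == 0 && (rhs_exp << 1) == 0;           // Ignore sign.
}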
5030
5031
5032// See comment at call site.
5033static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
5034 // If either operand is a JSObject or an oddball value, then they are
5035 // not equal since their pointers are different.
5036 // There is no test for undetectability in strict equality.
5037 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
5038 Label first_non_object;
5039 // Get the type of the first operand into r2 and compare it with
5040 // FIRST_JS_OBJECT_TYPE.
5041 __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
5042 __ b(lt, &first_non_object);
5043
5044 // Return non-zero (r0 is not zero)
5045 Label return_not_equal;
5046 __ bind(&return_not_equal);
5047 __ mov(pc, Operand(lr)); // Return.
5048
5049 __ bind(&first_non_object);
5050 // Check for oddballs: true, false, null, undefined.
5051 __ cmp(r2, Operand(ODDBALL_TYPE));
5052 __ b(eq, &return_not_equal);
5053
5054 __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
5055 __ b(ge, &return_not_equal);
5056
5057 // Check for oddballs: true, false, null, undefined.
5058 __ cmp(r3, Operand(ODDBALL_TYPE));
5059 __ b(eq, &return_not_equal);
Leon Clarkee46be812010-01-19 14:06:41 +00005060
5061 // Now that we have the types we might as well check for symbol-symbol.
5062 // Ensure that no non-strings have the symbol bit set.
5063 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
5064 ASSERT(kSymbolTag != 0);
5065 __ and_(r2, r2, Operand(r3));
5066 __ tst(r2, Operand(kIsSymbolMask));
5067 __ b(ne, &return_not_equal);
Steve Blocka7e24c12009-10-30 11:49:00 +00005068}
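
// Illustrative sketch only: the symbol-symbol shortcut above. Given the
// assertions that no non-string instance type carries the symbol bit,
// AND-ing the two instance types and testing that bit proves both operands
// are symbols, and two distinct symbol pointers can never be equal. The
// helper name and mask parameter are assumptions of this sketch.
static inline bool SketchBothSymbols(uint32_t lhs_type, uint32_t rhs_type,
                                     uint32_t is_symbol_mask) {
  return ((lhs_type & rhs_type) & is_symbol_mask) != 0;
}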
5069
5070
5071// See comment at call site.
5072static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
5073 Label* both_loaded_as_doubles,
5074 Label* not_heap_numbers,
5075 Label* slow) {
Leon Clarkee46be812010-01-19 14:06:41 +00005076 __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005077 __ b(ne, not_heap_numbers);
Leon Clarkee46be812010-01-19 14:06:41 +00005078 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
5079 __ cmp(r2, r3);
Steve Blocka7e24c12009-10-30 11:49:00 +00005080 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
5081
5082 // Both are heap numbers. Load them up then jump to the code we have
5083 // for that.
Leon Clarked91b9f72010-01-27 17:25:45 +00005084 if (CpuFeatures::IsSupported(VFP3)) {
5085 CpuFeatures::Scope scope(VFP3);
5086 __ sub(r7, r0, Operand(kHeapObjectTag));
5087 __ vldr(d6, r7, HeapNumber::kValueOffset);
5088 __ sub(r7, r1, Operand(kHeapObjectTag));
5089 __ vldr(d7, r7, HeapNumber::kValueOffset);
5090 } else {
5091 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
5092 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
5093 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
5094 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
5095 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005096 __ jmp(both_loaded_as_doubles);
5097}
5098
5099
5100// Fast negative check for symbol-to-symbol equality.
5101static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
5102 // r2 is object type of r0.
Leon Clarkee46be812010-01-19 14:06:41 +00005103 // Ensure that no non-strings have the symbol bit set.
5104 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
5105 ASSERT(kSymbolTag != 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00005106 __ tst(r2, Operand(kIsSymbolMask));
5107 __ b(eq, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00005108 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
5109 __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00005110 __ tst(r3, Operand(kIsSymbolMask));
5111 __ b(eq, slow);
5112
5113 // Both are symbols. We already checked they weren't the same pointer
5114 // so they are not equal.
5115 __ mov(r0, Operand(1)); // Non-zero indicates not equal.
5116 __ mov(pc, Operand(lr)); // Return.
5117}
5118
5119
Leon Clarked91b9f72010-01-27 17:25:45 +00005120// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
5121// On exit r0 is 0, positive or negative to indicate the result of
5122// the comparison.
Steve Blocka7e24c12009-10-30 11:49:00 +00005123void CompareStub::Generate(MacroAssembler* masm) {
5124 Label slow; // Call builtin.
Leon Clarkee46be812010-01-19 14:06:41 +00005125 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
Steve Blocka7e24c12009-10-30 11:49:00 +00005126
5127 // NOTICE! This code is only reached after a smi-fast-case check, so
5128 // it is certain that at least one operand isn't a smi.
5129
5130 // Handle the case where the objects are identical. Either returns the answer
5131 // or goes to slow. Only falls through if the objects were not identical.
Leon Clarkee46be812010-01-19 14:06:41 +00005132 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005133
5134 // If either is a Smi (we know that not both are), then they can only
5135 // be strictly equal if the other is a HeapNumber.
5136 ASSERT_EQ(0, kSmiTag);
5137 ASSERT_EQ(0, Smi::FromInt(0));
5138 __ and_(r2, r0, Operand(r1));
5139 __ tst(r2, Operand(kSmiTagMask));
5140 __ b(ne, &not_smis);
5141 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
5142 // 1) Return the answer.
5143 // 2) Go to slow.
5144 // 3) Fall through to both_loaded_as_doubles.
Leon Clarkee46be812010-01-19 14:06:41 +00005145 // 4) Jump to lhs_not_nan.
Steve Blocka7e24c12009-10-30 11:49:00 +00005146 // In cases 3 and 4 we have found out we were dealing with a number-number
Leon Clarked91b9f72010-01-27 17:25:45 +00005147 // comparison. If VFP3 is supported the double values of the numbers have
5148 // been loaded into d7 and d6. Otherwise, the double values have been loaded
5149 // into r0, r1, r2, and r3.
Leon Clarkee46be812010-01-19 14:06:41 +00005150 EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005151
5152 __ bind(&both_loaded_as_doubles);
Leon Clarked91b9f72010-01-27 17:25:45 +00005153 // The arguments have been converted to doubles and stored in d6 and d7, if
5154 // VFP3 is supported, or in r0, r1, r2, and r3.
Steve Blockd0582a62009-12-15 09:54:21 +00005155 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarkee46be812010-01-19 14:06:41 +00005156 __ bind(&lhs_not_nan);
Steve Blockd0582a62009-12-15 09:54:21 +00005157 CpuFeatures::Scope scope(VFP3);
Leon Clarkee46be812010-01-19 14:06:41 +00005158 Label no_nan;
Steve Blockd0582a62009-12-15 09:54:21 +00005159 // ARMv7 VFP3 instructions to implement double precision comparison.
Leon Clarkee46be812010-01-19 14:06:41 +00005160 __ vcmp(d7, d6);
5161 __ vmrs(pc); // Move vector status bits to normal status bits.
5162 Label nan;
5163 __ b(vs, &nan);
5164 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
5165 __ mov(r0, Operand(LESS), LeaveCC, lt);
5166 __ mov(r0, Operand(GREATER), LeaveCC, gt);
5167 __ mov(pc, Operand(lr));
5168
5169 __ bind(&nan);
5170 // If one of the sides was a NaN then the v flag is set. Load r0 with
5171 // whatever it takes to make the comparison fail, since comparisons with NaN
5172 // always fail.
5173 if (cc_ == lt || cc_ == le) {
5174 __ mov(r0, Operand(GREATER));
5175 } else {
5176 __ mov(r0, Operand(LESS));
5177 }
Steve Blockd0582a62009-12-15 09:54:21 +00005178 __ mov(pc, Operand(lr));
5179 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00005180 // Checks for NaN in the doubles we have loaded. Can return the answer or
5181 // fall through if neither is a NaN. Also binds lhs_not_nan.
5182 EmitNanCheck(masm, &lhs_not_nan, cc_);
Steve Blockd0582a62009-12-15 09:54:21 +00005183 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
5184 // answer. Never falls through.
5185 EmitTwoNonNanDoubleComparison(masm, cc_);
5186 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005187
5188 __ bind(&not_smis);
5189 // At this point we know we are dealing with two different objects,
5190 // and neither of them is a Smi. The objects are in r0 and r1.
5191 if (strict_) {
5192 // This returns non-equal for some object types, or falls through if it
5193 // was not lucky.
5194 EmitStrictTwoHeapObjectCompare(masm);
5195 }
5196
5197 Label check_for_symbols;
Leon Clarked91b9f72010-01-27 17:25:45 +00005198 Label flat_string_check;
Steve Blocka7e24c12009-10-30 11:49:00 +00005199 // Check for heap-number-heap-number comparison. Can jump to slow case,
5200 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
5201 // that case. If the inputs are not doubles then jumps to check_for_symbols.
Leon Clarkee46be812010-01-19 14:06:41 +00005202 // In this case r2 will contain the type of r0. Never falls through.
Steve Blocka7e24c12009-10-30 11:49:00 +00005203 EmitCheckForTwoHeapNumbers(masm,
5204 &both_loaded_as_doubles,
5205 &check_for_symbols,
Leon Clarked91b9f72010-01-27 17:25:45 +00005206 &flat_string_check);
Steve Blocka7e24c12009-10-30 11:49:00 +00005207
5208 __ bind(&check_for_symbols);
Leon Clarkee46be812010-01-19 14:06:41 +00005209 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
5210 // symbols.
5211 if (cc_ == eq && !strict_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005212 // Either jumps to slow or returns the answer. Assumes that r2 is the type
5213 // of r0 on entry.
Leon Clarked91b9f72010-01-27 17:25:45 +00005214 EmitCheckForSymbols(masm, &flat_string_check);
Steve Blocka7e24c12009-10-30 11:49:00 +00005215 }
5216
Leon Clarked91b9f72010-01-27 17:25:45 +00005217 // Check for both being sequential ASCII strings, and inline if that is the
5218 // case.
5219 __ bind(&flat_string_check);
5220
5221 __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
5222
5223 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
5224 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
5225 r1,
5226 r0,
5227 r2,
5228 r3,
5229 r4,
5230 r5);
5231 // Never falls through to here.
5232
Steve Blocka7e24c12009-10-30 11:49:00 +00005233 __ bind(&slow);
Leon Clarked91b9f72010-01-27 17:25:45 +00005234
Steve Blocka7e24c12009-10-30 11:49:00 +00005235 __ push(r1);
5236 __ push(r0);
5237 // Figure out which native to call and setup the arguments.
5238 Builtins::JavaScript native;
Steve Blocka7e24c12009-10-30 11:49:00 +00005239 if (cc_ == eq) {
5240 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
5241 } else {
5242 native = Builtins::COMPARE;
5243 int ncr; // NaN compare result
5244 if (cc_ == lt || cc_ == le) {
5245 ncr = GREATER;
5246 } else {
5247 ASSERT(cc_ == gt || cc_ == ge); // remaining cases
5248 ncr = LESS;
5249 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005250 __ mov(r0, Operand(Smi::FromInt(ncr)));
5251 __ push(r0);
5252 }
5253
5254 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
5255 // tagged as a small integer.
Leon Clarkee46be812010-01-19 14:06:41 +00005256 __ InvokeBuiltin(native, JUMP_JS);
Steve Blocka7e24c12009-10-30 11:49:00 +00005257}
5258
5259
5260// Allocates a heap number or jumps to the label if the young space is full and
5261// a scavenge is needed.
5262static void AllocateHeapNumber(
5263 MacroAssembler* masm,
5264 Label* need_gc, // Jump here if young space is full.
5265 Register result, // The tagged address of the new heap number.
5266 Register scratch1, // A scratch register.
5267 Register scratch2) { // Another scratch register.
5268 // Allocate an object in the heap for the heap number and tag it as a heap
5269 // object.
5270 __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
5271 result,
5272 scratch1,
5273 scratch2,
5274 need_gc,
5275 TAG_OBJECT);
5276
5277 // Get heap number map and store it in the allocated object.
5278 __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
5279 __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
5280}
5281
5282
5283// We fall into this code if the operands were Smis, but the result was
5284// not (e.g. overflow). We branch into this code (to the not_smi label) if
5285// the operands were not both Smi. The operands are in r0 and r1. In order
5286// to call the C-implemented binary fp operation routines we need to end up
5287// with the double precision floating point operands in r0 and r1 (for the
5288// value in r1) and r2 and r3 (for the value in r0).
5289static void HandleBinaryOpSlowCases(MacroAssembler* masm,
5290 Label* not_smi,
5291 const Builtins::JavaScript& builtin,
5292 Token::Value operation,
5293 OverwriteMode mode) {
5294 Label slow, slow_pop_2_first, do_the_call;
5295 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
5296 // Smi-smi case (overflow).
5297 // Since both are Smis there is no heap number to overwrite, so allocate.
5298 // The new heap number is in r5. r6 and r7 are scratch.
5299 AllocateHeapNumber(masm, &slow, r5, r6, r7);
Steve Blockd0582a62009-12-15 09:54:21 +00005300
Leon Clarked91b9f72010-01-27 17:25:45 +00005301 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
5302 // using registers d7 and d6 for the double values.
5303 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
5304 Token::MOD != operation;
5305 if (use_fp_registers) {
Steve Blockd0582a62009-12-15 09:54:21 +00005306 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00005307 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
5308 __ vmov(s15, r7);
5309 __ vcvt(d7, s15);
5310 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
5311 __ vmov(s13, r7);
5312 __ vcvt(d6, s13);
Steve Blockd0582a62009-12-15 09:54:21 +00005313 } else {
5314 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
5315 __ mov(r7, Operand(r0));
5316 ConvertToDoubleStub stub1(r3, r2, r7, r6);
5317 __ push(lr);
5318 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
5319 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
5320 __ mov(r7, Operand(r1));
5321 ConvertToDoubleStub stub2(r1, r0, r7, r6);
5322 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
5323 __ pop(lr);
5324 }
5325
Steve Blocka7e24c12009-10-30 11:49:00 +00005326 __ jmp(&do_the_call); // Tail call. No return.
5327
5328 // We jump to here if something goes wrong (one param is not a number of any
5329 // sort or new-space allocation fails).
5330 __ bind(&slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005331
5332 // Push arguments to the stack
Steve Blocka7e24c12009-10-30 11:49:00 +00005333 __ push(r1);
5334 __ push(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00005335
5336 if (Token::ADD == operation) {
5337 // Test for string arguments before calling runtime.
5338 // r1 : first argument
5339 // r0 : second argument
5340 // sp[0] : second argument
5341 // sp[1] : first argument
5342
5343 Label not_strings, not_string1, string1;
5344 __ tst(r1, Operand(kSmiTagMask));
5345 __ b(eq, &not_string1);
5346 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
5347 __ b(ge, &not_string1);
5348
5349 // First argument is a string, test second.
5350 __ tst(r0, Operand(kSmiTagMask));
5351 __ b(eq, &string1);
5352 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
5353 __ b(ge, &string1);
5354
5355 // First and second argument are strings.
5356 __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
5357
5358 // Only first argument is a string.
5359 __ bind(&string1);
5360 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
5361
5362 // First argument was not a string, test second.
5363 __ bind(&not_string1);
5364 __ tst(r0, Operand(kSmiTagMask));
5365 __ b(eq, &not_strings);
5366 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
5367 __ b(ge, &not_strings);
5368
5369 // Only second argument is a string.
5371 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
5372
5373 __ bind(&not_strings);
5374 }
5375
Steve Blocka7e24c12009-10-30 11:49:00 +00005376 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
5377
5378 // We branch here if at least one of r0 and r1 is not a Smi.
5379 __ bind(not_smi);
5380 if (mode == NO_OVERWRITE) {
5381 // In the case where there is no chance of an overwritable float we may as
5382 // well do the allocation immediately while r0 and r1 are untouched.
5383 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5384 }
5385
5386 // Move r0 to a double in r2-r3.
5387 __ tst(r0, Operand(kSmiTagMask));
5388 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5389 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5390 __ b(ne, &slow);
5391 if (mode == OVERWRITE_RIGHT) {
5392 __ mov(r5, Operand(r0)); // Overwrite this heap number.
5393 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005394 if (use_fp_registers) {
5395 CpuFeatures::Scope scope(VFP3);
5396 // Load the double from tagged HeapNumber r0 to d7.
5397 __ sub(r7, r0, Operand(kHeapObjectTag));
5398 __ vldr(d7, r7, HeapNumber::kValueOffset);
5399 } else {
5400 // Calling convention says that second double is in r2 and r3.
5401 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
5402 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
5403 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005404 __ jmp(&finished_loading_r0);
5405 __ bind(&r0_is_smi);
5406 if (mode == OVERWRITE_RIGHT) {
5407 // We can't overwrite a Smi so get address of new heap number into r5.
5408 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5409 }
Steve Blockd0582a62009-12-15 09:54:21 +00005410
Leon Clarked91b9f72010-01-27 17:25:45 +00005411 if (use_fp_registers) {
Steve Blockd0582a62009-12-15 09:54:21 +00005412 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00005413 // Convert smi in r0 to double in d7.
5414 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
5415 __ vmov(s15, r7);
5416 __ vcvt(d7, s15);
Steve Blockd0582a62009-12-15 09:54:21 +00005417 } else {
5418 // Write Smi from r0 to r3 and r2 in double format.
5419 __ mov(r7, Operand(r0));
5420 ConvertToDoubleStub stub3(r3, r2, r7, r6);
5421 __ push(lr);
5422 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
5423 __ pop(lr);
5424 }
5425
Steve Blocka7e24c12009-10-30 11:49:00 +00005426 __ bind(&finished_loading_r0);
5427
5428 // Move r1 to a double in r0-r1.
5429 __ tst(r1, Operand(kSmiTagMask));
5430 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5431 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5432 __ b(ne, &slow);
5433 if (mode == OVERWRITE_LEFT) {
5434 __ mov(r5, Operand(r1)); // Overwrite this heap number.
5435 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005436 if (use_fp_registers) {
5437 CpuFeatures::Scope scope(VFP3);
5438 // Load the double from tagged HeapNumber r1 to d6.
5439 __ sub(r7, r1, Operand(kHeapObjectTag));
5440 __ vldr(d6, r7, HeapNumber::kValueOffset);
5441 } else {
5442 // Calling convention says that first double is in r0 and r1.
5443 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
5444 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
5445 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005446 __ jmp(&finished_loading_r1);
5447 __ bind(&r1_is_smi);
5448 if (mode == OVERWRITE_LEFT) {
5449 // We can't overwrite a Smi so get address of new heap number into r5.
5450 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5451 }
Steve Blockd0582a62009-12-15 09:54:21 +00005452
Leon Clarked91b9f72010-01-27 17:25:45 +00005453 if (use_fp_registers) {
Steve Blockd0582a62009-12-15 09:54:21 +00005454 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00005455 // Convert smi in r1 to double in d6.
5456 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
5457 __ vmov(s13, r7);
5458 __ vcvt(d6, s13);
Steve Blockd0582a62009-12-15 09:54:21 +00005459 } else {
5460 // Write Smi from r1 to r1 and r0 in double format.
5461 __ mov(r7, Operand(r1));
5462 ConvertToDoubleStub stub4(r1, r0, r7, r6);
5463 __ push(lr);
5464 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
5465 __ pop(lr);
5466 }
5467
Steve Blocka7e24c12009-10-30 11:49:00 +00005468 __ bind(&finished_loading_r1);
5469
5470 __ bind(&do_the_call);
Leon Clarked91b9f72010-01-27 17:25:45 +00005471 // If we are inlining the operation using VFP3 instructions for
5472 // add, subtract, multiply, or divide, the arguments are in d6 and d7.
5473 if (use_fp_registers) {
Steve Blockd0582a62009-12-15 09:54:21 +00005474 CpuFeatures::Scope scope(VFP3);
5475 // ARMv7 VFP3 instructions to implement
5476 // double precision, add, subtract, multiply, divide.
Steve Blockd0582a62009-12-15 09:54:21 +00005477
5478 if (Token::MUL == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005479 __ vmul(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005480 } else if (Token::DIV == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005481 __ vdiv(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005482 } else if (Token::ADD == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005483 __ vadd(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005484 } else if (Token::SUB == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005485 __ vsub(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005486 } else {
5487 UNREACHABLE();
5488 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005489 __ sub(r0, r5, Operand(kHeapObjectTag));
5490 __ vstr(d5, r0, HeapNumber::kValueOffset);
5491 __ add(r0, r0, Operand(kHeapObjectTag));
Steve Blockd0582a62009-12-15 09:54:21 +00005492 __ mov(pc, lr);
5493 return;
5494 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005495
5496 // If we did not inline the operation, then the arguments are in:
5497 // r0: Left value (least significant part of mantissa).
5498 // r1: Left value (sign, exponent, top of mantissa).
5499 // r2: Right value (least significant part of mantissa).
5500 // r3: Right value (sign, exponent, top of mantissa).
5501 // r5: Address of heap number for result.
5502
Steve Blocka7e24c12009-10-30 11:49:00 +00005503 __ push(lr); // For later.
5504 __ push(r5); // Address of heap number that is answer.
5505 __ AlignStack(0);
5506 // Call C routine that may not cause GC or other trouble.
5507 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
5508 __ Call(r5);
5509 __ pop(r4); // Address of heap number.
5510 __ cmp(r4, Operand(Smi::FromInt(0)));
5511 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
5512 // Store answer in the overwritable heap number.
5513#if !defined(USE_ARM_EABI)
5514 // Double returned in fp coprocessor registers 0 and 1, encoded as register
5515 // cr8. Offsets must be divisible by 4 for the coprocessor, so we need to
5516 // subtract the tag from r4.
5517 __ sub(r5, r4, Operand(kHeapObjectTag));
5518 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
5519#else
5520 // Double returned in registers 0 and 1.
5521 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
5522 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
5523#endif
5524 __ mov(r0, Operand(r4));
5525 // And we are done.
5526 __ pop(pc);
5527}
5528
5529
5530// Tries to get a signed int32 out of a double precision floating point heap
5531// number. Rounds towards 0. Fastest for doubles that are in the ranges
5532// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
5533// almost to the range of signed int32 values that are not Smis. Jumps to the
5534// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
5535// (excluding the endpoints).
5536static void GetInt32(MacroAssembler* masm,
5537 Register source,
5538 Register dest,
5539 Register scratch,
5540 Register scratch2,
5541 Label* slow) {
5542 Label right_exponent, done;
5543 // Get exponent word.
5544 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
5545 // Get exponent alone in scratch2.
5546 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
5547 // Load dest with zero. We use this either for the final shift or
5548 // for the answer.
5549 __ mov(dest, Operand(0));
5550 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
5551 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
5552 // the exponent that we are fastest at and also the highest exponent we can
5553 // handle here.
5554 const uint32_t non_smi_exponent =
5555 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
5556 __ cmp(scratch2, Operand(non_smi_exponent));
5557 // If we have a match of the int32-but-not-Smi exponent then skip some logic.
5558 __ b(eq, &right_exponent);
5559 // If the exponent is higher than that then go to slow case. This catches
5560 // numbers that don't fit in a signed int32, infinities and NaNs.
5561 __ b(gt, slow);
5562
5563 // We know the exponent is smaller than 30 (biased). If it is less than
5564 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
5565 // it rounds to zero.
5566 const uint32_t zero_exponent =
5567 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
5568 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
5569 // Dest already has a Smi zero.
5570 __ b(lt, &done);
Steve Blockd0582a62009-12-15 09:54:21 +00005571 if (!CpuFeatures::IsSupported(VFP3)) {
5572 // We have a shifted exponent between 0 and 30 in scratch2.
5573 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
5574 // We now have the exponent in dest. Subtract from 30 to get
5575 // how much to shift down.
5576 __ rsb(dest, dest, Operand(30));
5577 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005578 __ bind(&right_exponent);
Steve Blockd0582a62009-12-15 09:54:21 +00005579 if (CpuFeatures::IsSupported(VFP3)) {
5580 CpuFeatures::Scope scope(VFP3);
5581 // ARMv7 VFP3 instructions implementing double precision to integer
5582 // conversion using round to zero.
5583 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00005584 __ vmov(d7, scratch2, scratch);
5585 __ vcvt(s15, d7);
5586 __ vmov(dest, s15);
Steve Blockd0582a62009-12-15 09:54:21 +00005587 } else {
5588 // Get the top bits of the mantissa.
5589 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
5590 // Put back the implicit 1.
5591 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
5592 // Shift up the mantissa bits to take up the space the exponent used to
5593 // take. We just orred in the implicit bit so that took care of one and
5594 // we want to leave the sign bit 0 so we subtract 2 bits from the shift
5595 // distance.
5596 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
5597 __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
5598 // Put sign in zero flag.
5599 __ tst(scratch, Operand(HeapNumber::kSignMask));
5600 // Get the second half of the double. For some exponents we don't
5601 // actually need this because the bits get shifted out again, but
5602 // it's probably slower to test than just to do it.
5603 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
5604 // Shift down 22 bits to get the last 10 bits.
5605 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
5606 // Move down according to the exponent.
5607 __ mov(dest, Operand(scratch, LSR, dest));
5608 // Fix sign if sign bit was set.
5609 __ rsb(dest, dest, Operand(0), LeaveCC, ne);
5610 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005611 __ bind(&done);
5612}
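
// Illustrative sketch only: the non-VFP truncation above in plain C++ on the
// raw words of the double. Returns false for the inputs the code sends to the
// slow path (biased exponent above 1023 + 30, which covers magnitudes of 2^31
// and up, infinities and NaNs). The helper name is made up for this sketch.
static inline bool SketchDoubleToInt32(uint32_t hi, uint32_t lo, int32_t* out) {
  int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;
  if (exponent > 30) return false;                 // Slow case.
  if (exponent < 0) { *out = 0; return true; }     // Magnitude < 1, round to 0.
  // Implicit 1 plus the top 30 mantissa bits, aligned so the 1 is at bit 30.
  uint32_t magnitude = ((hi & 0x000FFFFFu) | 0x00100000u) << 10;
  magnitude |= lo >> 22;                           // Next 10 mantissa bits.
  magnitude >>= (30 - exponent);                   // Truncate towards zero.
  *out = (hi & 0x80000000u) != 0 ? -static_cast<int32_t>(magnitude)
                                 : static_cast<int32_t>(magnitude);
  return true;
}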
5613
Steve Blocka7e24c12009-10-30 11:49:00 +00005614// For bitwise ops where the inputs are not both Smis we here try to determine
5615// whether both inputs are either Smis or at least heap numbers that can be
5616// represented by a 32 bit signed value. We truncate towards zero as required
5617// by the ES spec. If this is the case we do the bitwise op and see if the
5618// result is a Smi. If so, great, otherwise we try to find a heap number to
5619// write the answer into (either by allocating or by overwriting).
5620// On entry the operands are in r0 and r1. On exit the answer is in r0.
5621void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
5622 Label slow, result_not_a_smi;
5623 Label r0_is_smi, r1_is_smi;
5624 Label done_checking_r0, done_checking_r1;
5625
5626 __ tst(r1, Operand(kSmiTagMask));
5627 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5628 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5629 __ b(ne, &slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005630 GetInt32(masm, r1, r3, r5, r4, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00005631 __ jmp(&done_checking_r1);
5632 __ bind(&r1_is_smi);
5633 __ mov(r3, Operand(r1, ASR, 1));
5634 __ bind(&done_checking_r1);
5635
5636 __ tst(r0, Operand(kSmiTagMask));
5637 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5638 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5639 __ b(ne, &slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005640 GetInt32(masm, r0, r2, r5, r4, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00005641 __ jmp(&done_checking_r0);
5642 __ bind(&r0_is_smi);
5643 __ mov(r2, Operand(r0, ASR, 1));
5644 __ bind(&done_checking_r0);
5645
5646 // r0 and r1: Original operands (Smi or heap numbers).
5647 // r2 and r3: Signed int32 operands.
5648 switch (op_) {
5649 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
5650 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
5651 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
5652 case Token::SAR:
5653 // Use only the 5 least significant bits of the shift count.
5654 __ and_(r2, r2, Operand(0x1f));
5655 __ mov(r2, Operand(r3, ASR, r2));
5656 break;
5657 case Token::SHR:
5658 // Use only the 5 least significant bits of the shift count.
5659 __ and_(r2, r2, Operand(0x1f));
5660 __ mov(r2, Operand(r3, LSR, r2), SetCC);
5661 // SHR is special because it is required to produce a positive answer.
5662 // The code below for writing into heap numbers isn't capable of writing
5663 // the register as an unsigned int so we go to slow case if we hit this
5664 // case.
5665 __ b(mi, &slow);
5666 break;
5667 case Token::SHL:
5668 // Use only the 5 least significant bits of the shift count.
5669 __ and_(r2, r2, Operand(0x1f));
5670 __ mov(r2, Operand(r3, LSL, r2));
5671 break;
5672 default: UNREACHABLE();
5673 }
5674 // check that the *signed* result fits in a smi
5675 __ add(r3, r2, Operand(0x40000000), SetCC);
5676 __ b(mi, &result_not_a_smi);
5677 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5678 __ Ret();
5679
5680 Label have_to_allocate, got_a_heap_number;
5681 __ bind(&result_not_a_smi);
5682 switch (mode_) {
5683 case OVERWRITE_RIGHT: {
5684 __ tst(r0, Operand(kSmiTagMask));
5685 __ b(eq, &have_to_allocate);
5686 __ mov(r5, Operand(r0));
5687 break;
5688 }
5689 case OVERWRITE_LEFT: {
5690 __ tst(r1, Operand(kSmiTagMask));
5691 __ b(eq, &have_to_allocate);
5692 __ mov(r5, Operand(r1));
5693 break;
5694 }
5695 case NO_OVERWRITE: {
5696 // Get a new heap number in r5. r6 and r7 are scratch.
5697 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5698 }
5699 default: break;
5700 }
5701 __ bind(&got_a_heap_number);
5702 // r2: Answer as signed int32.
5703 // r5: Heap number to write answer into.
5704
5705 // Nothing can go wrong now, so move the heap number to r0, which is the
5706 // result.
5707 __ mov(r0, Operand(r5));
5708
5709 // Tail call that writes the int32 in r2 to the heap number in r0, using
5710 // r3 as scratch. r0 is preserved and returned.
5711 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
5712 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
5713
5714 if (mode_ != NO_OVERWRITE) {
5715 __ bind(&have_to_allocate);
5716 // Get a new heap number in r5. r6 and r7 are scratch.
5717 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5718 __ jmp(&got_a_heap_number);
5719 }
5720
5721 // If all else failed then we go to the runtime system.
5722 __ bind(&slow);
5723 __ push(r1); // restore stack
5724 __ push(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00005725 switch (op_) {
5726 case Token::BIT_OR:
5727 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
5728 break;
5729 case Token::BIT_AND:
5730 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
5731 break;
5732 case Token::BIT_XOR:
5733 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
5734 break;
5735 case Token::SAR:
5736 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
5737 break;
5738 case Token::SHR:
5739 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
5740 break;
5741 case Token::SHL:
5742 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
5743 break;
5744 default:
5745 UNREACHABLE();
5746 }
5747}
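
// Illustrative sketch only: the Smi range check used above ("add 0x40000000
// and branch on minus"). Adding 2^30 leaves the sign bit clear exactly when
// the value lies in [-2^30, 2^30), the range a 31-bit Smi payload can hold.
// The helper name is made up for this sketch.
static inline bool SketchFitsInSmi(int32_t value) {
  uint32_t shifted = static_cast<uint32_t>(value) + 0x40000000u;
  return (shifted & 0x80000000u) == 0;   // mi (sign) flag clear => fits.
}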
5748
5749
5750// Can we multiply by x with at most two shifts and an add?
5751// This answers yes to all integers from 2 to 10.
5752static bool IsEasyToMultiplyBy(int x) {
5753 if (x < 2) return false; // Avoid special cases.
5754 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
5755 if (IsPowerOf2(x)) return true; // Simple shift.
5756 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
5757 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
5758 return false;
5759}
5760
5761
5762// Can multiply by anything that IsEasyToMultiplyBy returns true for.
5763// Source and destination may be the same register. This routine does
5764// not set carry and overflow the way a mul instruction would.
5765static void MultiplyByKnownInt(MacroAssembler* masm,
5766 Register source,
5767 Register destination,
5768 int known_int) {
5769 if (IsPowerOf2(known_int)) {
5770 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
5771 } else if (PopCountLessThanEqual2(known_int)) {
5772 int first_bit = BitPosition(known_int);
5773 int second_bit = BitPosition(known_int ^ (1 << first_bit));
5774 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
5775 if (first_bit != 0) {
5776 __ mov(destination, Operand(destination, LSL, first_bit));
5777 }
5778 } else {
5779 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
5780 int the_bit = BitPosition(known_int + 1);
5781 __ rsb(destination, source, Operand(source, LSL, the_bit));
5782 }
5783}
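
// Illustrative sketch only: the same decomposition in plain C++, assuming the
// argument already passed IsEasyToMultiplyBy. For example 10 = (x + (x << 2))
// << 1 (two set bits) and 7 = (x << 3) - x (one below a power of two). The
// helper name is made up for this sketch.
static inline int32_t SketchMultiplyByKnownInt(int32_t x, int known_int) {
  if ((known_int & (known_int - 1)) == 0) {          // Power of two: one shift.
    int shift = 0;
    while ((1 << shift) != known_int) shift++;
    return x << shift;
  }
  int low_bit = 0;                                    // Lowest set bit.
  while (((known_int >> low_bit) & 1) == 0) low_bit++;
  int rest = known_int & ~(1 << low_bit);
  if ((rest & (rest - 1)) == 0) {                     // Exactly two set bits.
    int high_bit = 0;
    while ((1 << high_bit) != rest) high_bit++;
    return (x + (x << (high_bit - low_bit))) << low_bit;
  }
  int n = 0;                                          // known_int + 1 is 2^n.
  while ((1 << n) != known_int + 1) n++;
  return (x << n) - x;                                // The rsb pattern above.
}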
5784
5785
5786// This function (as opposed to MultiplyByKnownInt) takes the known int in
5787// a register for the cases where it doesn't know a good trick, and may deliver
5788// a result that needs shifting.
5789static void MultiplyByKnownInt2(
5790 MacroAssembler* masm,
5791 Register result,
5792 Register source,
5793 Register known_int_register, // Smi tagged.
5794 int known_int,
5795 int* required_shift) { // Including Smi tag shift
5796 switch (known_int) {
5797 case 3:
5798 __ add(result, source, Operand(source, LSL, 1));
5799 *required_shift = 1;
5800 break;
5801 case 5:
5802 __ add(result, source, Operand(source, LSL, 2));
5803 *required_shift = 1;
5804 break;
5805 case 6:
5806 __ add(result, source, Operand(source, LSL, 1));
5807 *required_shift = 2;
5808 break;
5809 case 7:
5810 __ rsb(result, source, Operand(source, LSL, 3));
5811 *required_shift = 1;
5812 break;
5813 case 9:
5814 __ add(result, source, Operand(source, LSL, 3));
5815 *required_shift = 1;
5816 break;
5817 case 10:
5818 __ add(result, source, Operand(source, LSL, 2));
5819 *required_shift = 2;
5820 break;
5821 default:
5822 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
5823 __ mul(result, source, known_int_register);
5824 *required_shift = 0;
5825 }
5826}
5827
5828
Leon Clarkee46be812010-01-19 14:06:41 +00005829const char* GenericBinaryOpStub::GetName() {
5830 if (name_ != NULL) return name_;
5831 const int len = 100;
5832 name_ = Bootstrapper::AllocateAutoDeletedArray(len);
5833 if (name_ == NULL) return "OOM";
5834 const char* op_name = Token::Name(op_);
5835 const char* overwrite_name;
5836 switch (mode_) {
5837 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
5838 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
5839 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
5840 default: overwrite_name = "UnknownOverwrite"; break;
5841 }
5842
5843 OS::SNPrintF(Vector<char>(name_, len),
5844 "GenericBinaryOpStub_%s_%s%s",
5845 op_name,
5846 overwrite_name,
5847 specialized_on_rhs_ ? "_ConstantRhs" : "");
5848 return name_;
5849}
5850
5851
Steve Blocka7e24c12009-10-30 11:49:00 +00005852void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
5853 // r1 : x
5854 // r0 : y
5855 // result : r0
5856
5857 // All ops need to know whether we are dealing with two Smis. Set up r2 to
5858 // tell us that.
5859 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
5860
5861 switch (op_) {
5862 case Token::ADD: {
5863 Label not_smi;
5864 // Fast path.
5865 ASSERT(kSmiTag == 0); // Adjust code below.
5866 __ tst(r2, Operand(kSmiTagMask));
5867 __ b(ne, &not_smi);
5868 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
5869 // Return if no overflow.
5870 __ Ret(vc);
5871 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
5872
5873 HandleBinaryOpSlowCases(masm,
5874 &not_smi,
5875 Builtins::ADD,
5876 Token::ADD,
5877 mode_);
5878 break;
5879 }
5880
5881 case Token::SUB: {
5882 Label not_smi;
5883 // Fast path.
5884 ASSERT(kSmiTag == 0); // Adjust code below.
5885 __ tst(r2, Operand(kSmiTagMask));
5886 __ b(ne, &not_smi);
5887 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
5888 // Return if no overflow.
5889 __ Ret(vc);
5890 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
5891
5892 HandleBinaryOpSlowCases(masm,
5893 &not_smi,
5894 Builtins::SUB,
5895 Token::SUB,
5896 mode_);
5897 break;
5898 }
5899
5900 case Token::MUL: {
5901 Label not_smi, slow;
5902 ASSERT(kSmiTag == 0); // adjust code below
5903 __ tst(r2, Operand(kSmiTagMask));
5904 __ b(ne, &not_smi);
5905 // Remove tag from one operand (but keep sign), so that result is Smi.
5906 __ mov(ip, Operand(r0, ASR, kSmiTagSize));
5907 // Do multiplication
5908 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
5909 // Go slow on overflows (overflow bit is not set).
5910 __ mov(ip, Operand(r3, ASR, 31));
5911 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
5912 __ b(ne, &slow);
5913 // Go slow on zero result to handle -0.
5914 __ tst(r3, Operand(r3));
5915 __ mov(r0, Operand(r3), LeaveCC, ne);
5916 __ Ret(ne);
5917 // We need to return -0 if we multiplied a negative number by 0 to get 0.
5918 // We know one of them was zero.
5919 __ add(r2, r0, Operand(r1), SetCC);
5920 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
5921 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
5922 // Slow case. We fall through here if we multiplied a negative number
5923 // with 0, because that would mean we should produce -0.
5924 __ bind(&slow);
5925
5926 HandleBinaryOpSlowCases(masm,
5927 &not_smi,
5928 Builtins::MUL,
5929 Token::MUL,
5930 mode_);
5931 break;
5932 }
5933
5934 case Token::DIV:
5935 case Token::MOD: {
5936 Label not_smi;
5937 if (specialized_on_rhs_) {
5938 Label smi_is_unsuitable;
5939 __ BranchOnNotSmi(r1, &not_smi);
5940 if (IsPowerOf2(constant_rhs_)) {
5941 if (op_ == Token::MOD) {
5942 __ and_(r0,
5943 r1,
5944 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
5945 SetCC);
5946 // We now have the answer, but if the input was negative we also
5947 // have the sign bit. Our work is done if the result is
5948 // positive or zero:
5949 __ Ret(pl);
5950 // A mod of a negative left hand side must return a negative number.
5951 // Unfortunately if the answer is 0 then we must return -0. And we
5952 // already optimistically trashed r0 so we may need to restore it.
5953 __ eor(r0, r0, Operand(0x80000000u), SetCC);
5954 // Next two instructions are conditional on the answer being -0.
5955 __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
5956 __ b(eq, &smi_is_unsuitable);
5957 // We need to subtract the dividend. Eg. -3 % 4 == -3.
5958 __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
5959 } else {
5960 ASSERT(op_ == Token::DIV);
5961 __ tst(r1,
5962 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
5963 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
5964 int shift = 0;
5965 int d = constant_rhs_;
5966 while ((d & 1) == 0) {
5967 d >>= 1;
5968 shift++;
5969 }
5970 __ mov(r0, Operand(r1, LSR, shift));
5971 __ bic(r0, r0, Operand(kSmiTagMask));
5972 }
5973 } else {
5974 // Not a power of 2.
5975 __ tst(r1, Operand(0x80000000u));
5976 __ b(ne, &smi_is_unsuitable);
5977 // Find a fixed point reciprocal of the divisor so we can divide by
5978 // multiplying.
5979 double divisor = 1.0 / constant_rhs_;
5980 int shift = 32;
5981 double scale = 4294967296.0; // 1 << 32.
5982 uint32_t mul;
5983 // Maximise the precision of the fixed point reciprocal.
5984 while (true) {
5985 mul = static_cast<uint32_t>(scale * divisor);
5986 if (mul >= 0x7fffffff) break;
5987 scale *= 2.0;
5988 shift++;
5989 }
5990 mul++;
5991 __ mov(r2, Operand(mul));
5992 __ umull(r3, r2, r2, r1);
5993 __ mov(r2, Operand(r2, LSR, shift - 31));
5994 // r2 is r1 / rhs. r2 is not Smi tagged.
5995 // r0 is still the known rhs. r0 is Smi tagged.
5996 // r1 is still the unknown lhs. r1 is Smi tagged.
5997 int required_r4_shift = 0; // Including the Smi tag shift of 1.
5998 // r4 = r2 * r0.
5999 MultiplyByKnownInt2(masm,
6000 r4,
6001 r2,
6002 r0,
6003 constant_rhs_,
6004 &required_r4_shift);
6005 // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
6006 if (op_ == Token::DIV) {
6007 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
6008 __ b(ne, &smi_is_unsuitable); // There was a remainder.
6009 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
6010 } else {
6011 ASSERT(op_ == Token::MOD);
6012 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
6013 }
6014 }
6015 __ Ret();
6016 __ bind(&smi_is_unsuitable);
6017 } else {
6018 __ jmp(&not_smi);
6019 }
6020 HandleBinaryOpSlowCases(masm,
6021 &not_smi,
6022 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
6023 op_,
6024 mode_);
6025 break;
6026 }
6027
6028 case Token::BIT_OR:
6029 case Token::BIT_AND:
6030 case Token::BIT_XOR:
6031 case Token::SAR:
6032 case Token::SHR:
6033 case Token::SHL: {
6034 Label slow;
6035 ASSERT(kSmiTag == 0); // adjust code below
6036 __ tst(r2, Operand(kSmiTagMask));
6037 __ b(ne, &slow);
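// Added note: no untagging is needed for the or/and/xor cases below.
// With kSmiTag == 0 both operands carry a zero tag bit, so the bitwise
// combination of two smis is itself a correctly tagged smi, e.g.
// (2 << 1) | (5 << 1) == 7 << 1.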
6038 switch (op_) {
6039 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
6040 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
6041 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
6042 case Token::SAR:
6043 // Remove tags from right operand.
6044 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
6045 // Use only the 5 least significant bits of the shift count.
6046 __ and_(r2, r2, Operand(0x1f));
6047 __ mov(r0, Operand(r1, ASR, r2));
6048 // Smi tag result.
6049 __ bic(r0, r0, Operand(kSmiTagMask));
6050 break;
6051 case Token::SHR:
6052 // Remove tags from operands. We can't do the shift on the tagged
6053 // 31 bit value because the 0s would get shifted into bit 30 instead
// of bit 31.
6054 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
6055 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
6056 // Use only the 5 least significant bits of the shift count.
6057 __ and_(r2, r2, Operand(0x1f));
6058 __ mov(r3, Operand(r3, LSR, r2));
6059 // Unsigned shift is not allowed to produce a negative number, so
6060 // check the sign bit and the sign bit after Smi tagging.
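// Added note: bits 31 and 30 must both be clear, since a non-negative
// smi holds at most 30 value bits. E.g. -1 >>> 0 == 0xffffffff cannot
// be represented and falls through to the slow case.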
6061 __ tst(r3, Operand(0xc0000000));
6062 __ b(ne, &slow);
6063 // Smi tag result.
6064 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
6065 break;
6066 case Token::SHL:
6067 // Remove tags from operands.
6068 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
6069 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
6070 // Use only the 5 least significant bits of the shift count.
6071 __ and_(r2, r2, Operand(0x1f));
6072 __ mov(r3, Operand(r3, LSL, r2));
6073 // Check that the signed result fits in a Smi.
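// Added note: adding 0x40000000 maps the valid untagged smi range
// [-0x40000000, 0x3fffffff] onto the non-negative range, so a negative
// (mi) sum means the shifted value does not fit. E.g. 1 << 30 goes to
// the slow case, while -1 << 30 == -0x40000000 still fits.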
6074 __ add(r2, r3, Operand(0x40000000), SetCC);
6075 __ b(mi, &slow);
6076 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
6077 break;
6078 default: UNREACHABLE();
6079 }
6080 __ Ret();
6081 __ bind(&slow);
6082 HandleNonSmiBitwiseOp(masm);
6083 break;
6084 }
6085
6086 default: UNREACHABLE();
6087 }
6088 // This code should be unreachable.
6089 __ stop("Unreachable");
6090}
6091
6092
6093void StackCheckStub::Generate(MacroAssembler* masm) {
6094 // Do tail-call to runtime routine. Runtime routines expect at least one
6095 // argument, so give it a Smi.
6096 __ mov(r0, Operand(Smi::FromInt(0)));
6097 __ push(r0);
6098 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
6099
6100 __ StubReturn(1);
6101}
6102
6103
Leon Clarkee46be812010-01-19 14:06:41 +00006104void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
6105 ASSERT(op_ == Token::SUB);
6106
Steve Blocka7e24c12009-10-30 11:49:00 +00006107 Label undo;
6108 Label slow;
6109 Label not_smi;
6110
6111 // Enter runtime system if the value is not a smi.
6112 __ tst(r0, Operand(kSmiTagMask));
6113 __ b(ne, &not_smi);
6114
6115 // Enter the runtime system if the value of the expression is zero,
6116 // so that we handle 0 vs. -0 correctly (a smi cannot represent -0).
6117 __ cmp(r0, Operand(0));
6118 __ b(eq, &slow);
6119
6120 // The value of the expression is a smi that is not zero. Try
6121 // optimistic subtraction '0 - value'.
6122 __ rsb(r1, r0, Operand(0), SetCC);
6123 __ b(vs, &slow);
6124
6125 __ mov(r0, Operand(r1)); // Set r0 to result.
6126 __ StubReturn(1);
6127
6128 // Enter runtime system.
6129 __ bind(&slow);
6130 __ push(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00006131 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
6132
6133 __ bind(&not_smi);
6134 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
6135 __ b(ne, &slow);
6136 // r0 is a heap number. Get a new heap number in r1.
6137 if (overwrite_) {
6138 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6139 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
6140 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6141 } else {
6142 AllocateHeapNumber(masm, &slow, r1, r2, r3);
6143 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
6144 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6145 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
6146 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
6147 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
6148 __ mov(r0, Operand(r1));
6149 }
6150 __ StubReturn(1);
6151}
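
// An added host-side sketch (illustration only, never called by the
// generated code) of the negation performed above: flipping the sign bit of
// the upper word of an IEEE 754 double, as the stub does with
// HeapNumber::kSignMask. It assumes a little-endian word order, as on ARM
// running little-endian.
static inline double NegateDoubleBySignBit(double value) {
  union { double number; uint32_t words[2]; } bits;
  bits.number = value;
  bits.words[1] ^= 0x80000000u;  // words[1] holds the sign and exponent.
  return bits.number;  // E.g. 3.5 becomes -3.5 and 0.0 becomes -0.0.
}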
6152
6153
6154int CEntryStub::MinorKey() {
6155 ASSERT(result_size_ <= 2);
6156 // Result returned in r0 or r0+r1 by default.
6157 return 0;
6158}
6159
6160
6161void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
6162 // r0 holds the exception.
6163
6164 // Adjust this code if not the case.
6165 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6166
6167 // Drop the sp to the top of the handler.
6168 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6169 __ ldr(sp, MemOperand(r3));
6170
6171 // Restore the next handler and frame pointer, discard handler state.
6172 ASSERT(StackHandlerConstants::kNextOffset == 0);
6173 __ pop(r2);
6174 __ str(r2, MemOperand(r3));
6175 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6176 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
6177
6178 // Before returning we restore the context from the frame pointer if
6179 // not NULL. The frame pointer is NULL in the exception handler of a
6180 // JS entry frame.
6181 __ cmp(fp, Operand(0));
6182 // Set cp to NULL if fp is NULL.
6183 __ mov(cp, Operand(0), LeaveCC, eq);
6184 // Restore cp otherwise.
6185 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6186#ifdef DEBUG
6187 if (FLAG_debug_code) {
6188 __ mov(lr, Operand(pc));
6189 }
6190#endif
6191 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6192 __ pop(pc);
6193}
6194
6195
6196void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
6197 UncatchableExceptionType type) {
6198 // Adjust this code if not the case.
6199 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6200
6201 // Drop sp to the top stack handler.
6202 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6203 __ ldr(sp, MemOperand(r3));
6204
6205 // Unwind the handlers until the ENTRY handler is found.
6206 Label loop, done;
6207 __ bind(&loop);
6208 // Load the type of the current stack handler.
6209 const int kStateOffset = StackHandlerConstants::kStateOffset;
6210 __ ldr(r2, MemOperand(sp, kStateOffset));
6211 __ cmp(r2, Operand(StackHandler::ENTRY));
6212 __ b(eq, &done);
6213 // Fetch the next handler in the list.
6214 const int kNextOffset = StackHandlerConstants::kNextOffset;
6215 __ ldr(sp, MemOperand(sp, kNextOffset));
6216 __ jmp(&loop);
6217 __ bind(&done);
6218
6219 // Set the top handler address to the next handler past the current ENTRY handler.
6220 ASSERT(StackHandlerConstants::kNextOffset == 0);
6221 __ pop(r2);
6222 __ str(r2, MemOperand(r3));
6223
6224 if (type == OUT_OF_MEMORY) {
6225 // Set external caught exception to false.
6226 ExternalReference external_caught(Top::k_external_caught_exception_address);
6227 __ mov(r0, Operand(false));
6228 __ mov(r2, Operand(external_caught));
6229 __ str(r0, MemOperand(r2));
6230
6231 // Set pending exception and r0 to out of memory exception.
6232 Failure* out_of_memory = Failure::OutOfMemoryException();
6233 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
6234 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
6235 __ str(r0, MemOperand(r2));
6236 }
6237
6238 // Stack layout at this point. See also StackHandlerConstants.
6239 // sp -> state (ENTRY)
6240 // fp
6241 // lr
6242
6243 // Discard handler state (r2 is not used) and restore frame pointer.
6244 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6245 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
6246 // Before returning we restore the context from the frame pointer if
6247 // not NULL. The frame pointer is NULL in the exception handler of a
6248 // JS entry frame.
6249 __ cmp(fp, Operand(0));
6250 // Set cp to NULL if fp is NULL.
6251 __ mov(cp, Operand(0), LeaveCC, eq);
6252 // Restore cp otherwise.
6253 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6254#ifdef DEBUG
6255 if (FLAG_debug_code) {
6256 __ mov(lr, Operand(pc));
6257 }
6258#endif
6259 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6260 __ pop(pc);
6261}
6262
6263
6264void CEntryStub::GenerateCore(MacroAssembler* masm,
6265 Label* throw_normal_exception,
6266 Label* throw_termination_exception,
6267 Label* throw_out_of_memory_exception,
Steve Blockd0582a62009-12-15 09:54:21 +00006268 ExitFrame::Mode mode,
Steve Blocka7e24c12009-10-30 11:49:00 +00006269 bool do_gc,
6270 bool always_allocate) {
6271 // r0: result parameter for PerformGC, if any
6272 // r4: number of arguments including receiver (C callee-saved)
6273 // r5: pointer to builtin function (C callee-saved)
6274 // r6: pointer to the first argument (C callee-saved)
6275
6276 if (do_gc) {
6277 // Passing r0.
6278 ExternalReference gc_reference = ExternalReference::perform_gc_function();
6279 __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
6280 }
6281
6282 ExternalReference scope_depth =
6283 ExternalReference::heap_always_allocate_scope_depth();
6284 if (always_allocate) {
6285 __ mov(r0, Operand(scope_depth));
6286 __ ldr(r1, MemOperand(r0));
6287 __ add(r1, r1, Operand(1));
6288 __ str(r1, MemOperand(r0));
6289 }
6290
6291 // Call C built-in.
6292 // r0 = argc, r1 = argv
6293 __ mov(r0, Operand(r4));
6294 __ mov(r1, Operand(r6));
6295
6296 // TODO(1242173): To let the GC traverse the return address of the exit
6297 // frames, we need to know where the return address is. Right now,
6298 // we push it on the stack to be able to find it again, but we never
6299 // restore from it in case of changes, which makes it impossible to
6300 // support moving the C entry code stub. This should be fixed, but currently
6301 // this is OK because the CEntryStub gets generated so early in the V8 boot
6302 // sequence that it is never moved.
6303 masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
6304 masm->push(lr);
6305 masm->Jump(r5);
6306
6307 if (always_allocate) {
6308 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
6309 // though (contain the result).
6310 __ mov(r2, Operand(scope_depth));
6311 __ ldr(r3, MemOperand(r2));
6312 __ sub(r3, r3, Operand(1));
6313 __ str(r3, MemOperand(r2));
6314 }
6315
6316 // check for failure result
6317 Label failure_returned;
6318 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
6319 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
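// Added note: failures carry kFailureTag (0x3) in the low two bits, so
// adding 1 clears both, while smis (tag 0) and heap objects (tag 1) keep
// at least one of the low two bits set after the add.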
6320 __ add(r2, r0, Operand(1));
6321 __ tst(r2, Operand(kFailureTagMask));
6322 __ b(eq, &failure_returned);
6323
6324 // Exit C frame and return.
6325 // r0:r1: result
6326 // sp: stack pointer
6327 // fp: frame pointer
Steve Blockd0582a62009-12-15 09:54:21 +00006328 __ LeaveExitFrame(mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00006329
6330 // check if we should retry or throw exception
6331 Label retry;
6332 __ bind(&failure_returned);
6333 ASSERT(Failure::RETRY_AFTER_GC == 0);
6334 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
6335 __ b(eq, &retry);
6336
6337 // Special handling of out of memory exceptions.
6338 Failure* out_of_memory = Failure::OutOfMemoryException();
6339 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
6340 __ b(eq, throw_out_of_memory_exception);
6341
6342 // Retrieve the pending exception and clear the variable.
6343 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
6344 __ ldr(r3, MemOperand(ip));
6345 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6346 __ ldr(r0, MemOperand(ip));
6347 __ str(r3, MemOperand(ip));
6348
6349 // Special handling of termination exceptions which are uncatchable
6350 // by javascript code.
6351 __ cmp(r0, Operand(Factory::termination_exception()));
6352 __ b(eq, throw_termination_exception);
6353
6354 // Handle normal exception.
6355 __ jmp(throw_normal_exception);
6356
6357 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
6358}
6359
6360
6361void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
6362 // Called from JavaScript; parameters are on stack as if calling JS function
6363 // r0: number of arguments including receiver
6364 // r1: pointer to builtin function
6365 // fp: frame pointer (restored after C call)
6366 // sp: stack pointer (restored as callee's sp after C call)
6367 // cp: current context (C callee-saved)
6368
6369 // NOTE: Invocations of builtins may return failure objects
6370 // instead of a proper result. The builtin entry handles
6371 // this by performing a garbage collection and retrying the
6372 // builtin once.
6373
Steve Blockd0582a62009-12-15 09:54:21 +00006374 ExitFrame::Mode mode = is_debug_break
6375 ? ExitFrame::MODE_DEBUG
6376 : ExitFrame::MODE_NORMAL;
Steve Blocka7e24c12009-10-30 11:49:00 +00006377
6378 // Enter the exit frame that transitions from JavaScript to C++.
Steve Blockd0582a62009-12-15 09:54:21 +00006379 __ EnterExitFrame(mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00006380
6381 // r4: number of arguments (C callee-saved)
6382 // r5: pointer to builtin function (C callee-saved)
6383 // r6: pointer to first argument (C callee-saved)
6384
6385 Label throw_normal_exception;
6386 Label throw_termination_exception;
6387 Label throw_out_of_memory_exception;
6388
6389 // Call into the runtime system.
6390 GenerateCore(masm,
6391 &throw_normal_exception,
6392 &throw_termination_exception,
6393 &throw_out_of_memory_exception,
Steve Blockd0582a62009-12-15 09:54:21 +00006394 mode,
Steve Blocka7e24c12009-10-30 11:49:00 +00006395 false,
6396 false);
6397
6398 // Do space-specific GC and retry runtime call.
6399 GenerateCore(masm,
6400 &throw_normal_exception,
6401 &throw_termination_exception,
6402 &throw_out_of_memory_exception,
Steve Blockd0582a62009-12-15 09:54:21 +00006403 mode,
Steve Blocka7e24c12009-10-30 11:49:00 +00006404 true,
6405 false);
6406
6407 // Do full GC and retry runtime call one final time.
6408 Failure* failure = Failure::InternalError();
6409 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
6410 GenerateCore(masm,
6411 &throw_normal_exception,
6412 &throw_termination_exception,
6413 &throw_out_of_memory_exception,
Steve Blockd0582a62009-12-15 09:54:21 +00006414 mode,
Steve Blocka7e24c12009-10-30 11:49:00 +00006415 true,
6416 true);
6417
6418 __ bind(&throw_out_of_memory_exception);
6419 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
6420
6421 __ bind(&throw_termination_exception);
6422 GenerateThrowUncatchable(masm, TERMINATION);
6423
6424 __ bind(&throw_normal_exception);
6425 GenerateThrowTOS(masm);
6426}
6427
6428
6429void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
6430 // r0: code entry
6431 // r1: function
6432 // r2: receiver
6433 // r3: argc
6434 // [sp+0]: argv
6435
6436 Label invoke, exit;
6437
6438 // Called from C, so do not pop argc and args on exit (preserve sp)
6439 // No need to save register-passed args
6440 // Save callee-saved registers (incl. cp and fp), sp, and lr
6441 __ stm(db_w, sp, kCalleeSaved | lr.bit());
6442
6443 // Get address of argv, see stm above.
6444 // r0: code entry
6445 // r1: function
6446 // r2: receiver
6447 // r3: argc
6448 __ add(r4, sp, Operand((kNumCalleeSaved + 1) * kPointerSize));
6449 __ ldr(r4, MemOperand(r4)); // argv
6450
6451 // Push a frame with special values setup to mark it as an entry frame.
6452 // r0: code entry
6453 // r1: function
6454 // r2: receiver
6455 // r3: argc
6456 // r4: argv
6457 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
6458 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
6459 __ mov(r7, Operand(Smi::FromInt(marker)));
6460 __ mov(r6, Operand(Smi::FromInt(marker)));
6461 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
6462 __ ldr(r5, MemOperand(r5));
6463 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
6464
6465 // Setup frame pointer for the frame to be pushed.
6466 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
6467
6468 // Call a faked try-block that does the invoke.
6469 __ bl(&invoke);
6470
6471 // Caught exception: Store result (exception) in the pending
6472 // exception field in the JSEnv and return a failure sentinel.
6473 // Coming in here the fp will be invalid because the PushTryHandler below
6474 // sets it to 0 to signal the existence of the JSEntry frame.
6475 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6476 __ str(r0, MemOperand(ip));
6477 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
6478 __ b(&exit);
6479
6480 // Invoke: Link this frame into the handler chain.
6481 __ bind(&invoke);
6482 // Must preserve r0-r4, r5-r7 are available.
6483 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
6484 // If an exception not caught by another handler occurs, this handler
6485 // returns control to the code after the bl(&invoke) above, which
6486 // restores all kCalleeSaved registers (including cp and fp) to their
6487 // saved values before returning a failure to C.
6488
6489 // Clear any pending exceptions.
6490 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
6491 __ ldr(r5, MemOperand(ip));
6492 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6493 __ str(r5, MemOperand(ip));
6494
6495 // Invoke the function by calling through JS entry trampoline builtin.
6496 // Notice that we cannot store a reference to the trampoline code directly in
6497 // this stub, because runtime stubs are not traversed when doing GC.
6498
6499 // Expected registers by Builtins::JSEntryTrampoline
6500 // r0: code entry
6501 // r1: function
6502 // r2: receiver
6503 // r3: argc
6504 // r4: argv
6505 if (is_construct) {
6506 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
6507 __ mov(ip, Operand(construct_entry));
6508 } else {
6509 ExternalReference entry(Builtins::JSEntryTrampoline);
6510 __ mov(ip, Operand(entry));
6511 }
6512 __ ldr(ip, MemOperand(ip)); // deref address
6513
6514 // Branch and link to JSEntryTrampoline. We don't use the double underscore
6515 // macro for the add instruction because we don't want the coverage tool
6516 // inserting instructions here after we read the pc.
6517 __ mov(lr, Operand(pc));
6518 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
6519
6520 // Unlink this frame from the handler chain. When reading the
6521 // address of the next handler, there is no need to use the address
6522 // displacement since the current stack pointer (sp) points directly
6523 // to the stack handler.
6524 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
6525 __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
6526 __ str(r3, MemOperand(ip));
6527 // No need to restore registers
6528 __ add(sp, sp, Operand(StackHandlerConstants::kSize));
6529
6530
6531 __ bind(&exit); // r0 holds result
6532 // Restore the top frame descriptors from the stack.
6533 __ pop(r3);
6534 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
6535 __ str(r3, MemOperand(ip));
6536
6537 // Reset the stack to the callee saved registers.
6538 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
6539
6540 // Restore callee-saved registers and return.
6541#ifdef DEBUG
6542 if (FLAG_debug_code) {
6543 __ mov(lr, Operand(pc));
6544 }
6545#endif
6546 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
6547}
6548
6549
6550// This stub performs an instanceof, calling the builtin function if
6551// necessary. Uses r1 for the object, r0 for the function that it may
6552// be an instance of (these are fetched from the stack).
6553void InstanceofStub::Generate(MacroAssembler* masm) {
6554 // Get the object - slow case for smis (we may need to throw an exception
6555 // depending on the rhs).
6556 Label slow, loop, is_instance, is_not_instance;
6557 __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6558 __ BranchOnSmi(r0, &slow);
6559
6560 // Check that the left hand is a JS object and put map in r3.
6561 __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
6562 __ b(lt, &slow);
6563 __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
6564 __ b(gt, &slow);
6565
6566 // Get the prototype of the function (r4 is result, r2 is scratch).
6567 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
6568 __ TryGetFunctionPrototype(r1, r4, r2, &slow);
6569
6570 // Check that the function prototype is a JS object.
6571 __ BranchOnSmi(r4, &slow);
6572 __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
6573 __ b(lt, &slow);
6574 __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
6575 __ b(gt, &slow);
6576
6577 // Register mapping: r3 is object map and r4 is function prototype.
6578 // Get prototype of object into r2.
6579 __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
6580
6581 // Loop through the prototype chain looking for the function prototype.
6582 __ bind(&loop);
6583 __ cmp(r2, Operand(r4));
6584 __ b(eq, &is_instance);
6585 __ LoadRoot(ip, Heap::kNullValueRootIndex);
6586 __ cmp(r2, ip);
6587 __ b(eq, &is_not_instance);
6588 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
6589 __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
6590 __ jmp(&loop);
6591
6592 __ bind(&is_instance);
6593 __ mov(r0, Operand(Smi::FromInt(0)));
6594 __ pop();
6595 __ pop();
6596 __ mov(pc, Operand(lr)); // Return.
6597
6598 __ bind(&is_not_instance);
6599 __ mov(r0, Operand(Smi::FromInt(1)));
6600 __ pop();
6601 __ pop();
6602 __ mov(pc, Operand(lr)); // Return.
6603
6604 // Slow-case. Tail call builtin.
6605 __ bind(&slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00006606 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
6607}
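
// An added sketch (illustration only, using a hypothetical node type rather
// than V8's real object model) of the loop above: walk the prototype chain,
// where a match with the function's prototype means "is an instance" and
// reaching null means it is not.
struct IllustrativeProtoNode {
  const IllustrativeProtoNode* prototype;  // NULL terminates the chain.
};
static bool IllustrativeIsInstance(const IllustrativeProtoNode* object_proto,
                                   const IllustrativeProtoNode* func_proto) {
  for (const IllustrativeProtoNode* p = object_proto;
       p != NULL;
       p = p->prototype) {
    if (p == func_proto) return true;  // Corresponds to &is_instance.
  }
  return false;  // Corresponds to &is_not_instance.
}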
6608
6609
6610void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
6611 // Check if the calling frame is an arguments adaptor frame.
6612 Label adaptor;
6613 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6614 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6615 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6616 __ b(eq, &adaptor);
6617
6618 // Nothing to do: The formal number of parameters has already been
6619 // passed in register r0 by the calling function. Just return it.
6620 __ Jump(lr);
6621
6622 // Arguments adaptor case: Read the arguments length from the
6623 // adaptor frame and return it.
6624 __ bind(&adaptor);
6625 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6626 __ Jump(lr);
6627}
6628
6629
6630void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6631 // The displacement is the offset of the last parameter (if any)
6632 // relative to the frame pointer.
6633 static const int kDisplacement =
6634 StandardFrameConstants::kCallerSPOffset - kPointerSize;
6635
6636 // Check that the key is a smi.
6637 Label slow;
6638 __ BranchOnNotSmi(r1, &slow);
6639
6640 // Check if the calling frame is an arguments adaptor frame.
6641 Label adaptor;
6642 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6643 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6644 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6645 __ b(eq, &adaptor);
6646
6647 // Check index against formal parameters count limit passed in
Steve Blockd0582a62009-12-15 09:54:21 +00006648 // through register r0. Use unsigned comparison to get negative
Steve Blocka7e24c12009-10-30 11:49:00 +00006649 // check for free.
6650 __ cmp(r1, r0);
6651 __ b(cs, &slow);
6652
6653 // Read the argument from the stack and return it.
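// Added note: r3 below is the smi difference r0 - r1, and shifting it
// left by kPointerSizeLog2 - kSmiTagSize turns that smi into a byte
// offset. E.g. with r0 == smi 5 and r1 == smi 2, r3 == smi 3 and the
// shift yields 3 * kPointerSize, so fp + 3 * kPointerSize + kDisplacement
// is read.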
6654 __ sub(r3, r0, r1);
6655 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
6656 __ ldr(r0, MemOperand(r3, kDisplacement));
6657 __ Jump(lr);
6658
6659 // Arguments adaptor case: Check index against actual arguments
6660 // limit found in the arguments adaptor frame. Use unsigned
6661 // comparison to get negative check for free.
6662 __ bind(&adaptor);
6663 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6664 __ cmp(r1, r0);
6665 __ b(cs, &slow);
6666
6667 // Read the argument from the adaptor frame and return it.
6668 __ sub(r3, r0, r1);
6669 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
6670 __ ldr(r0, MemOperand(r3, kDisplacement));
6671 __ Jump(lr);
6672
6673 // Slow-case: Handle non-smi or out-of-bounds access to arguments
6674 // by calling the runtime system.
6675 __ bind(&slow);
6676 __ push(r1);
6677 __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
6678}
6679
6680
6681void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
6682 // Check if the calling frame is an arguments adaptor frame.
6683 Label runtime;
6684 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6685 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6686 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6687 __ b(ne, &runtime);
6688
6689 // Patch the arguments.length and the parameters pointer.
6690 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6691 __ str(r0, MemOperand(sp, 0 * kPointerSize));
6692 __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
6693 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
6694 __ str(r3, MemOperand(sp, 1 * kPointerSize));
6695
6696 // Do the runtime call to allocate the arguments object.
6697 __ bind(&runtime);
6698 __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
6699}
6700
6701
6702void CallFunctionStub::Generate(MacroAssembler* masm) {
6703 Label slow;
Leon Clarkee46be812010-01-19 14:06:41 +00006704
6705 // If the receiver might be a value object (string, number or boolean),
6706 // check for this and box it if it is.
6707 if (ReceiverMightBeValue()) {
6708 // Get the receiver from the stack.
6709 // function, receiver [, arguments]
6710 Label receiver_is_value, receiver_is_js_object;
6711 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
6712
6713 // Check if receiver is a smi (which is a number value).
6714 __ BranchOnSmi(r1, &receiver_is_value);
6715
6716 // Check if the receiver is a valid JS object.
6717 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
6718 __ b(ge, &receiver_is_js_object);
6719
6720 // Call the runtime to box the value.
6721 __ bind(&receiver_is_value);
6722 __ EnterInternalFrame();
6723 __ push(r1);
6724 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
6725 __ LeaveInternalFrame();
6726 __ str(r0, MemOperand(sp, argc_ * kPointerSize));
6727
6728 __ bind(&receiver_is_js_object);
6729 }
6730
Steve Blocka7e24c12009-10-30 11:49:00 +00006731 // Get the function to call from the stack.
6732 // function, receiver [, arguments]
6733 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
6734
6735 // Check that the function is really a JavaScript function.
6736 // r1: pushed function (to be verified)
6737 __ BranchOnSmi(r1, &slow);
6738 // Get the map of the function object.
6739 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
6740 __ b(ne, &slow);
6741
6742 // Fast-case: Invoke the function now.
6743 // r1: pushed function
6744 ParameterCount actual(argc_);
6745 __ InvokeFunction(r1, actual, JUMP_FUNCTION);
6746
6747 // Slow-case: Non-function called.
6748 __ bind(&slow);
6749 __ mov(r0, Operand(argc_)); // Setup the number of arguments.
6750 __ mov(r2, Operand(0));
6751 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
6752 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
6753 RelocInfo::CODE_TARGET);
6754}
6755
6756
Leon Clarkee46be812010-01-19 14:06:41 +00006757const char* CompareStub::GetName() {
6758 switch (cc_) {
6759 case lt: return "CompareStub_LT";
6760 case gt: return "CompareStub_GT";
6761 case le: return "CompareStub_LE";
6762 case ge: return "CompareStub_GE";
6763 case ne: {
6764 if (strict_) {
6765 if (never_nan_nan_) {
6766 return "CompareStub_NE_STRICT_NO_NAN";
6767 } else {
6768 return "CompareStub_NE_STRICT";
6769 }
6770 } else {
6771 if (never_nan_nan_) {
6772 return "CompareStub_NE_NO_NAN";
6773 } else {
6774 return "CompareStub_NE";
6775 }
6776 }
6777 }
6778 case eq: {
6779 if (strict_) {
6780 if (never_nan_nan_) {
6781 return "CompareStub_EQ_STRICT_NO_NAN";
6782 } else {
6783 return "CompareStub_EQ_STRICT";
6784 }
6785 } else {
6786 if (never_nan_nan_) {
6787 return "CompareStub_EQ_NO_NAN";
6788 } else {
6789 return "CompareStub_EQ";
6790 }
6791 }
6792 }
6793 default: return "CompareStub";
6794 }
6795}
6796
6797
Steve Blocka7e24c12009-10-30 11:49:00 +00006798int CompareStub::MinorKey() {
Leon Clarkee46be812010-01-19 14:06:41 +00006799 // Encode the three parameters in a unique 16 bit value.
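 // Added note: on ARM the condition is encoded in the top four bits of
 // cc_, so cc_ >> 26 leaves the two low bits free; bit 0 holds strict_,
 // bit 1 holds never_nan_nan_, and bits 2 and up hold the condition.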
6800 ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
6801 int nnn_value = (never_nan_nan_ ? 2 : 0);
6802 if (cc_ != eq) nnn_value = 0; // Avoid duplicate stubs.
6803 return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00006804}
6805
6806
Leon Clarked91b9f72010-01-27 17:25:45 +00006807
6808
6809void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6810 Register left,
6811 Register right,
6812 Register scratch1,
6813 Register scratch2,
6814 Register scratch3,
6815 Register scratch4) {
6816 Label compare_lengths;
6817 // Find minimum length and length difference.
6818 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
6819 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6820 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
6821 Register length_delta = scratch3;
6822 __ mov(scratch1, scratch2, LeaveCC, gt);
6823 Register min_length = scratch1;
6824 __ tst(min_length, Operand(min_length));
6825 __ b(eq, &compare_lengths);
6826
6827 // Setup registers so that we only need to increment one register
6828 // in the loop.
6829 __ add(scratch2, min_length,
6830 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6831 __ add(left, left, Operand(scratch2));
6832 __ add(right, right, Operand(scratch2));
6833 // Registers left and right now point to the min_length character of the strings.
6834 __ rsb(min_length, min_length, Operand(-1));
6835 Register index = min_length;
6836 // Index starts at -min_length.
6837
6838 {
6839 // Compare loop.
6840 Label loop;
6841 __ bind(&loop);
6842 // Compare characters.
6843 __ add(index, index, Operand(1), SetCC);
6844 __ ldrb(scratch2, MemOperand(left, index), ne);
6845 __ ldrb(scratch4, MemOperand(right, index), ne);
6846 // Skip to compare lengths with eq condition true.
6847 __ b(eq, &compare_lengths);
6848 __ cmp(scratch2, scratch4);
6849 __ b(eq, &loop);
6850 // Fallthrough with eq condition false.
6851 }
6852 // Compare lengths - strings up to min-length are equal.
6853 __ bind(&compare_lengths);
6854 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6855 // If the characters matched up to min_length, use length_delta as the result.
6856 __ mov(r0, Operand(length_delta), SetCC, eq);
6857 // Fall through to here if characters compare not-equal.
6858 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
6859 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
6860 __ Ret();
6861}
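
// An added host-side sketch (illustration only, never called by the
// generated code) of the comparison implemented above: compare characters
// up to the shorter length, then fall back to the length difference.
static int IllustrativeCompareFlatAscii(const char* left, int left_length,
                                        const char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) {
      return left[i] < right[i] ? LESS : GREATER;  // Differing character.
    }
  }
  if (left_length == right_length) return EQUAL;
  return left_length < right_length ? LESS : GREATER;  // Shorter string first.
}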
6862
6863
6864void StringCompareStub::Generate(MacroAssembler* masm) {
6865 Label runtime;
6866
6867 // Stack frame on entry.
6868 // sp[0]: return address
6869 // sp[4]: right string
6870 // sp[8]: left string
6871
6872 __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); // left
6873 __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); // right
6874
6875 Label not_same;
6876 __ cmp(r0, r1);
6877 __ b(ne, &not_same);
6878 ASSERT_EQ(0, EQUAL);
6879 ASSERT_EQ(0, kSmiTag);
6880 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6881 __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
6882 __ add(sp, sp, Operand(2 * kPointerSize));
6883 __ Ret();
6884
6885 __ bind(&not_same);
6886
6887 // Check that both objects are sequential ascii strings.
6888 __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
6889
6890 // Compare flat ascii strings natively. Remove arguments from stack first.
6891 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
6892 __ add(sp, sp, Operand(2 * kPointerSize));
6893 GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
6894
6895 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6896 // tagged as a small integer.
6897 __ bind(&runtime);
6898 __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
6899}
6900
6901
Steve Blocka7e24c12009-10-30 11:49:00 +00006902#undef __
6903
6904} } // namespace v8::internal