// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"


namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm_)
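// The __ shorthand routes every assembler call through ACCESS_MASM so that
// instrumented (e.g. coverage) builds can intercept each emitted instruction.
// The few masm_-> calls below are deliberate exceptions where the emitted
// code size must not change (see the return sequence check in Generate()).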

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);


// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

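// For each register, registers_[i] holds either kPush (the value lives only
// in the register and must be saved on the stack), kIgnore (the value is
// dead), or a byte offset from fp giving the value's home slot, with
// kSyncedFlag set when that slot is already up to date.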
void DeferredCode::SaveRegisters() {
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order due to the stack.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.
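//
// CodeGenState objects form a stack through their previous_ pointers: each
// constructor installs the new state on the owning code generator and the
// destructor restores the previous one, so a state is active exactly for
// the C++ scope that created it.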

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      true_target_(NULL),
      false_target_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           JumpTarget* true_target,
                           JumpTarget* false_target)
    : owner_(owner),
      true_target_(true_target),
      false_target_(false_target),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// CodeGenerator implementation

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      function_return_is_shadowed_(false) {
}


Scope* CodeGenerator::scope() { return info_->function()->scope(); }


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());

  // Initialize state.
  info_ = info;
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;
  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    if (info->mode() == CompilationInfo::PRIMARY) {
      frame_->Enter();
      // tos: code slot

      // Allocate space for locals and initialize them.  This also checks
      // for stack overflow.
      frame_->AllocateStackSlots();

      VirtualFrame::SpilledScope spilled_scope;
      int heap_slots = scope()->num_heap_slots();
      if (heap_slots > 0) {
        // Allocate local context.
        // Get outer context and create a new context based on it.
        __ ldr(r0, frame_->Function());
        frame_->EmitPush(r0);
        if (heap_slots <= FastNewContextStub::kMaximumSlots) {
          FastNewContextStub stub(heap_slots);
          frame_->CallStub(&stub, 1);
        } else {
          frame_->CallRuntime(Runtime::kNewContext, 1);
        }

#ifdef DEBUG
        JumpTarget verified_true;
        __ cmp(r0, Operand(cp));
        verified_true.Branch(eq);
        __ stop("NewContext: r0 is expected to be the same as cp");
        verified_true.Bind();
#endif
        // Update context local.
        __ str(cp, frame_->Context());
      }

      // TODO(1241774): Improve this code:
      // 1) only needed if we have a context
      // 2) no need to recompute context ptr every single time
      // 3) don't copy parameter operand code from SlotOperand!
      {
        Comment cmnt2(masm_, "[ copy context parameters into .context");
        // Note that iteration order is relevant here!  If we have the same
        // parameter twice (e.g., function (x, y, x)) and that parameter
        // needs to be copied into the context, it is the value of its last
        // occurrence that must end up in the context.  This is a rare case,
        // so we don't check for it; instead we rely on the copying order:
        // such a parameter is copied repeatedly into the same context
        // location, and thus the last value is what is seen inside the
        // function.
        for (int i = 0; i < scope()->num_parameters(); i++) {
          Variable* par = scope()->parameter(i);
          Slot* slot = par->slot();
          if (slot != NULL && slot->type() == Slot::CONTEXT) {
            ASSERT(!scope()->is_global_scope());  // No params in global scope.
            __ ldr(r1, frame_->ParameterAt(i));
            // Loads r2 with context; used below in RecordWrite.
            __ str(r1, SlotOperand(slot, r2));
            // Load the offset into r3.
            int slot_offset =
                FixedArray::kHeaderSize + slot->index() * kPointerSize;
            __ mov(r3, Operand(slot_offset));
            __ RecordWrite(r2, r3, r1);
          }
        }
      }

      // Store the arguments object.  This must happen after context
      // initialization because the arguments object may be stored in the
      // context.
      if (scope()->arguments() != NULL) {
        Comment cmnt(masm_, "[ allocate arguments object");
        ASSERT(scope()->arguments_shadow() != NULL);
        Variable* arguments = scope()->arguments()->var();
        Variable* shadow = scope()->arguments_shadow()->var();
        ASSERT(arguments != NULL && arguments->slot() != NULL);
        ASSERT(shadow != NULL && shadow->slot() != NULL);
        ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
        __ ldr(r2, frame_->Function());
        // The receiver is below the arguments, the return address, and the
        // frame pointer on the stack.
        const int kReceiverDisplacement = 2 + scope()->num_parameters();
        __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
        __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
        frame_->Adjust(3);
        __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
        frame_->CallStub(&stub, 3);
        frame_->EmitPush(r0);
        StoreToSlot(arguments->slot(), NOT_CONST_INIT);
        StoreToSlot(shadow->slot(), NOT_CONST_INIT);
        frame_->Drop();  // Value is no longer needed.
      }

      // Initialize ThisFunction reference if present.
      if (scope()->is_function_scope() && scope()->function() != NULL) {
        __ mov(ip, Operand(Factory::the_hole_value()));
        frame_->EmitPush(ip);
        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
      }
    } else {
      // When used as the secondary compiler for splitting, r1, cp,
      // fp, and lr have been pushed on the stack.  Adjust the virtual
      // frame to match this state.
      frame_->Adjust(4);
      allocator_->Unuse(r1);
      allocator_->Unuse(lr);

      // Bind all the bailout labels to the beginning of the function.
      List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
      for (int i = 0; i < bailouts->length(); i++) {
        __ bind(bailouts->at(i)->label());
      }
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements).  In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state.  Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatementsAndSpill(info->function()->body());
    }
  }

  // Generate the return sequence if necessary.
  if (has_valid_frame() || function_return_.is_linked()) {
    if (!function_return_.is_linked()) {
      CodeForReturnPosition(info->function());
    }
    // exit
    // r0: result
    // sp: stack pointer
    // fp: frame pointer
    // cp: callee's context
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);

    function_return_.Bind();
    if (FLAG_trace) {
      // Push the return value on the stack as the parameter.
      // Runtime::TraceExit returns the parameter as it is.
      frame_->EmitPush(r0);
      frame_->CallRuntime(Runtime::kTraceExit, 1);
    }

    // Add a label for checking the size of the code used for returning.
    Label check_exit_codesize;
    masm_->bind(&check_exit_codesize);

    // Calculate the exact length of the return sequence and make sure that
    // the constant pool is not emitted inside of the return sequence.
    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
    int return_sequence_length = Assembler::kJSReturnSequenceLength;
    if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
      // An additional mov instruction will be generated.
      return_sequence_length++;
    }
    masm_->BlockConstPoolFor(return_sequence_length);

    // Tear down the frame, which will restore the caller's frame pointer
    // and the link register.
    frame_->Exit();

    // Here we use masm_-> instead of the __ macro to prevent the code
    // coverage tool from instrumenting these instructions, as we rely on
    // the exact code size here.
    masm_->add(sp, sp, Operand(sp_delta));
    masm_->Jump(lr);

    // Check that the size of the code used for returning matches what is
    // expected by the debugger.  The add instruction above is an addressing
    // mode 1 instruction; there are restrictions on which immediate values
    // can be encoded directly, and other immediate values require an
    // additional instruction to move the immediate into a temporary
    // register first.
    ASSERT_EQ(return_sequence_length,
              masm_->InstructionsGeneratedSince(&check_exit_codesize));
  }

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now.  Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we
      // are always at a function context.  However it is safe to
      // dereference because the function context of a function context is
      // itself.  Before deleting this mov we should try to create a
      // counter-example first, though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}


MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { CodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (e.g., a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Jump();
    materialize_true.Bind();
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
      frame_->EmitPush(r0);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
      frame_->EmitPush(r0);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::LoadGlobal() {
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(r0, GlobalObject());
  frame_->EmitPush(r0);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(scratch,
         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(scratch);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  VirtualFrame::SpilledScope spilled_scope;
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValueAndSpill();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
    frame_->SpillAll();
  } else {
    // Anything else can be handled normally.
    LoadAndSpill(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    LoadAndSpill(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      LoadAndSpill(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    LoadAndSpill(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  VirtualFrame::SpilledScope spilled_scope;
  // Pop a reference from the stack while preserving TOS.
  Comment cmnt(masm_, "[ UnloadReference");
  int size = ref->size();
  if (size > 0) {
    frame_->EmitPop(r0);
    frame_->Drop(size);
    frame_->EmitPush(r0);
  }
  ref->set_unloaded();
}


// ECMA-262, section 9.2, page 30: ToBoolean().  Convert the given
// register to a boolean in the condition code register.  The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  VirtualFrame::SpilledScope spilled_scope;
  // Note: The generated code snippet does not change stack variables.
  // Only the condition code should be set.
  frame_->EmitPop(r0);

  // Fast case checks.

  // Check if the value is 'false'.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is 'true'.
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r0, ip);
  true_target->Branch(eq);

  // Check if the value is 'undefined'.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is a smi.
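  // (The smi zero is the only smi that converts to false; every other smi,
  // positive or negative, is true.)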
  __ cmp(r0, Operand(Smi::FromInt(0)));
  false_target->Branch(eq);
  __ tst(r0, Operand(kSmiTagMask));
  true_target->Branch(eq);

  // Slow case: call the runtime.
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kToBool, 1);
  // Convert the result (r0) to a condition code.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           int constant_rhs) {
  VirtualFrame::SpilledScope spilled_scope;
  // sp[0] : y
  // sp[1] : x
  // result : r0

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:  // fall through.
    case Token::SUB:  // fall through.
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      frame_->EmitPop(r0);  // r0 : y
      frame_->EmitPop(r1);  // r1 : x
      GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
      frame_->CallStub(&stub, 0);
      break;
    }

    case Token::COMMA:
      frame_->EmitPop(r0);
      // Simply discard the left value.
      frame_->Drop();
      break;

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiOperation::Generate() {
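  // On entry r0 holds the result of a speculative inline operation that
  // either overflowed or involved a non-smi.  For ADD and SUB the optimistic
  // arithmetic has already been performed, so it is undone here to recover
  // the original operands before falling back to the generic stub.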
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
      if (reversed_) {
        __ sub(r0, r0, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, r0, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, r0, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (reversed_) {
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ mov(r1, Operand(r0));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (!reversed_) {
        __ mov(r1, Operand(r0));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      } else {
        UNREACHABLE();  // Should have been handled in SmiOperation.
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
  __ CallStub(&stub);
}


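// Returns whether x has at most two bits set.  Each x &= x - 1 clears the
// lowest set bit (e.g. 0b0110 -> 0b0100 -> 0), so after clearing one bit
// the test (x & (x - 1)) == 0 succeeds exactly when at most one bit is
// left, i.e. when the original value had at most two bits set.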
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


880
881void CodeGenerator::SmiOperation(Token::Value op,
882 Handle<Object> value,
883 bool reversed,
884 OverwriteMode mode) {
885 VirtualFrame::SpilledScope spilled_scope;
886 // NOTE: This is an attempt to inline (a bit) more of the code for
887 // some possible smi operations (like + and -) when (at least) one
888 // of the operands is a literal smi. With this optimization, the
889 // performance of the system is increased by ~15%, and the generated
890 // code size is increased by ~1% (measured on a combination of
891 // different benchmarks).
892
893 // sp[0] : operand
894
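  // Reminder on the smi representation assumed throughout: a smi keeps the
  // integer value shifted up by kSmiTagSize with the low tag bit clear
  // (kSmiTag == 0), so tst against kSmiTagMask sets the Z flag exactly for
  // smis, and untagging is an arithmetic shift right.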
  int int_value = Smi::cast(*value)->value();

  JumpTarget exit;
  frame_->EmitPop(r0);

  bool something_to_inline = true;
  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);

      __ add(r0, r0, Operand(value), SetCC);
      deferred->Branch(vs);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);

      if (reversed) {
        __ rsb(r0, r0, Operand(value), SetCC);
      } else {
        __ sub(r0, r0, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      switch (op) {
        case Token::BIT_OR:  __ orr(r0, r0, Operand(value)); break;
        case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
        case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
        default: UNREACHABLE();
      }
      deferred->BindExit();
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
        break;
      }
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, shift_value, false, mode);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            __ mov(r2, Operand(r2, LSL, shift_value));
          }
          // Check that the *unsigned* result fits in a smi.
          __ add(r3, r2, Operand(0x40000000), SetCC);
          deferred->Branch(mi);
          break;
        }
        case Token::SHR: {
          // LSR by immediate 0 means shifting 32 bits.
          if (shift_value != 0) {
            __ mov(r2, Operand(r2, LSR, shift_value));
          }
          // Check that the *unsigned* result fits in a smi.
          // Neither of the two high-order bits can be set:
          // - 0x80000000: the high bit would be lost when smi tagging;
          // - 0x40000000: this number would convert to negative when
          //   smi tagging.
          // These two cases can only happen with shifts by 0 or 1 when
          // handed a valid smi.
          __ and_(r3, r2, Operand(0xc0000000), SetCC);
          deferred->Branch(ne);
          break;
        }
        case Token::SAR: {
          if (shift_value != 0) {
            // ASR by immediate 0 means shifting 32 bits.
            __ mov(r2, Operand(r2, ASR, shift_value));
          }
          break;
        }
        default: UNREACHABLE();
      }
      __ mov(r0, Operand(r2, LSL, kSmiTagSize));
      deferred->BindExit();
      break;
    }

    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
        break;
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(r0, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-smis and negative.
      mask = (int_value << kSmiTagSize) - 1;
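      // For a non-negative smi x and a power-of-two modulus m, the tagged
      // value of x % m is just the low bits of the tagged x, so one mask
      // (which also covers the tag bit) computes the result in place.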
      __ and_(r0, r0, Operand(mask));
      deferred->BindExit();
      break;
    }

    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
        break;
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-smi.  It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
      __ tst(r0, Operand(mask));
      deferred->Branch(ne);
      MultiplyByKnownInt(masm_, r0, r0, int_value);
      deferred->BindExit();
      break;
    }

    default:
      something_to_inline = false;
      break;
  }

  if (!something_to_inline) {
    if (!reversed) {
      frame_->EmitPush(r0);
      __ mov(r0, Operand(value));
      frame_->EmitPush(r0);
      GenericBinaryOperation(op, mode, int_value);
    } else {
      __ mov(ip, Operand(value));
      frame_->EmitPush(ip);
      frame_->EmitPush(r0);
      GenericBinaryOperation(op, mode, kUnknownIntValue);
    }
  }

  exit.Bind();
}


void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  if (left != NULL) LoadAndSpill(left);
  if (right != NULL) LoadAndSpill(right);

  VirtualFrame::SpilledScope spilled_scope;
  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  JumpTarget exit;
  JumpTarget smi;
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    frame_->EmitPop(r1);
    frame_->EmitPop(r0);
  } else {
    frame_->EmitPop(r0);
    frame_->EmitPop(r1);
  }
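  // If both operands are smis, the tag bit of their bitwise or is also
  // clear, so a single tst of the combined word detects the all-smi case.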
  __ orr(r2, r0, Operand(r1));
  __ tst(r2, Operand(kSmiTagMask));
  smi.Branch(eq);

  // Perform non-smi comparison by stub.
  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
  // We call with 0 args because there are 0 on the stack.
  CompareStub stub(cc, strict);
  frame_->CallStub(&stub, 0);
  __ cmp(r0, Operand(0));
  exit.Jump();

  // Do smi comparisons by pointer comparison.
  smi.Bind();
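  // (Tagging is order-preserving: a smi is just the value shifted left by
  // one, so comparing the tagged words gives the same flags as comparing
  // the untagged integers.)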
  __ cmp(r1, Operand(r0));

  exit.Bind();
  cc_reg_ = cc;
}


// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      CallFunctionFlags flags,
                                      int position) {
  VirtualFrame::SpilledScope spilled_scope;
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    LoadAndSpill(args->at(i));
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.
  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
  CallFunctionStub call_function(arg_count, in_loop, flags);
  frame_->CallStub(&call_function, arg_count + 1);

  // Restore context and pop function from the stack.
  __ ldr(cp, frame_->Context());
  frame_->Drop();  // discard the TOS
}


void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(has_cc());
  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
  target->Branch(cc);
  cc_reg_ = al;
}


void CodeGenerator::CheckStack() {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ check stack");
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  // Put the lr setup instruction in the delay slot.  kInstrSize is added to
  // the implicit 8 byte offset that always applies to operations with pc
  // and gives a return address 12 bytes down.
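  // In outline, the sequence emitted below is (sketch only; the stub
  // address is materialized by the relocated mov):
  //   add lr, pc, #4        ; lr = address of the instruction after the mov
  //   cmp sp, ip            ; compare against the stack limit
  //   movlo pc, #<stub>     ; tail-call StackCheckStub only when sp < limit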
  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
  masm_->cmp(sp, Operand(ip));
  StackCheckStub stub;
  // Call the stub if lower.
  masm_->mov(pc,
             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
                     RelocInfo::CODE_TARGET),
             LeaveCC,
             lo);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
    VisitAndSpill(statements->at(i));
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  VisitStatementsAndSpill(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  VirtualFrame::SpilledScope spilled_scope;
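  // Runtime::kDeclareGlobals consumes three stack arguments: the current
  // context, the fixed array of name/value pairs, and a smi flag telling
  // the runtime whether this is eval code (which affects how the global
  // properties are declared).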
  frame_->EmitPush(cp);
  __ mov(r0, Operand(pairs));
  frame_->EmitPush(r0);
  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // The result is discarded.
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call.
    frame_->EmitPush(cp);
    __ mov(r0, Operand(var->name()));
    frame_->EmitPush(r0);
    // Declaration nodes are always declared in only two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    __ mov(r0, Operand(Smi::FromInt(attr)));
    frame_->EmitPush(r0);
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
      frame_->EmitPush(r0);
    } else if (node->fun() != NULL) {
      LoadAndSpill(node->fun());
    } else {
      __ mov(r0, Operand(0));  // no initial value!
      frame_->EmitPush(r0);
    }
    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).
    ASSERT(frame_->height() == original_height);
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    {
      // Set initial value.
      Reference target(this, node->proxy());
      LoadAndSpill(val);
      target.SetValue(NOT_CONST_INIT);
    }
    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  LoadAndSpill(expression);
  frame_->Drop();
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);

  JumpTarget exit;
  if (has_then_stm && has_else_stm) {
    Comment cmnt(masm_, "[ IfThenElse");
    JumpTarget then;
    JumpTarget else_;
    // if (cond)
    LoadConditionAndSpill(node->condition(), &then, &else_, true);
    if (frame_ != NULL) {
      Branch(false, &else_);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      VisitAndSpill(node->then_statement());
    }
    if (frame_ != NULL) {
      exit.Jump();
    }
    // else
    if (else_.is_linked()) {
      else_.Bind();
      VisitAndSpill(node->else_statement());
    }

  } else if (has_then_stm) {
    Comment cmnt(masm_, "[ IfThen");
    ASSERT(!has_else_stm);
    JumpTarget then;
    // if (cond)
    LoadConditionAndSpill(node->condition(), &then, &exit, true);
    if (frame_ != NULL) {
      Branch(false, &exit);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      VisitAndSpill(node->then_statement());
    }

  } else if (has_else_stm) {
    Comment cmnt(masm_, "[ IfElse");
    ASSERT(!has_then_stm);
    JumpTarget else_;
    // if (!cond)
    LoadConditionAndSpill(node->condition(), &exit, &else_, true);
    if (frame_ != NULL) {
      Branch(true, &exit);
    }
    // else
    if (frame_ != NULL || else_.is_linked()) {
      else_.Bind();
      VisitAndSpill(node->else_statement());
    }

  } else {
    Comment cmnt(masm_, "[ If");
    ASSERT(!has_then_stm && !has_else_stm);
    // if (cond)
    LoadConditionAndSpill(node->condition(), &exit, &exit, false);
    if (frame_ != NULL) {
      if (has_cc()) {
        cc_reg_ = al;
      } else {
        frame_->Drop();
      }
    }
  }

  // end
  if (exit.is_linked()) {
    exit.Bind();
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  LoadAndSpill(node->expression());
  if (function_return_is_shadowed_) {
    frame_->EmitPop(r0);
    function_return_.Jump();
  } else {
    // Pop the result from the frame and prepare the frame for
    // returning, thus making it easier to merge.
    frame_->EmitPop(r0);
    frame_->PrepareForReturn();

    function_return_.Jump();
  }
}


void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  LoadAndSpill(node->expression());
  if (node->is_catch_block()) {
    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    frame_->CallRuntime(Runtime::kPushContext, 1);
  }
#ifdef DEBUG
  JumpTarget verified_true;
  __ cmp(r0, Operand(cp));
  verified_true.Branch(eq);
  __ stop("PushContext: r0 is expected to be the same as cp");
  verified_true.Bind();
#endif
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);

  LoadAndSpill(node->tag());

  JumpTarget next_test;
  JumpTarget fall_through;
  JumpTarget default_entry;
  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;

  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);
    if (clause->is_default()) {
      // Remember the default clause and compile it at the end.
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case clause");
    // Compile the test.
    next_test.Bind();
    next_test.Unuse();
    // Duplicate TOS.
    __ ldr(r0, frame_->Top());
    frame_->EmitPush(r0);
    Comparison(eq, NULL, clause->label(), true);
    Branch(false, &next_test);

    // Before entering the body from the test, remove the switch value from
    // the stack.
    frame_->Drop();

    // Label the body so that fall through is enabled.
    if (i > 0 && cases->at(i - 1)->is_default()) {
      default_exit.Bind();
    } else {
      fall_through.Bind();
      fall_through.Unuse();
    }
    VisitStatementsAndSpill(clause->statements());

    // If control flow can fall through from the body, jump to the next body
    // or the end of the statement.
    if (frame_ != NULL) {
      if (i < length - 1 && cases->at(i + 1)->is_default()) {
        default_entry.Jump();
      } else {
        fall_through.Jump();
      }
    }
  }

  // The final "test" removes the switch value.
  next_test.Bind();
  frame_->Drop();

  // If there is a default clause, compile it.
  if (default_clause != NULL) {
    Comment cmnt(masm_, "[ Default clause");
    default_entry.Bind();
    VisitStatementsAndSpill(default_clause->statements());
    // If control flow can fall out of the default and there is a case after
    // it, jump to that case's body.
    if (frame_ != NULL && default_exit.is_bound()) {
      default_exit.Jump();
    }
  }

  if (fall_through.is_linked()) {
    fall_through.Bind();
  }

  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
1554
1555
Steve Block3ce2e202009-11-05 08:53:23 +00001556void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001557#ifdef DEBUG
1558 int original_height = frame_->height();
1559#endif
1560 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001561 Comment cmnt(masm_, "[ DoWhileStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001562 CodeForStatementPosition(node);
1563 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
Steve Block3ce2e202009-11-05 08:53:23 +00001564 JumpTarget body(JumpTarget::BIDIRECTIONAL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001565
Steve Block3ce2e202009-11-05 08:53:23 +00001566 // Label the top of the loop for the backward CFG edge. If the test
1567 // is always true we can use the continue target, and if the test is
1568 // always false there is no need.
1569 ConditionAnalysis info = AnalyzeCondition(node->cond());
1570 switch (info) {
1571 case ALWAYS_TRUE:
Steve Blocka7e24c12009-10-30 11:49:00 +00001572 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1573 node->continue_target()->Bind();
Steve Block3ce2e202009-11-05 08:53:23 +00001574 break;
1575 case ALWAYS_FALSE:
1576 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1577 break;
1578 case DONT_KNOW:
1579 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1580 body.Bind();
1581 break;
1582 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001583
Steve Block3ce2e202009-11-05 08:53:23 +00001584 CheckStack(); // TODO(1222600): ignore if body contains calls.
1585 VisitAndSpill(node->body());
Steve Blocka7e24c12009-10-30 11:49:00 +00001586
Steve Blockd0582a62009-12-15 09:54:21 +00001587 // Compile the test.
Steve Block3ce2e202009-11-05 08:53:23 +00001588 switch (info) {
1589 case ALWAYS_TRUE:
1590 // If control can fall off the end of the body, jump back to the
1591 // top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001592 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001593 node->continue_target()->Jump();
Steve Blocka7e24c12009-10-30 11:49:00 +00001594 }
1595 break;
Steve Block3ce2e202009-11-05 08:53:23 +00001596 case ALWAYS_FALSE:
1597 // If we have a continue in the body, we only have to bind its
1598 // jump target.
1599 if (node->continue_target()->is_linked()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001600 node->continue_target()->Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001601 }
Steve Block3ce2e202009-11-05 08:53:23 +00001602 break;
1603 case DONT_KNOW:
1604 // We have to compile the test expression if it can be reached by
1605 // control flow falling out of the body or via continue.
1606 if (node->continue_target()->is_linked()) {
1607 node->continue_target()->Bind();
1608 }
1609 if (has_valid_frame()) {
Steve Blockd0582a62009-12-15 09:54:21 +00001610 Comment cmnt(masm_, "[ DoWhileCondition");
1611 CodeForDoWhileConditionPosition(node);
1612 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001613 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001614        // An invalid frame here indicates that control did not
1615 // fall out of the test expression.
1616 Branch(true, &body);
Steve Blocka7e24c12009-10-30 11:49:00 +00001617 }
1618 }
1619 break;
Steve Blocka7e24c12009-10-30 11:49:00 +00001620 }
1621
1622 if (node->break_target()->is_linked()) {
1623 node->break_target()->Bind();
1624 }
Steve Block3ce2e202009-11-05 08:53:23 +00001625 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1626}
1627
1628
1629void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1630#ifdef DEBUG
1631 int original_height = frame_->height();
1632#endif
1633 VirtualFrame::SpilledScope spilled_scope;
1634 Comment cmnt(masm_, "[ WhileStatement");
1635 CodeForStatementPosition(node);
1636
1637 // If the test is never true and has no side effects there is no need
1638 // to compile the test or body.
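  // (Illustrative example: a loop such as 'while (false) body' therefore
  // compiles to no code at all.)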
1639 ConditionAnalysis info = AnalyzeCondition(node->cond());
1640 if (info == ALWAYS_FALSE) return;
1641
1642 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1643
1644 // Label the top of the loop with the continue target for the backward
1645 // CFG edge.
1646 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1647 node->continue_target()->Bind();
1648
1649 if (info == DONT_KNOW) {
1650 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001651 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001652 if (has_valid_frame()) {
1653 // A NULL frame indicates that control did not fall out of the
1654 // test expression.
1655 Branch(false, node->break_target());
1656 }
1657 if (has_valid_frame() || body.is_linked()) {
1658 body.Bind();
1659 }
1660 }
1661
1662 if (has_valid_frame()) {
1663 CheckStack(); // TODO(1222600): ignore if body contains calls.
1664 VisitAndSpill(node->body());
1665
1666 // If control flow can fall out of the body, jump back to the top.
1667 if (has_valid_frame()) {
1668 node->continue_target()->Jump();
1669 }
1670 }
1671 if (node->break_target()->is_linked()) {
1672 node->break_target()->Bind();
1673 }
1674 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1675}
1676
1677
1678void CodeGenerator::VisitForStatement(ForStatement* node) {
1679#ifdef DEBUG
1680 int original_height = frame_->height();
1681#endif
1682 VirtualFrame::SpilledScope spilled_scope;
1683 Comment cmnt(masm_, "[ ForStatement");
1684 CodeForStatementPosition(node);
1685 if (node->init() != NULL) {
1686 VisitAndSpill(node->init());
1687 }
1688
1689 // If the test is never true there is no need to compile the test or
1690 // body.
1691 ConditionAnalysis info = AnalyzeCondition(node->cond());
1692 if (info == ALWAYS_FALSE) return;
1693
1694 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1695
1696 // If there is no update statement, label the top of the loop with the
1697 // continue target, otherwise with the loop target.
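  // (Illustrative: in 'for (;;) { ... continue; }' a continue can jump
  // straight to the top of the loop, while in 'for (;; i++) { ... continue; }'
  // it must first reach the update code, which then jumps to the loop target.)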
1698 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1699 if (node->next() == NULL) {
1700 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1701 node->continue_target()->Bind();
1702 } else {
1703 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1704 loop.Bind();
1705 }
1706
1707 // If the test is always true, there is no need to compile it.
1708 if (info == DONT_KNOW) {
1709 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001710 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001711 if (has_valid_frame()) {
1712 Branch(false, node->break_target());
1713 }
1714 if (has_valid_frame() || body.is_linked()) {
1715 body.Bind();
1716 }
1717 }
1718
1719 if (has_valid_frame()) {
1720 CheckStack(); // TODO(1222600): ignore if body contains calls.
1721 VisitAndSpill(node->body());
1722
1723 if (node->next() == NULL) {
1724 // If there is no update statement and control flow can fall out
1725 // of the loop, jump directly to the continue label.
1726 if (has_valid_frame()) {
1727 node->continue_target()->Jump();
1728 }
1729 } else {
1730 // If there is an update statement and control flow can reach it
1731 // via falling out of the body of the loop or continuing, we
1732 // compile the update statement.
1733 if (node->continue_target()->is_linked()) {
1734 node->continue_target()->Bind();
1735 }
1736 if (has_valid_frame()) {
 1737        // Record the source position of the statement, because this
 1738        // code, which comes after the code for the body, actually
 1739        // belongs to the loop statement and not the body.
1740 CodeForStatementPosition(node);
1741 VisitAndSpill(node->next());
1742 loop.Jump();
1743 }
1744 }
1745 }
1746 if (node->break_target()->is_linked()) {
1747 node->break_target()->Bind();
1748 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001749 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1750}
1751
1752
1753void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1754#ifdef DEBUG
1755 int original_height = frame_->height();
1756#endif
1757 VirtualFrame::SpilledScope spilled_scope;
1758 Comment cmnt(masm_, "[ ForInStatement");
1759 CodeForStatementPosition(node);
1760
1761 JumpTarget primitive;
1762 JumpTarget jsobject;
1763 JumpTarget fixed_array;
1764 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1765 JumpTarget end_del_check;
1766 JumpTarget exit;
1767
1768 // Get the object to enumerate over (converted to JSObject).
1769 LoadAndSpill(node->enumerable());
1770
 1771  // Both SpiderMonkey and kjs ignore null and undefined, in contrast
 1772  // to the specification; section 12.6.4 mandates a call to ToObject.
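  // (So, as an illustrative consequence, 'for (var p in null) {}' and
  // 'for (var p in undefined) {}' execute the body zero times.)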
1773 frame_->EmitPop(r0);
1774 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
1775 __ cmp(r0, ip);
1776 exit.Branch(eq);
1777 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1778 __ cmp(r0, ip);
1779 exit.Branch(eq);
1780
1781 // Stack layout in body:
1782 // [iteration counter (Smi)]
1783 // [length of array]
1784 // [FixedArray]
1785 // [Map or 0]
1786 // [Object]
1787
1788 // Check if enumerable is already a JSObject
1789 __ tst(r0, Operand(kSmiTagMask));
1790 primitive.Branch(eq);
1791 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
1792 jsobject.Branch(hs);
1793
1794 primitive.Bind();
1795 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00001796 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00001797
1798 jsobject.Bind();
1799 // Get the set of properties (as a FixedArray or Map).
Steve Blockd0582a62009-12-15 09:54:21 +00001800 // r0: value to be iterated over
1801 frame_->EmitPush(r0); // Push the object being iterated over.
1802
1803 // Check cache validity in generated code. This is a fast case for
1804 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
1805 // guarantee cache validity, call the runtime system to check cache
1806 // validity or get the property names in a fixed array.
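  // A sketch of the fast case checked below (illustrative pseudo-code,
  // not part of the original source):
  //   for (obj = receiver; obj != null; obj = obj.map.prototype) {
  //     if (obj.elements != empty_fixed_array) goto call_runtime;
  //     if (obj.map.descriptors have no enum cache) goto call_runtime;
  //     if (obj != receiver && enum cache not empty) goto call_runtime;
  //   }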
1807 JumpTarget call_runtime;
1808 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1809 JumpTarget check_prototype;
1810 JumpTarget use_cache;
1811 __ mov(r1, Operand(r0));
1812 loop.Bind();
1813 // Check that there are no elements.
1814 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
1815 __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
1816 __ cmp(r2, r4);
1817 call_runtime.Branch(ne);
1818 // Check that instance descriptors are not empty so that we can
1819 // check for an enum cache. Leave the map in r3 for the subsequent
1820 // prototype load.
1821 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
1822 __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
1823 __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
1824 __ cmp(r2, ip);
1825 call_runtime.Branch(eq);
 1826  // Check that there is an enum cache in the non-empty instance
1827 // descriptors. This is the case if the next enumeration index
1828 // field does not contain a smi.
1829 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
1830 __ tst(r2, Operand(kSmiTagMask));
1831 call_runtime.Branch(eq);
1832 // For all objects but the receiver, check that the cache is empty.
1833 // r4: empty fixed array root.
1834 __ cmp(r1, r0);
1835 check_prototype.Branch(eq);
1836 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
1837 __ cmp(r2, r4);
1838 call_runtime.Branch(ne);
1839 check_prototype.Bind();
1840 // Load the prototype from the map and loop if non-null.
1841 __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
1842 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1843 __ cmp(r1, ip);
1844 loop.Branch(ne);
1845 // The enum cache is valid. Load the map of the object being
1846 // iterated over and use the cache for the iteration.
1847 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
1848 use_cache.Jump();
1849
1850 call_runtime.Bind();
1851 // Call the runtime to get the property names for the object.
1852 frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
Steve Blocka7e24c12009-10-30 11:49:00 +00001853 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1854
Steve Blockd0582a62009-12-15 09:54:21 +00001855 // If we got a map from the runtime call, we can do a fast
1856 // modification check. Otherwise, we got a fixed array, and we have
1857 // to do a slow check.
1858 // r0: map or fixed array (result from call to
1859 // Runtime::kGetPropertyNamesFast)
Steve Blocka7e24c12009-10-30 11:49:00 +00001860 __ mov(r2, Operand(r0));
1861 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
1862 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
1863 __ cmp(r1, ip);
1864 fixed_array.Branch(ne);
1865
Steve Blockd0582a62009-12-15 09:54:21 +00001866 use_cache.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001867 // Get enum cache
Steve Blockd0582a62009-12-15 09:54:21 +00001868 // r0: map (either the result from a call to
1869 // Runtime::kGetPropertyNamesFast or has been fetched directly from
1870 // the object)
Steve Blocka7e24c12009-10-30 11:49:00 +00001871 __ mov(r1, Operand(r0));
1872 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
1873 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
1874 __ ldr(r2,
1875 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
1876
1877 frame_->EmitPush(r0); // map
1878 frame_->EmitPush(r2); // enum cache bridge cache
1879 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
1880 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1881 frame_->EmitPush(r0);
1882 __ mov(r0, Operand(Smi::FromInt(0)));
1883 frame_->EmitPush(r0);
1884 entry.Jump();
1885
1886 fixed_array.Bind();
1887 __ mov(r1, Operand(Smi::FromInt(0)));
1888 frame_->EmitPush(r1); // insert 0 in place of Map
1889 frame_->EmitPush(r0);
1890
1891 // Push the length of the array and the initial index onto the stack.
1892 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
1893 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1894 frame_->EmitPush(r0);
1895 __ mov(r0, Operand(Smi::FromInt(0))); // init index
1896 frame_->EmitPush(r0);
1897
1898 // Condition.
1899 entry.Bind();
1900 // sp[0] : index
1901 // sp[1] : array/enum cache length
1902 // sp[2] : array or enum cache
1903 // sp[3] : 0 or map
1904 // sp[4] : enumerable
1905 // Grab the current frame's height for the break and continue
1906 // targets only after all the state is pushed on the frame.
1907 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1908 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1909
1910 __ ldr(r0, frame_->ElementAt(0)); // load the current count
1911 __ ldr(r1, frame_->ElementAt(1)); // load the length
1912 __ cmp(r0, Operand(r1)); // compare to the array length
1913 node->break_target()->Branch(hs);
1914
1915 __ ldr(r0, frame_->ElementAt(0));
1916
1917 // Get the i'th entry of the array.
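  // (The index in r0 is a smi, i.e. already shifted left by kSmiTagSize,
  // so the additional shift below by kPointerSizeLog2 - kSmiTagSize yields
  // a byte offset of index * kPointerSize.)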
1918 __ ldr(r2, frame_->ElementAt(2));
1919 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1920 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1921
1922 // Get Map or 0.
1923 __ ldr(r2, frame_->ElementAt(3));
1924 // Check if this (still) matches the map of the enumerable.
1925 // If not, we have to filter the key.
1926 __ ldr(r1, frame_->ElementAt(4));
1927 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
1928 __ cmp(r1, Operand(r2));
1929 end_del_check.Branch(eq);
1930
1931 // Convert the entry to a string (or null if it isn't a property anymore).
 1932  __ ldr(r0, frame_->ElementAt(4)); // load the enumerable
1933 frame_->EmitPush(r0);
1934 frame_->EmitPush(r3); // push entry
Steve Blockd0582a62009-12-15 09:54:21 +00001935 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001936 __ mov(r3, Operand(r0));
1937
1938 // If the property has been removed while iterating, we just skip it.
1939 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1940 __ cmp(r3, ip);
1941 node->continue_target()->Branch(eq);
1942
1943 end_del_check.Bind();
1944 // Store the entry in the 'each' expression and take another spin in the
 1945  // loop. r3: i'th entry of the enum cache (or string thereof).
1946 frame_->EmitPush(r3); // push entry
1947 { Reference each(this, node->each());
1948 if (!each.is_illegal()) {
1949 if (each.size() > 0) {
1950 __ ldr(r0, frame_->ElementAt(each.size()));
1951 frame_->EmitPush(r0);
Leon Clarked91b9f72010-01-27 17:25:45 +00001952 each.SetValue(NOT_CONST_INIT);
1953 frame_->Drop(2);
1954 } else {
1955 // If the reference was to a slot we rely on the convenient property
 1956          // that it doesn't matter whether a value (e.g. r3 pushed above) is
1957 // right on top of or right underneath a zero-sized reference.
1958 each.SetValue(NOT_CONST_INIT);
1959 frame_->Drop();
Steve Blocka7e24c12009-10-30 11:49:00 +00001960 }
1961 }
1962 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001963 // Body.
1964 CheckStack(); // TODO(1222600): ignore if body contains calls.
1965 VisitAndSpill(node->body());
1966
1967 // Next. Reestablish a spilled frame in case we are coming here via
1968 // a continue in the body.
1969 node->continue_target()->Bind();
1970 frame_->SpillAll();
1971 frame_->EmitPop(r0);
1972 __ add(r0, r0, Operand(Smi::FromInt(1)));
1973 frame_->EmitPush(r0);
1974 entry.Jump();
1975
1976 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1977 // any frame.
1978 node->break_target()->Bind();
1979 frame_->Drop(5);
1980
1981 // Exit.
1982 exit.Bind();
1983 node->continue_target()->Unuse();
1984 node->break_target()->Unuse();
1985 ASSERT(frame_->height() == original_height);
1986}
1987
1988
Steve Block3ce2e202009-11-05 08:53:23 +00001989void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001990#ifdef DEBUG
1991 int original_height = frame_->height();
1992#endif
1993 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001994 Comment cmnt(masm_, "[ TryCatchStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001995 CodeForStatementPosition(node);
1996
1997 JumpTarget try_block;
1998 JumpTarget exit;
1999
2000 try_block.Call();
2001 // --- Catch block ---
2002 frame_->EmitPush(r0);
2003
2004 // Store the caught exception in the catch variable.
Leon Clarkee46be812010-01-19 14:06:41 +00002005 Variable* catch_var = node->catch_var()->var();
2006 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
2007 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
Steve Blocka7e24c12009-10-30 11:49:00 +00002008
2009 // Remove the exception from the stack.
2010 frame_->Drop();
2011
2012 VisitStatementsAndSpill(node->catch_block()->statements());
2013 if (frame_ != NULL) {
2014 exit.Jump();
2015 }
2016
2017
2018 // --- Try block ---
2019 try_block.Bind();
2020
2021 frame_->PushTryHandler(TRY_CATCH_HANDLER);
2022 int handler_height = frame_->height();
2023
2024 // Shadow the labels for all escapes from the try block, including
2025 // returns. During shadowing, the original label is hidden as the
2026 // LabelShadow and operations on the original actually affect the
2027 // shadowing label.
2028 //
2029 // We should probably try to unify the escaping labels and the return
2030 // label.
2031 int nof_escapes = node->escaping_targets()->length();
2032 List<ShadowTarget*> shadows(1 + nof_escapes);
2033
2034 // Add the shadow target for the function return.
2035 static const int kReturnShadowIndex = 0;
2036 shadows.Add(new ShadowTarget(&function_return_));
2037 bool function_return_was_shadowed = function_return_is_shadowed_;
2038 function_return_is_shadowed_ = true;
2039 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2040
2041 // Add the remaining shadow targets.
2042 for (int i = 0; i < nof_escapes; i++) {
2043 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2044 }
2045
2046 // Generate code for the statements in the try block.
2047 VisitStatementsAndSpill(node->try_block()->statements());
2048
2049 // Stop the introduced shadowing and count the number of required unlinks.
2050 // After shadowing stops, the original labels are unshadowed and the
2051 // LabelShadows represent the formerly shadowing labels.
2052 bool has_unlinks = false;
2053 for (int i = 0; i < shadows.length(); i++) {
2054 shadows[i]->StopShadowing();
2055 has_unlinks = has_unlinks || shadows[i]->is_linked();
2056 }
2057 function_return_is_shadowed_ = function_return_was_shadowed;
2058
2059 // Get an external reference to the handler address.
2060 ExternalReference handler_address(Top::k_handler_address);
2061
2062 // If we can fall off the end of the try block, unlink from try chain.
2063 if (has_valid_frame()) {
2064 // The next handler address is on top of the frame. Unlink from
2065 // the handler list and drop the rest of this handler from the
2066 // frame.
2067 ASSERT(StackHandlerConstants::kNextOffset == 0);
2068 frame_->EmitPop(r1);
2069 __ mov(r3, Operand(handler_address));
2070 __ str(r1, MemOperand(r3));
2071 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2072 if (has_unlinks) {
2073 exit.Jump();
2074 }
2075 }
2076
2077 // Generate unlink code for the (formerly) shadowing labels that have been
2078 // jumped to. Deallocate each shadow target.
2079 for (int i = 0; i < shadows.length(); i++) {
2080 if (shadows[i]->is_linked()) {
 2081      // Unlink from the try chain.
2082 shadows[i]->Bind();
2083 // Because we can be jumping here (to spilled code) from unspilled
2084 // code, we need to reestablish a spilled frame at this block.
2085 frame_->SpillAll();
2086
2087 // Reload sp from the top handler, because some statements that we
 2088      // break from (e.g. for...in) may have left stuff on the stack.
2089 __ mov(r3, Operand(handler_address));
2090 __ ldr(sp, MemOperand(r3));
2091 frame_->Forget(frame_->height() - handler_height);
2092
2093 ASSERT(StackHandlerConstants::kNextOffset == 0);
2094 frame_->EmitPop(r1);
2095 __ str(r1, MemOperand(r3));
2096 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2097
2098 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2099 frame_->PrepareForReturn();
2100 }
2101 shadows[i]->other_target()->Jump();
2102 }
2103 }
2104
2105 exit.Bind();
2106 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2107}
2108
2109
Steve Block3ce2e202009-11-05 08:53:23 +00002110void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002111#ifdef DEBUG
2112 int original_height = frame_->height();
2113#endif
2114 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00002115 Comment cmnt(masm_, "[ TryFinallyStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002116 CodeForStatementPosition(node);
2117
2118 // State: Used to keep track of reason for entering the finally
2119 // block. Should probably be extended to hold information for
2120 // break/continue from within the try block.
2121 enum { FALLING, THROWING, JUMPING };
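  // Illustrative reading (not from the source): for 'try { f(); }
  // finally { g(); }' the finally block is entered FALLING if f()
  // completes normally, THROWING if f() throws, and JUMPING + i if the
  // shadowed escape target with index i (a break, continue, or return)
  // was taken inside the try block.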
2122
2123 JumpTarget try_block;
2124 JumpTarget finally_block;
2125
2126 try_block.Call();
2127
2128 frame_->EmitPush(r0); // save exception object on the stack
2129 // In case of thrown exceptions, this is where we continue.
2130 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2131 finally_block.Jump();
2132
2133 // --- Try block ---
2134 try_block.Bind();
2135
2136 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2137 int handler_height = frame_->height();
2138
2139 // Shadow the labels for all escapes from the try block, including
2140 // returns. Shadowing hides the original label as the LabelShadow and
2141 // operations on the original actually affect the shadowing label.
2142 //
2143 // We should probably try to unify the escaping labels and the return
2144 // label.
2145 int nof_escapes = node->escaping_targets()->length();
2146 List<ShadowTarget*> shadows(1 + nof_escapes);
2147
2148 // Add the shadow target for the function return.
2149 static const int kReturnShadowIndex = 0;
2150 shadows.Add(new ShadowTarget(&function_return_));
2151 bool function_return_was_shadowed = function_return_is_shadowed_;
2152 function_return_is_shadowed_ = true;
2153 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2154
2155 // Add the remaining shadow targets.
2156 for (int i = 0; i < nof_escapes; i++) {
2157 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2158 }
2159
2160 // Generate code for the statements in the try block.
2161 VisitStatementsAndSpill(node->try_block()->statements());
2162
2163 // Stop the introduced shadowing and count the number of required unlinks.
2164 // After shadowing stops, the original labels are unshadowed and the
2165 // LabelShadows represent the formerly shadowing labels.
2166 int nof_unlinks = 0;
2167 for (int i = 0; i < shadows.length(); i++) {
2168 shadows[i]->StopShadowing();
2169 if (shadows[i]->is_linked()) nof_unlinks++;
2170 }
2171 function_return_is_shadowed_ = function_return_was_shadowed;
2172
2173 // Get an external reference to the handler address.
2174 ExternalReference handler_address(Top::k_handler_address);
2175
2176 // If we can fall off the end of the try block, unlink from the try
2177 // chain and set the state on the frame to FALLING.
2178 if (has_valid_frame()) {
2179 // The next handler address is on top of the frame.
2180 ASSERT(StackHandlerConstants::kNextOffset == 0);
2181 frame_->EmitPop(r1);
2182 __ mov(r3, Operand(handler_address));
2183 __ str(r1, MemOperand(r3));
2184 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2185
2186 // Fake a top of stack value (unneeded when FALLING) and set the
2187 // state in r2, then jump around the unlink blocks if any.
2188 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2189 frame_->EmitPush(r0);
2190 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2191 if (nof_unlinks > 0) {
2192 finally_block.Jump();
2193 }
2194 }
2195
2196 // Generate code to unlink and set the state for the (formerly)
2197 // shadowing targets that have been jumped to.
2198 for (int i = 0; i < shadows.length(); i++) {
2199 if (shadows[i]->is_linked()) {
2200 // If we have come from the shadowed return, the return value is
2201 // in (a non-refcounted reference to) r0. We must preserve it
2202 // until it is pushed.
2203 //
2204 // Because we can be jumping here (to spilled code) from
2205 // unspilled code, we need to reestablish a spilled frame at
2206 // this block.
2207 shadows[i]->Bind();
2208 frame_->SpillAll();
2209
2210 // Reload sp from the top handler, because some statements that
 2211      // we break from (e.g. for...in) may have left stuff on the
2212 // stack.
2213 __ mov(r3, Operand(handler_address));
2214 __ ldr(sp, MemOperand(r3));
2215 frame_->Forget(frame_->height() - handler_height);
2216
2217 // Unlink this handler and drop it from the frame. The next
2218 // handler address is currently on top of the frame.
2219 ASSERT(StackHandlerConstants::kNextOffset == 0);
2220 frame_->EmitPop(r1);
2221 __ str(r1, MemOperand(r3));
2222 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2223
2224 if (i == kReturnShadowIndex) {
2225 // If this label shadowed the function return, materialize the
2226 // return value on the stack.
2227 frame_->EmitPush(r0);
2228 } else {
2229 // Fake TOS for targets that shadowed breaks and continues.
2230 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2231 frame_->EmitPush(r0);
2232 }
2233 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2234 if (--nof_unlinks > 0) {
2235 // If this is not the last unlink block, jump around the next.
2236 finally_block.Jump();
2237 }
2238 }
2239 }
2240
2241 // --- Finally block ---
2242 finally_block.Bind();
2243
2244 // Push the state on the stack.
2245 frame_->EmitPush(r2);
2246
2247 // We keep two elements on the stack - the (possibly faked) result
2248 // and the state - while evaluating the finally block.
2249 //
2250 // Generate code for the statements in the finally block.
2251 VisitStatementsAndSpill(node->finally_block()->statements());
2252
2253 if (has_valid_frame()) {
2254 // Restore state and return value or faked TOS.
2255 frame_->EmitPop(r2);
2256 frame_->EmitPop(r0);
2257 }
2258
2259 // Generate code to jump to the right destination for all used
2260 // formerly shadowing targets. Deallocate each shadow target.
2261 for (int i = 0; i < shadows.length(); i++) {
2262 if (has_valid_frame() && shadows[i]->is_bound()) {
2263 JumpTarget* original = shadows[i]->other_target();
2264 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2265 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2266 JumpTarget skip;
2267 skip.Branch(ne);
2268 frame_->PrepareForReturn();
2269 original->Jump();
2270 skip.Bind();
2271 } else {
2272 original->Branch(eq);
2273 }
2274 }
2275 }
2276
2277 if (has_valid_frame()) {
2278 // Check if we need to rethrow the exception.
2279 JumpTarget exit;
2280 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2281 exit.Branch(ne);
2282
2283 // Rethrow exception.
2284 frame_->EmitPush(r0);
2285 frame_->CallRuntime(Runtime::kReThrow, 1);
2286
2287 // Done.
2288 exit.Bind();
2289 }
2290 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2291}
2292
2293
2294void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2295#ifdef DEBUG
2296 int original_height = frame_->height();
2297#endif
2298 VirtualFrame::SpilledScope spilled_scope;
2299 Comment cmnt(masm_, "[ DebuggerStatament");
2300 CodeForStatementPosition(node);
2301#ifdef ENABLE_DEBUGGER_SUPPORT
Andrei Popescu402d9372010-02-26 13:31:12 +00002302 frame_->DebugBreak();
Steve Blocka7e24c12009-10-30 11:49:00 +00002303#endif
2304 // Ignore the return value.
2305 ASSERT(frame_->height() == original_height);
2306}
2307
2308
2309void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2310 VirtualFrame::SpilledScope spilled_scope;
2311 ASSERT(boilerplate->IsBoilerplate());
2312
Steve Block3ce2e202009-11-05 08:53:23 +00002313 __ mov(r0, Operand(boilerplate));
Leon Clarkee46be812010-01-19 14:06:41 +00002314 // Use the fast case closure allocation code that allocates in new
2315 // space for nested functions that don't need literals cloning.
2316 if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
2317 FastNewClosureStub stub;
2318 frame_->EmitPush(r0);
2319 frame_->CallStub(&stub, 1);
2320 frame_->EmitPush(r0);
2321 } else {
2322 // Create a new closure.
2323 frame_->EmitPush(cp);
2324 frame_->EmitPush(r0);
2325 frame_->CallRuntime(Runtime::kNewClosure, 2);
2326 frame_->EmitPush(r0);
2327 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002328}
2329
2330
2331void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2332#ifdef DEBUG
2333 int original_height = frame_->height();
2334#endif
2335 VirtualFrame::SpilledScope spilled_scope;
2336 Comment cmnt(masm_, "[ FunctionLiteral");
2337
2338 // Build the function boilerplate and instantiate it.
Steve Blockd0582a62009-12-15 09:54:21 +00002339 Handle<JSFunction> boilerplate =
Andrei Popescu31002712010-02-23 13:46:05 +00002340 Compiler::BuildBoilerplate(node, script(), this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002341 // Check for stack-overflow exception.
2342 if (HasStackOverflow()) {
2343 ASSERT(frame_->height() == original_height);
2344 return;
2345 }
2346 InstantiateBoilerplate(boilerplate);
2347 ASSERT(frame_->height() == original_height + 1);
2348}
2349
2350
2351void CodeGenerator::VisitFunctionBoilerplateLiteral(
2352 FunctionBoilerplateLiteral* node) {
2353#ifdef DEBUG
2354 int original_height = frame_->height();
2355#endif
2356 VirtualFrame::SpilledScope spilled_scope;
2357 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2358 InstantiateBoilerplate(node->boilerplate());
2359 ASSERT(frame_->height() == original_height + 1);
2360}
2361
2362
2363void CodeGenerator::VisitConditional(Conditional* node) {
2364#ifdef DEBUG
2365 int original_height = frame_->height();
2366#endif
2367 VirtualFrame::SpilledScope spilled_scope;
2368 Comment cmnt(masm_, "[ Conditional");
2369 JumpTarget then;
2370 JumpTarget else_;
Steve Blockd0582a62009-12-15 09:54:21 +00002371 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002372 if (has_valid_frame()) {
2373 Branch(false, &else_);
2374 }
2375 if (has_valid_frame() || then.is_linked()) {
2376 then.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002377 LoadAndSpill(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002378 }
2379 if (else_.is_linked()) {
2380 JumpTarget exit;
2381 if (has_valid_frame()) exit.Jump();
2382 else_.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002383 LoadAndSpill(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002384 if (exit.is_linked()) exit.Bind();
2385 }
2386 ASSERT(frame_->height() == original_height + 1);
2387}
2388
2389
2390void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
2391 VirtualFrame::SpilledScope spilled_scope;
2392 if (slot->type() == Slot::LOOKUP) {
2393 ASSERT(slot->var()->is_dynamic());
2394
2395 JumpTarget slow;
2396 JumpTarget done;
2397
2398 // Generate fast-case code for variables that might be shadowed by
2399 // eval-introduced variables. Eval is used a lot without
2400 // introducing variables. In those cases, we do not want to
2401 // perform a runtime call for all variables in the scope
2402 // containing the eval.
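    // (Illustrative: in 'function f() { eval(s); return x; }' the
    // variable x may be shadowed by an eval-introduced variable, but if
    // it can otherwise only resolve to a global, we can take this fast
    // path, guarded by the extension checks.)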
2403 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
2404 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
2405 // If there was no control flow to slow, we can exit early.
2406 if (!slow.is_linked()) {
2407 frame_->EmitPush(r0);
2408 return;
2409 }
2410
2411 done.Jump();
2412
2413 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
2414 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
2415 // Only generate the fast case for locals that rewrite to slots.
2416 // This rules out argument loads.
2417 if (potential_slot != NULL) {
2418 __ ldr(r0,
2419 ContextSlotOperandCheckExtensions(potential_slot,
2420 r1,
2421 r2,
2422 &slow));
2423 if (potential_slot->var()->mode() == Variable::CONST) {
2424 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2425 __ cmp(r0, ip);
2426 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2427 }
2428 // There is always control flow to slow from
2429 // ContextSlotOperandCheckExtensions so we have to jump around
2430 // it.
2431 done.Jump();
2432 }
2433 }
2434
2435 slow.Bind();
2436 frame_->EmitPush(cp);
2437 __ mov(r0, Operand(slot->var()->name()));
2438 frame_->EmitPush(r0);
2439
2440 if (typeof_state == INSIDE_TYPEOF) {
2441 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2442 } else {
2443 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2444 }
2445
2446 done.Bind();
2447 frame_->EmitPush(r0);
2448
2449 } else {
Steve Blocka7e24c12009-10-30 11:49:00 +00002450 // Special handling for locals allocated in registers.
2451 __ ldr(r0, SlotOperand(slot, r2));
2452 frame_->EmitPush(r0);
2453 if (slot->var()->mode() == Variable::CONST) {
2454 // Const slots may contain 'the hole' value (the constant hasn't been
2455 // initialized yet) which needs to be converted into the 'undefined'
2456 // value.
2457 Comment cmnt(masm_, "[ Unhole const");
2458 frame_->EmitPop(r0);
2459 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2460 __ cmp(r0, ip);
2461 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2462 frame_->EmitPush(r0);
2463 }
2464 }
2465}
2466
2467
Leon Clarkee46be812010-01-19 14:06:41 +00002468void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
2469 ASSERT(slot != NULL);
2470 if (slot->type() == Slot::LOOKUP) {
2471 ASSERT(slot->var()->is_dynamic());
2472
2473 // For now, just do a runtime call.
2474 frame_->EmitPush(cp);
2475 __ mov(r0, Operand(slot->var()->name()));
2476 frame_->EmitPush(r0);
2477
2478 if (init_state == CONST_INIT) {
2479 // Same as the case for a normal store, but ignores attribute
2480 // (e.g. READ_ONLY) of context slot so that we can initialize
2481 // const properties (introduced via eval("const foo = (some
2482 // expr);")). Also, uses the current function context instead of
2483 // the top context.
2484 //
2485 // Note that we must declare the foo upon entry of eval(), via a
2486 // context slot declaration, but we cannot initialize it at the
2487 // same time, because the const declaration may be at the end of
2488 // the eval code (sigh...) and the const variable may have been
2489 // used before (where its value is 'undefined'). Thus, we can only
2490 // do the initialization when we actually encounter the expression
2491 // and when the expression operands are defined and valid, and
2492 // thus we need the split into 2 operations: declaration of the
2493 // context slot followed by initialization.
2494 frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
2495 } else {
2496 frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
2497 }
2498 // Storing a variable must keep the (new) value on the expression
2499 // stack. This is necessary for compiling assignment expressions.
2500 frame_->EmitPush(r0);
2501
2502 } else {
2503 ASSERT(!slot->var()->is_dynamic());
2504
2505 JumpTarget exit;
2506 if (init_state == CONST_INIT) {
2507 ASSERT(slot->var()->mode() == Variable::CONST);
2508 // Only the first const initialization must be executed (the slot
2509 // still contains 'the hole' value). When the assignment is
2510 // executed, the code is identical to a normal store (see below).
2511 Comment cmnt(masm_, "[ Init const");
2512 __ ldr(r2, SlotOperand(slot, r2));
2513 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2514 __ cmp(r2, ip);
2515 exit.Branch(ne);
2516 }
2517
2518 // We must execute the store. Storing a variable must keep the
2519 // (new) value on the stack. This is necessary for compiling
2520 // assignment expressions.
2521 //
2522 // Note: We will reach here even with slot->var()->mode() ==
2523 // Variable::CONST because of const declarations which will
2524 // initialize consts to 'the hole' value and by doing so, end up
2525 // calling this code. r2 may be loaded with context; used below in
2526 // RecordWrite.
2527 frame_->EmitPop(r0);
2528 __ str(r0, SlotOperand(slot, r2));
2529 frame_->EmitPush(r0);
2530 if (slot->type() == Slot::CONTEXT) {
2531 // Skip write barrier if the written value is a smi.
2532 __ tst(r0, Operand(kSmiTagMask));
2533 exit.Branch(eq);
2534 // r2 is loaded with context when calling SlotOperand above.
2535 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
2536 __ mov(r3, Operand(offset));
2537 __ RecordWrite(r2, r3, r1);
2538 }
2539 // If we definitely did not jump over the assignment, we do not need
2540 // to bind the exit label. Doing so can defeat peephole
2541 // optimization.
2542 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
2543 exit.Bind();
2544 }
2545 }
2546}
2547
2548
Steve Blocka7e24c12009-10-30 11:49:00 +00002549void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
2550 TypeofState typeof_state,
2551 Register tmp,
2552 Register tmp2,
2553 JumpTarget* slow) {
2554 // Check that no extension objects have been created by calls to
2555 // eval from the current scope to the global scope.
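  // (Illustrative: an eval("var x = 1") executed in an intervening scope
  // would install an extension object on that scope's context, in which
  // case the slow path must be taken.)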
2556 Register context = cp;
2557 Scope* s = scope();
2558 while (s != NULL) {
2559 if (s->num_heap_slots() > 0) {
2560 if (s->calls_eval()) {
2561 // Check that extension is NULL.
2562 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
2563 __ tst(tmp2, tmp2);
2564 slow->Branch(ne);
2565 }
2566 // Load next context in chain.
2567 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
2568 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2569 context = tmp;
2570 }
2571 // If no outer scope calls eval, we do not need to check more
2572 // context extensions.
2573 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2574 s = s->outer_scope();
2575 }
2576
2577 if (s->is_eval_scope()) {
2578 Label next, fast;
2579 if (!context.is(tmp)) {
2580 __ mov(tmp, Operand(context));
2581 }
2582 __ bind(&next);
2583 // Terminate at global context.
2584 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2585 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
2586 __ cmp(tmp2, ip);
2587 __ b(eq, &fast);
2588 // Check that extension is NULL.
2589 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2590 __ tst(tmp2, tmp2);
2591 slow->Branch(ne);
2592 // Load next context in chain.
2593 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
2594 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2595 __ b(&next);
2596 __ bind(&fast);
2597 }
2598
2599 // All extension objects were empty and it is safe to use a global
2600 // load IC call.
2601 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
2602 // Load the global object.
2603 LoadGlobal();
 2604  // Set up the name register.
Steve Blocka7e24c12009-10-30 11:49:00 +00002605 __ mov(r2, Operand(slot->var()->name()));
2606 // Call IC stub.
2607 if (typeof_state == INSIDE_TYPEOF) {
Leon Clarke4515c472010-02-03 11:58:03 +00002608 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002609 } else {
Leon Clarke4515c472010-02-03 11:58:03 +00002610 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002611 }
2612
2613 // Drop the global object. The result is in r0.
2614 frame_->Drop();
2615}
2616
2617
2618void CodeGenerator::VisitSlot(Slot* node) {
2619#ifdef DEBUG
2620 int original_height = frame_->height();
2621#endif
2622 VirtualFrame::SpilledScope spilled_scope;
2623 Comment cmnt(masm_, "[ Slot");
Steve Blockd0582a62009-12-15 09:54:21 +00002624 LoadFromSlot(node, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00002625 ASSERT(frame_->height() == original_height + 1);
2626}
2627
2628
2629void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2630#ifdef DEBUG
2631 int original_height = frame_->height();
2632#endif
2633 VirtualFrame::SpilledScope spilled_scope;
2634 Comment cmnt(masm_, "[ VariableProxy");
2635
2636 Variable* var = node->var();
2637 Expression* expr = var->rewrite();
2638 if (expr != NULL) {
2639 Visit(expr);
2640 } else {
2641 ASSERT(var->is_global());
2642 Reference ref(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002643 ref.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002644 }
2645 ASSERT(frame_->height() == original_height + 1);
2646}
2647
2648
2649void CodeGenerator::VisitLiteral(Literal* node) {
2650#ifdef DEBUG
2651 int original_height = frame_->height();
2652#endif
2653 VirtualFrame::SpilledScope spilled_scope;
2654 Comment cmnt(masm_, "[ Literal");
2655 __ mov(r0, Operand(node->handle()));
2656 frame_->EmitPush(r0);
2657 ASSERT(frame_->height() == original_height + 1);
2658}
2659
2660
2661void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2662#ifdef DEBUG
2663 int original_height = frame_->height();
2664#endif
2665 VirtualFrame::SpilledScope spilled_scope;
2666 Comment cmnt(masm_, "[ RexExp Literal");
2667
2668 // Retrieve the literal array and check the allocated entry.
2669
2670 // Load the function of this activation.
2671 __ ldr(r1, frame_->Function());
2672
2673 // Load the literals array of the function.
2674 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2675
2676 // Load the literal at the ast saved index.
2677 int literal_offset =
2678 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2679 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2680
2681 JumpTarget done;
2682 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2683 __ cmp(r2, ip);
2684 done.Branch(ne);
2685
 2686  // If the entry is undefined, we call the runtime system to compute
2687 // the literal.
2688 frame_->EmitPush(r1); // literal array (0)
2689 __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
2690 frame_->EmitPush(r0); // literal index (1)
2691 __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
2692 frame_->EmitPush(r0);
2693 __ mov(r0, Operand(node->flags())); // RegExp flags (3)
2694 frame_->EmitPush(r0);
2695 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2696 __ mov(r2, Operand(r0));
2697
2698 done.Bind();
2699 // Push the literal.
2700 frame_->EmitPush(r2);
2701 ASSERT(frame_->height() == original_height + 1);
2702}
2703
2704
Steve Blocka7e24c12009-10-30 11:49:00 +00002705void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2706#ifdef DEBUG
2707 int original_height = frame_->height();
2708#endif
2709 VirtualFrame::SpilledScope spilled_scope;
2710 Comment cmnt(masm_, "[ ObjectLiteral");
2711
Steve Blocka7e24c12009-10-30 11:49:00 +00002712 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00002713 __ ldr(r2, frame_->Function());
2714 // Literal array.
2715 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
2716 // Literal index.
2717 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
2718 // Constant properties.
2719 __ mov(r0, Operand(node->constant_properties()));
2720 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
2721 if (node->depth() > 1) {
2722 frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
2723 } else {
2724 frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002725 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002726 frame_->EmitPush(r0); // save the result
Steve Blocka7e24c12009-10-30 11:49:00 +00002727 for (int i = 0; i < node->properties()->length(); i++) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002728 // At the start of each iteration, the top of stack contains
2729 // the newly created object literal.
Steve Blocka7e24c12009-10-30 11:49:00 +00002730 ObjectLiteral::Property* property = node->properties()->at(i);
2731 Literal* key = property->key();
2732 Expression* value = property->value();
2733 switch (property->kind()) {
2734 case ObjectLiteral::Property::CONSTANT:
2735 break;
2736 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2737 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2738 // else fall through
Andrei Popescu402d9372010-02-26 13:31:12 +00002739 case ObjectLiteral::Property::COMPUTED:
2740 if (key->handle()->IsSymbol()) {
2741 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
2742 LoadAndSpill(value);
2743 frame_->EmitPop(r0);
2744 __ mov(r2, Operand(key->handle()));
2745 __ ldr(r1, frame_->Top()); // Load the receiver.
2746 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
2747 break;
2748 }
2749 // else fall through
Steve Blocka7e24c12009-10-30 11:49:00 +00002750 case ObjectLiteral::Property::PROTOTYPE: {
Andrei Popescu402d9372010-02-26 13:31:12 +00002751 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00002752 frame_->EmitPush(r0); // dup the result
2753 LoadAndSpill(key);
2754 LoadAndSpill(value);
2755 frame_->CallRuntime(Runtime::kSetProperty, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002756 break;
2757 }
2758 case ObjectLiteral::Property::SETTER: {
Andrei Popescu402d9372010-02-26 13:31:12 +00002759 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00002760 frame_->EmitPush(r0);
2761 LoadAndSpill(key);
2762 __ mov(r0, Operand(Smi::FromInt(1)));
2763 frame_->EmitPush(r0);
2764 LoadAndSpill(value);
2765 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
Steve Blocka7e24c12009-10-30 11:49:00 +00002766 break;
2767 }
2768 case ObjectLiteral::Property::GETTER: {
Andrei Popescu402d9372010-02-26 13:31:12 +00002769 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00002770 frame_->EmitPush(r0);
2771 LoadAndSpill(key);
2772 __ mov(r0, Operand(Smi::FromInt(0)));
2773 frame_->EmitPush(r0);
2774 LoadAndSpill(value);
2775 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
Steve Blocka7e24c12009-10-30 11:49:00 +00002776 break;
2777 }
2778 }
2779 }
2780 ASSERT(frame_->height() == original_height + 1);
2781}
2782
2783
Steve Blocka7e24c12009-10-30 11:49:00 +00002784void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2785#ifdef DEBUG
2786 int original_height = frame_->height();
2787#endif
2788 VirtualFrame::SpilledScope spilled_scope;
2789 Comment cmnt(masm_, "[ ArrayLiteral");
2790
Steve Blocka7e24c12009-10-30 11:49:00 +00002791 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00002792 __ ldr(r2, frame_->Function());
Andrei Popescu402d9372010-02-26 13:31:12 +00002793 // Load the literals array of the function.
Leon Clarkee46be812010-01-19 14:06:41 +00002794 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00002795 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
Leon Clarkee46be812010-01-19 14:06:41 +00002796 __ mov(r0, Operand(node->constant_elements()));
2797 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
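  // Choose a construction path (illustrative examples, not from the
  // source): a nested literal such as [[1], [2]] has depth > 1 and needs
  // the generic runtime call; a flat but very long literal exceeds the
  // stub's maximum length and uses the shallow runtime call; a short flat
  // literal such as [1, 2, 3] is cloned by the stub.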
Andrei Popescu402d9372010-02-26 13:31:12 +00002798 int length = node->values()->length();
Leon Clarkee46be812010-01-19 14:06:41 +00002799 if (node->depth() > 1) {
2800 frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
Andrei Popescu402d9372010-02-26 13:31:12 +00002801 } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
Leon Clarkee46be812010-01-19 14:06:41 +00002802 frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
Andrei Popescu402d9372010-02-26 13:31:12 +00002803 } else {
2804 FastCloneShallowArrayStub stub(length);
2805 frame_->CallStub(&stub, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002806 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002807 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00002808  // r0: created array literal
Steve Blocka7e24c12009-10-30 11:49:00 +00002809
2810 // Generate code to set the elements in the array that are not
2811 // literals.
2812 for (int i = 0; i < node->values()->length(); i++) {
2813 Expression* value = node->values()->at(i);
2814
2815 // If value is a literal the property value is already set in the
2816 // boilerplate object.
2817 if (value->AsLiteral() != NULL) continue;
2818 // If value is a materialized literal the property value is already set
2819 // in the boilerplate object if it is simple.
2820 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2821
2822 // The property must be set by generated code.
2823 LoadAndSpill(value);
2824 frame_->EmitPop(r0);
2825
 2826    // Fetch the array literal.
2827 __ ldr(r1, frame_->Top());
2828 // Get the elements array.
2829 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
2830
2831 // Write to the indexed properties array.
2832 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2833 __ str(r0, FieldMemOperand(r1, offset));
2834
2835 // Update the write barrier for the array address.
2836 __ mov(r3, Operand(offset));
2837 __ RecordWrite(r1, r3, r2);
2838 }
2839 ASSERT(frame_->height() == original_height + 1);
2840}
2841
2842
2843void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2844#ifdef DEBUG
2845 int original_height = frame_->height();
2846#endif
2847 VirtualFrame::SpilledScope spilled_scope;
2848 // Call runtime routine to allocate the catch extension object and
2849 // assign the exception value to the catch variable.
2850 Comment cmnt(masm_, "[ CatchExtensionObject");
2851 LoadAndSpill(node->key());
2852 LoadAndSpill(node->value());
2853 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2854 frame_->EmitPush(r0);
2855 ASSERT(frame_->height() == original_height + 1);
2856}
2857
2858
2859void CodeGenerator::VisitAssignment(Assignment* node) {
2860#ifdef DEBUG
2861 int original_height = frame_->height();
2862#endif
2863 VirtualFrame::SpilledScope spilled_scope;
2864 Comment cmnt(masm_, "[ Assignment");
2865
Leon Clarked91b9f72010-01-27 17:25:45 +00002866 { Reference target(this, node->target(), node->is_compound());
Steve Blocka7e24c12009-10-30 11:49:00 +00002867 if (target.is_illegal()) {
2868 // Fool the virtual frame into thinking that we left the assignment's
2869 // value on the frame.
2870 __ mov(r0, Operand(Smi::FromInt(0)));
2871 frame_->EmitPush(r0);
2872 ASSERT(frame_->height() == original_height + 1);
2873 return;
2874 }
2875
2876 if (node->op() == Token::ASSIGN ||
2877 node->op() == Token::INIT_VAR ||
2878 node->op() == Token::INIT_CONST) {
2879 LoadAndSpill(node->value());
2880
Leon Clarked91b9f72010-01-27 17:25:45 +00002881 } else { // Assignment is a compound assignment.
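      // (Illustrative: for 'x += 1' the right operand is a smi literal,
      // so the SmiOperation fast path below applies; for 'x += f()' the
      // generic binary operation is used.)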
Steve Blocka7e24c12009-10-30 11:49:00 +00002882 // Get the old value of the lhs.
Steve Blockd0582a62009-12-15 09:54:21 +00002883 target.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002884 Literal* literal = node->value()->AsLiteral();
2885 bool overwrite =
2886 (node->value()->AsBinaryOperation() != NULL &&
2887 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2888 if (literal != NULL && literal->handle()->IsSmi()) {
2889 SmiOperation(node->binary_op(),
2890 literal->handle(),
2891 false,
2892 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2893 frame_->EmitPush(r0);
2894
2895 } else {
2896 LoadAndSpill(node->value());
2897 GenericBinaryOperation(node->binary_op(),
2898 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2899 frame_->EmitPush(r0);
2900 }
2901 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002902 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2903 if (var != NULL &&
2904 (var->mode() == Variable::CONST) &&
2905 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2906 // Assignment ignored - leave the value on the stack.
Leon Clarked91b9f72010-01-27 17:25:45 +00002907 UnloadReference(&target);
Steve Blocka7e24c12009-10-30 11:49:00 +00002908 } else {
2909 CodeForSourcePosition(node->position());
2910 if (node->op() == Token::INIT_CONST) {
2911 // Dynamic constant initializations must use the function context
2912 // and initialize the actual constant declared. Dynamic variable
2913 // initializations are simply assignments and use SetValue.
2914 target.SetValue(CONST_INIT);
2915 } else {
2916 target.SetValue(NOT_CONST_INIT);
2917 }
2918 }
2919 }
2920 ASSERT(frame_->height() == original_height + 1);
2921}
2922
2923
2924void CodeGenerator::VisitThrow(Throw* node) {
2925#ifdef DEBUG
2926 int original_height = frame_->height();
2927#endif
2928 VirtualFrame::SpilledScope spilled_scope;
2929 Comment cmnt(masm_, "[ Throw");
2930
2931 LoadAndSpill(node->exception());
2932 CodeForSourcePosition(node->position());
2933 frame_->CallRuntime(Runtime::kThrow, 1);
2934 frame_->EmitPush(r0);
2935 ASSERT(frame_->height() == original_height + 1);
2936}
2937
2938
2939void CodeGenerator::VisitProperty(Property* node) {
2940#ifdef DEBUG
2941 int original_height = frame_->height();
2942#endif
2943 VirtualFrame::SpilledScope spilled_scope;
2944 Comment cmnt(masm_, "[ Property");
2945
2946 { Reference property(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002947 property.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002948 }
2949 ASSERT(frame_->height() == original_height + 1);
2950}
2951
2952
2953void CodeGenerator::VisitCall(Call* node) {
2954#ifdef DEBUG
2955 int original_height = frame_->height();
2956#endif
2957 VirtualFrame::SpilledScope spilled_scope;
2958 Comment cmnt(masm_, "[ Call");
2959
2960 Expression* function = node->expression();
2961 ZoneList<Expression*>* args = node->arguments();
2962
2963 // Standard function call.
2964 // Check if the function is a variable or a property.
2965 Variable* var = function->AsVariableProxy()->AsVariable();
2966 Property* property = function->AsProperty();
2967
2968 // ------------------------------------------------------------------------
2969 // Fast-case: Use inline caching.
2970 // ---
2971 // According to ECMA-262, section 11.2.3, page 44, the function to call
2972 // must be resolved after the arguments have been evaluated. The IC code
2973 // automatically handles this by loading the arguments before the function
2974 // is resolved in cache misses (this also holds for megamorphic calls).
2975 // ------------------------------------------------------------------------
2976
2977 if (var != NULL && var->is_possibly_eval()) {
2978 // ----------------------------------
2979 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2980 // ----------------------------------
2981
2982 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2983 // resolve the function we need to call and the receiver of the
2984 // call. Then we call the resolved function using the given
2985 // arguments.
2986 // Prepare stack for call to resolved function.
2987 LoadAndSpill(function);
2988 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2989 frame_->EmitPush(r2); // Slot for receiver
2990 int arg_count = args->length();
2991 for (int i = 0; i < arg_count; i++) {
2992 LoadAndSpill(args->at(i));
2993 }
2994
2995 // Prepare stack for call to ResolvePossiblyDirectEval.
2996 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
2997 frame_->EmitPush(r1);
2998 if (arg_count > 0) {
2999 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
3000 frame_->EmitPush(r1);
3001 } else {
3002 frame_->EmitPush(r2);
3003 }
3004
Leon Clarkee46be812010-01-19 14:06:41 +00003005 // Push the receiver.
3006 __ ldr(r1, frame_->Receiver());
3007 frame_->EmitPush(r1);
3008
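    // (A reading of the code above, for clarity: the runtime call
    // receives the function being called, the first argument or undefined
    // if there are none, and the receiver of the current function.)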
Steve Blocka7e24c12009-10-30 11:49:00 +00003009 // Resolve the call.
Leon Clarkee46be812010-01-19 14:06:41 +00003010 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00003011
 3012    // Touch up the stack with the right values for the function and the receiver.
Leon Clarkee46be812010-01-19 14:06:41 +00003013 __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00003014 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
3015
3016 // Call the function.
3017 CodeForSourcePosition(node->position());
3018
3019 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Leon Clarkee46be812010-01-19 14:06:41 +00003020 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003021 frame_->CallStub(&call_function, arg_count + 1);
3022
3023 __ ldr(cp, frame_->Context());
3024 // Remove the function from the stack.
3025 frame_->Drop();
3026 frame_->EmitPush(r0);
3027
3028 } else if (var != NULL && !var->is_this() && var->is_global()) {
3029 // ----------------------------------
3030 // JavaScript example: 'foo(1, 2, 3)' // foo is global
3031 // ----------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00003032 // Pass the global object as the receiver and let the IC stub
3033 // patch the stack to use the global proxy as 'this' in the
3034 // invoked function.
3035 LoadGlobal();
3036
3037 // Load the arguments.
3038 int arg_count = args->length();
3039 for (int i = 0; i < arg_count; i++) {
3040 LoadAndSpill(args->at(i));
3041 }
3042
Andrei Popescu402d9372010-02-26 13:31:12 +00003043    // Set up the name register and call the IC initialization code.
3044 __ mov(r2, Operand(var->name()));
Steve Blocka7e24c12009-10-30 11:49:00 +00003045 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3046 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3047 CodeForSourcePosition(node->position());
3048 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3049 arg_count + 1);
3050 __ ldr(cp, frame_->Context());
Steve Blocka7e24c12009-10-30 11:49:00 +00003051 frame_->EmitPush(r0);
3052
  } else if (var != NULL && var->slot() != NULL &&
             var->slot()->type() == Slot::LOOKUP) {
    // ----------------------------------
    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
    // ----------------------------------

    // Load the function.
    frame_->EmitPush(cp);
    __ mov(r0, Operand(var->name()));
    frame_->EmitPush(r0);
    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
    // r0: slot value; r1: receiver

    // Push the function and the receiver.
    frame_->EmitPush(r0);  // function
    frame_->EmitPush(r1);  // receiver

    // Call the function.
    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
    frame_->EmitPush(r0);

  } else if (property != NULL) {
    // Check if the key is a literal string.
    Literal* literal = property->key()->AsLiteral();

    if (literal != NULL && literal->handle()->IsSymbol()) {
      // ------------------------------------------------------------------
      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
      // ------------------------------------------------------------------

      LoadAndSpill(property->obj());  // Receiver.
      // Load the arguments.
      int arg_count = args->length();
      for (int i = 0; i < arg_count; i++) {
        LoadAndSpill(args->at(i));
      }

      // Set the name register and call the IC initialization code.
      __ mov(r2, Operand(literal->handle()));
      InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
      Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
      CodeForSourcePosition(node->position());
      frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
      __ ldr(cp, frame_->Context());
      frame_->EmitPush(r0);

    } else {
      // -------------------------------------------
      // JavaScript example: 'array[index](1, 2, 3)'
      // -------------------------------------------

      LoadAndSpill(property->obj());
      LoadAndSpill(property->key());
      EmitKeyedLoad(false);
      frame_->Drop();  // key
      // Put the function below the receiver.
      if (property->is_synthetic()) {
        // Use the global receiver.
        frame_->Drop();
        frame_->EmitPush(r0);
        LoadGlobalReceiver(r0);
      } else {
        frame_->EmitPop(r1);  // receiver
        frame_->EmitPush(r0);  // function
        frame_->EmitPush(r1);  // receiver
      }

      // Call the function.
      CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
      frame_->EmitPush(r0);
    }

  } else {
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
    // ----------------------------------

    // Load the function.
    LoadAndSpill(function);

    // Pass the global proxy as the receiver.
    LoadGlobalReceiver(r0);

    // Call the function.
    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
    frame_->EmitPush(r0);
  }
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ CallNew");

  // According to ECMA-262, section 11.2.2, page 44, the function
  // expression in new calls must be evaluated before the
  // arguments. This is different from ordinary calls, where the
  // actual function to call is resolved after the arguments have been
  // evaluated.

  // Compute function to call and use the global object as the
  // receiver. There is no need to use the global proxy here because
  // it will always be replaced with a newly allocated object.
  LoadAndSpill(node->expression());
  LoadGlobal();

  // Push the arguments ("left-to-right") on the stack.
  ZoneList<Expression*>* args = node->arguments();
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    LoadAndSpill(args->at(i));
  }

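  // The function was pushed first, then the global object serving as the
  // receiver, then the arg_count arguments, so the load below finds the
  // function at frame depth arg_count + 1.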
  // r0: the number of arguments.
  __ mov(r0, Operand(arg_count));
  // Load the function into r1 as per calling convention.
  __ ldr(r1, frame_->ElementAt(arg_count + 1));

  // Call the construct call builtin that handles allocation and
  // constructor invocation.
  CodeForSourcePosition(node->position());
  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
  frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);

  // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
  __ str(r0, frame_->Top());
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  JumpTarget leave, null, function, non_function_constructor;

  // Load the object into r0.
  LoadAndSpill(args->at(0));
  frame_->EmitPop(r0);

  // If the object is a smi, we return null.
  __ tst(r0, Operand(kSmiTagMask));
  null.Branch(eq);

  // Check that the object is a JS object but take special care of JS
  // functions to make sure they have 'Function' as their class.
  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
  null.Branch(lt);

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
  // LAST_JS_OBJECT_TYPE.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
  function.Branch(eq);

  // Check if the constructor in the map is a function.
  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
  non_function_constructor.Branch(ne);

  // The r0 register now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
  frame_->EmitPush(r0);
  leave.Jump();

  // Functions have class 'Function'.
  function.Bind();
  __ mov(r0, Operand(Factory::function_class_symbol()));
  frame_->EmitPush(r0);
  leave.Jump();

  // Objects with a non-function constructor have class 'Object'.
  non_function_constructor.Bind();
  __ mov(r0, Operand(Factory::Object_symbol()));
  frame_->EmitPush(r0);
  leave.Jump();

  // Non-JS objects have class null.
  null.Bind();
  __ LoadRoot(r0, Heap::kNullValueRootIndex);
  frame_->EmitPush(r0);

  // All done.
  leave.Bind();
}


void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  JumpTarget leave;
  LoadAndSpill(args->at(0));
  frame_->EmitPop(r0);  // r0 contains object.
  // if (object->IsSmi()) return the object.
  __ tst(r0, Operand(kSmiTagMask));
  leave.Branch(eq);
  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
  leave.Branch(ne);
  // Load the value.
  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
  leave.Bind();
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 2);
  JumpTarget leave;
  LoadAndSpill(args->at(0));  // Load the object.
  LoadAndSpill(args->at(1));  // Load the value.
  frame_->EmitPop(r0);  // r0 contains value
  frame_->EmitPop(r1);  // r1 contains object
  // if (object->IsSmi()) return object.
  __ tst(r1, Operand(kSmiTagMask));
  leave.Branch(eq);
  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
  leave.Branch(ne);
  // Store the value.
  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
  // Update the write barrier.
  __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
  __ RecordWrite(r1, r2, r3);
  // Leave.
  leave.Bind();
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  LoadAndSpill(args->at(0));
  frame_->EmitPop(r0);
  __ tst(r0, Operand(kSmiTagMask));
  cc_reg_ = eq;
}


void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
  ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (ShouldGenerateLog(args->at(0))) {
    LoadAndSpill(args->at(1));
    LoadAndSpill(args->at(2));
    __ CallRuntime(Runtime::kLog, 2);
  }
#endif
  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  LoadAndSpill(args->at(0));
  frame_->EmitPop(r0);
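  // A non-negative smi has both the smi tag bit and the sign bit clear,
  // so a single masked test checks both conditions at once.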
  __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
  cc_reg_ = eq;
}


// This generates code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// Flat sequential strings (including those wrapped in a cons string with an
// empty second part) are handled inline; all other cases fall through to
// the slow case.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 2);
  Comment cmnt(masm_, "[ GenerateFastCharCodeAt");

  LoadAndSpill(args->at(0));
  LoadAndSpill(args->at(1));
  frame_->EmitPop(r0);  // Index.
  frame_->EmitPop(r1);  // String.

  Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;

  __ tst(r1, Operand(kSmiTagMask));
  __ b(eq, &slow);  // The 'string' was a Smi.

  ASSERT(kSmiTag == 0);
  __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
  __ b(ne, &slow);  // The index was negative or not a Smi.

  __ bind(&try_again_with_new_string);
  __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &slow);

  // Now r2 has the string type.
  __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
  // Now r3 has the length of the string. Compare with the index.
  __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
  __ b(le, &slow);

  // Here we know the index is in range. Check that string is sequential.
  ASSERT_EQ(0, kSeqStringTag);
  __ tst(r2, Operand(kStringRepresentationMask));
  __ b(ne, &not_a_flat_string);

  // Check whether it is an ASCII string.
  ASSERT_EQ(0, kTwoByteStringTag);
  __ tst(r2, Operand(kStringEncodingMask));
  __ b(ne, &ascii_string);

  // 2-byte string. We can add without shifting since the Smi tag size is the
  // log2 of the number of bytes in a two-byte character.
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiShiftSize);
  __ add(r1, r1, Operand(r0));
  __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
  __ jmp(&end);

  __ bind(&ascii_string);
  __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
  __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
  __ jmp(&end);

  __ bind(&not_a_flat_string);
  __ and_(r2, r2, Operand(kStringRepresentationMask));
  __ cmp(r2, Operand(kConsStringTag));
  __ b(ne, &slow);

  // ConsString.
  // Check that the right hand side is the empty string (i.e. if this is
  // really a flat string in a cons string). If that is not the case we would
  // rather go to the runtime system now, to flatten the string.
  __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
  __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
  __ cmp(r2, Operand(r3));
  __ b(ne, &slow);

  // Get the first of the two strings.
  __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
  __ jmp(&try_again_with_new_string);

  __ bind(&slow);
  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);

  __ bind(&end);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  LoadAndSpill(args->at(0));
  JumpTarget answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi. This can't be done with the usual test opcode so
  // we use XOR to get the right CC bits.
  frame_->EmitPop(r0);
  __ and_(r1, r0, Operand(kSmiTagMask));
  __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
  answer.Branch(ne);
  // It is a heap object - get the map. Check if the object is a JS array.
  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
  answer.Bind();
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  LoadAndSpill(args->at(0));
  JumpTarget answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi. This can't be done with the usual test opcode so
  // we use XOR to get the right CC bits.
  frame_->EmitPop(r0);
  __ and_(r1, r0, Operand(kSmiTagMask));
  __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
  answer.Branch(ne);
  // It is a heap object - get the map. Check if the object is a regexp.
  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
  answer.Bind();
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  LoadAndSpill(args->at(0));
  frame_->EmitPop(r1);
  __ tst(r1, Operand(kSmiTagMask));
  false_target()->Branch(eq);

  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(r1, ip);
  true_target()->Branch(eq);

  Register map_reg = r2;
  __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined when tested with typeof.
  __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
  __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
  __ cmp(r1, Operand(1 << Map::kIsUndetectable));
  false_target()->Branch(eq);

  __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
  __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
  false_target()->Branch(lt);
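  // The instance type is now known to be at least FIRST_JS_OBJECT_TYPE, so
  // the le test against LAST_JS_OBJECT_TYPE completes the range check.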
  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
  cc_reg_ = le;
}


void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (%_ClassOf(arg) === 'Function')
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  LoadAndSpill(args->at(0));
  frame_->EmitPop(r0);
  __ tst(r0, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  Register map_reg = r2;
  __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);
  LoadAndSpill(args->at(0));
  frame_->EmitPop(r0);
  __ tst(r0, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
  __ tst(r1, Operand(1 << Map::kIsUndetectable));
  cc_reg_ = ne;
}


void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 0);

  // Get the frame pointer for the calling frame.
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &check_frame_marker);
  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
  __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
  cc_reg_ = eq;
}


void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 0);

  // Seed the result with the formal parameters count, which will be used
  // in case no arguments adaptor frame is found below the current frame.
  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));

  // Call the shared stub to get to the arguments.length.
  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
  frame_->CallStub(&stub, 0);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 1);

  // Satisfy contract with ArgumentsAccessStub:
  // Load the key into r1 and the formal parameters count into r0.
  LoadAndSpill(args->at(0));
  frame_->EmitPop(r1);
  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));

  // Call the shared stub to get to arguments[key].
  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
  frame_->CallStub(&stub, 0);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 0);
  __ Call(ExternalReference::random_positive_smi_function().address(),
          RelocInfo::RUNTIME_ENTRY);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  Load(args->at(0));
  Load(args->at(1));

  StringAddStub stub(NO_STRING_ADD_FLAGS);
  frame_->CallStub(&stub, 2);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
  ASSERT_EQ(3, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));

  SubStringStub stub;
  frame_->CallStub(&stub, 3);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  Load(args->at(0));
  Load(args->at(1));

  StringCompareStub stub;
  frame_->CallStub(&stub, 2);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
  ASSERT_EQ(4, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));
  Load(args->at(3));

  frame_->CallRuntime(Runtime::kRegExpExec, 4);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);

  // Load the argument on the stack and call the runtime function.
  Load(args->at(0));

  frame_->CallRuntime(Runtime::kNumberToString, 1);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);
  // Load the argument on the stack and call the runtime function.
  Load(args->at(0));
  frame_->CallRuntime(Runtime::kMath_sin, 1);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);
  // Load the argument on the stack and call the runtime function.
  Load(args->at(0));
  frame_->CallRuntime(Runtime::kMath_cos, 1);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(args->length() == 2);

  // Load the two objects into registers and perform the comparison.
  LoadAndSpill(args->at(0));
  LoadAndSpill(args->at(1));
  frame_->EmitPop(r0);
  frame_->EmitPop(r1);
  __ cmp(r0, Operand(r1));
  cc_reg_ = eq;
}


void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  if (CheckForInlineRuntimeCall(node)) {
    ASSERT((has_cc() && frame_->height() == original_height) ||
           (!has_cc() && frame_->height() == original_height + 1));
    return;
  }

  ZoneList<Expression*>* args = node->arguments();
  Comment cmnt(masm_, "[ CallRuntime");
  Runtime::Function* function = node->function();

  if (function == NULL) {
    // Prepare stack for calling JS runtime function.
    // Push the builtins object found in the current global object.
    __ ldr(r1, GlobalObject());
    __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
    frame_->EmitPush(r0);
  }

  // Push the arguments ("left-to-right").
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    LoadAndSpill(args->at(i));
  }

  if (function == NULL) {
    // Call the JS runtime function.
    __ mov(r2, Operand(node->name()));
    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
    __ ldr(cp, frame_->Context());
    frame_->EmitPush(r0);
  } else {
    // Call the C runtime function.
    frame_->CallRuntime(function, arg_count);
    frame_->EmitPush(r0);
  }
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ UnaryOperation");

  Token::Value op = node->op();

  if (op == Token::NOT) {
    LoadConditionAndSpill(node->expression(),
                          false_target(),
                          true_target(),
                          true);
    // LoadCondition may (and usually does) leave a test and branch to
    // be emitted by the caller. In that case, negate the condition.
    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);

  } else if (op == Token::DELETE) {
    Property* property = node->expression()->AsProperty();
    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
    if (property != NULL) {
      LoadAndSpill(property->obj());
      LoadAndSpill(property->key());
      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);

    } else if (variable != NULL) {
      Slot* slot = variable->slot();
      if (variable->is_global()) {
        LoadGlobal();
        __ mov(r0, Operand(variable->name()));
        frame_->EmitPush(r0);
        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);

      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
        // Lookup the context holding the named variable.
        frame_->EmitPush(cp);
        __ mov(r0, Operand(variable->name()));
        frame_->EmitPush(r0);
        frame_->CallRuntime(Runtime::kLookupContext, 2);
        // r0: context
        frame_->EmitPush(r0);
        __ mov(r0, Operand(variable->name()));
        frame_->EmitPush(r0);
        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);

      } else {
        // Default: Result of deleting non-global, not dynamically
        // introduced variables is false.
        __ LoadRoot(r0, Heap::kFalseValueRootIndex);
      }

    } else {
      // Default: Result of deleting expressions is true.
      LoadAndSpill(node->expression());  // may have side-effects
      frame_->Drop();
      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    }
    frame_->EmitPush(r0);

  } else if (op == Token::TYPEOF) {
    // Special case for loading the typeof expression; see comment on
    // LoadTypeofExpression().
    LoadTypeofExpression(node->expression());
    frame_->CallRuntime(Runtime::kTypeof, 1);
    frame_->EmitPush(r0);  // r0 has result

  } else {
    bool overwrite =
        (node->expression()->AsBinaryOperation() != NULL &&
         node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
    LoadAndSpill(node->expression());
    frame_->EmitPop(r0);
    switch (op) {
      case Token::NOT:
      case Token::DELETE:
      case Token::TYPEOF:
        UNREACHABLE();  // handled above
        break;

      case Token::SUB: {
        GenericUnaryOpStub stub(Token::SUB, overwrite);
        frame_->CallStub(&stub, 0);
        break;
      }

      case Token::BIT_NOT: {
        // Smi check.
        JumpTarget smi_label;
        JumpTarget continue_label;
        __ tst(r0, Operand(kSmiTagMask));
        smi_label.Branch(eq);

        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
        frame_->CallStub(&stub, 0);
        continue_label.Jump();

        smi_label.Bind();
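        // For a smi, ~x is computed by inverting all bits and then
        // clearing the inverted tag bit, which leaves a correctly tagged
        // smi without untagging and retagging.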
        __ mvn(r0, Operand(r0));
        __ bic(r0, r0, Operand(kSmiTagMask));  // bit-clear inverted smi-tag
        continue_label.Bind();
        break;
      }

      case Token::VOID:
        // Since the stack top is cached in r0, popping and then
        // pushing a value can be done by just writing to r0.
        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
        break;

      case Token::ADD: {
        // Smi check.
        JumpTarget continue_label;
        __ tst(r0, Operand(kSmiTagMask));
        continue_label.Branch(eq);
        frame_->EmitPush(r0);
        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
        continue_label.Bind();
        break;
      }
      default:
        UNREACHABLE();
    }
    frame_->EmitPush(r0);  // r0 has result
  }
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::VisitCountOperation(CountOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ CountOperation");

  bool is_postfix = node->is_postfix();
  bool is_increment = node->op() == Token::INC;

  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
  bool is_const = (var != NULL && var->mode() == Variable::CONST);

  // Postfix: Make room for the result.
  if (is_postfix) {
    __ mov(r0, Operand(0));
    frame_->EmitPush(r0);
  }

  // A constant reference is never written to, so it does not count as a
  // compound assignment reference.
  { Reference target(this, node->expression(), !is_const);
    if (target.is_illegal()) {
      // Spoof the virtual frame to have the expected height (one higher
      // than on entry).
      if (!is_postfix) {
        __ mov(r0, Operand(Smi::FromInt(0)));
        frame_->EmitPush(r0);
      }
      ASSERT(frame_->height() == original_height + 1);
      return;
    }
    target.GetValueAndSpill();
    frame_->EmitPop(r0);

    JumpTarget slow;
    JumpTarget exit;

    // Load the value (1) into register r1.
    __ mov(r1, Operand(Smi::FromInt(1)));

    // Check for smi operand.
    __ tst(r0, Operand(kSmiTagMask));
    slow.Branch(ne);

    // Postfix: Store the old value as the result.
    if (is_postfix) {
      __ str(r0, frame_->ElementAt(target.size()));
    }

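    // Both operands are tagged smis here. Since the smi tag bits are zero,
    // the tagged values can be added or subtracted directly; signed
    // overflow sets the V flag, which the vc branch below tests.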
    // Perform optimistic increment/decrement.
    if (is_increment) {
      __ add(r0, r0, Operand(r1), SetCC);
    } else {
      __ sub(r0, r0, Operand(r1), SetCC);
    }

    // If the increment/decrement didn't overflow, we're done.
    exit.Branch(vc);

    // Revert optimistic increment/decrement.
    if (is_increment) {
      __ sub(r0, r0, Operand(r1));
    } else {
      __ add(r0, r0, Operand(r1));
    }

    // Slow case: Convert to number.
    slow.Bind();
    {
      // Convert the operand to a number.
      frame_->EmitPush(r0);
      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
    }
    if (is_postfix) {
      // Postfix: store to result (on the stack).
      __ str(r0, frame_->ElementAt(target.size()));
    }

    // Compute the new value.
    __ mov(r1, Operand(Smi::FromInt(1)));
    frame_->EmitPush(r0);
    frame_->EmitPush(r1);
    if (is_increment) {
      frame_->CallRuntime(Runtime::kNumberAdd, 2);
    } else {
      frame_->CallRuntime(Runtime::kNumberSub, 2);
    }

    // Store the new value in the target if not const.
    exit.Bind();
    frame_->EmitPush(r0);
    if (!is_const) target.SetValue(NOT_CONST_INIT);
  }

  // Postfix: Discard the new value and use the old.
  if (is_postfix) frame_->EmitPop(r0);
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ BinaryOperation");
  Token::Value op = node->op();

  // According to ECMA-262 section 11.11, page 58, the binary logical
  // operators must yield the result of one of the two expressions
  // before any ToBoolean() conversions. This means that the value
  // produced by a && or || operator is not necessarily a boolean.

  // NOTE: If the left hand side produces a materialized value (not in
  // the CC register), we force the right hand side to do the
  // same. This is necessary because we may have to branch to the exit
  // after evaluating the left hand side (due to the shortcut
  // semantics), but the compiler must (statically) know if the result
  // of compiling the binary operation is materialized or not.

  if (op == Token::AND) {
    JumpTarget is_true;
    LoadConditionAndSpill(node->left(),
                          &is_true,
                          false_target(),
                          false);
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      __ ldr(r0, frame_->Top());  // Duplicate the stack top.
      frame_->EmitPush(r0);
      // Avoid popping the result if it converts to 'false' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(&pop_and_continue, &exit);
      Branch(false, &exit);

      // Pop the result of evaluating the first part.
      pop_and_continue.Bind();
      frame_->EmitPop(r0);

      // Evaluate right side expression.
      is_true.Bind();
      LoadAndSpill(node->right());

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_true.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly true.
      if (has_cc()) {
        Branch(false, false_target());
      }
      is_true.Bind();
      LoadConditionAndSpill(node->right(),
                            true_target(),
                            false_target(),
                            false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
    }

  } else if (op == Token::OR) {
    JumpTarget is_false;
    LoadConditionAndSpill(node->left(),
                          true_target(),
                          &is_false,
                          false);
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      __ ldr(r0, frame_->Top());
      frame_->EmitPush(r0);
      // Avoid popping the result if it converts to 'true' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(&exit, &pop_and_continue);
      Branch(true, &exit);

      // Pop the result of evaluating the first part.
      pop_and_continue.Bind();
      frame_->EmitPop(r0);

      // Evaluate right side expression.
      is_false.Bind();
      LoadAndSpill(node->right());

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_false.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly false.
      if (has_cc()) {
        Branch(true, true_target());
      }
      is_false.Bind();
      LoadConditionAndSpill(node->right(),
                            true_target(),
                            false_target(),
                            false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
    }

  } else {
    // Optimize for the case where (at least) one of the expressions
    // is a literal small integer.
    Literal* lliteral = node->left()->AsLiteral();
    Literal* rliteral = node->right()->AsLiteral();
    // NOTE: The code below assumes that the slow cases (calls to runtime)
    // never return a constant/immutable object.
    bool overwrite_left =
        (node->left()->AsBinaryOperation() != NULL &&
         node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
    bool overwrite_right =
        (node->right()->AsBinaryOperation() != NULL &&
         node->right()->AsBinaryOperation()->ResultOverwriteAllowed());

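    // SmiOperation emits an inline fast path specialized for a known smi
    // literal operand; its third argument indicates that the operands are
    // reversed (the literal is the left operand), which matters for
    // non-commutative operators.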
    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
      LoadAndSpill(node->left());
      SmiOperation(node->op(),
                   rliteral->handle(),
                   false,
                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);

    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
      LoadAndSpill(node->right());
      SmiOperation(node->op(),
                   lliteral->handle(),
                   true,
                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);

    } else {
      OverwriteMode overwrite_mode = NO_OVERWRITE;
      if (overwrite_left) {
        overwrite_mode = OVERWRITE_LEFT;
      } else if (overwrite_right) {
        overwrite_mode = OVERWRITE_RIGHT;
      }
      LoadAndSpill(node->left());
      LoadAndSpill(node->right());
      GenericBinaryOperation(node->op(), overwrite_mode);
    }
    frame_->EmitPush(r0);
  }
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::VisitThisFunction(ThisFunction* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(r0, frame_->Function());
  frame_->EmitPush(r0);
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ CompareOperation");

  // Get the expressions from the node.
  Expression* left = node->left();
  Expression* right = node->right();
  Token::Value op = node->op();

  // To make null checks efficient, we check if either left or right is the
  // literal 'null'. If so, we optimize the code by inlining a null check
  // instead of calling the (very) general runtime routine for checking
  // equality.
  if (op == Token::EQ || op == Token::EQ_STRICT) {
    bool left_is_null =
        left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
    bool right_is_null =
        right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
    // The 'null' value can only be equal to 'null' or 'undefined'.
    if (left_is_null || right_is_null) {
      LoadAndSpill(left_is_null ? right : left);
      frame_->EmitPop(r0);
      __ LoadRoot(ip, Heap::kNullValueRootIndex);
      __ cmp(r0, ip);

      // The 'null' value is only equal to 'undefined' if using non-strict
      // comparisons.
      if (op != Token::EQ_STRICT) {
        true_target()->Branch(eq);

        __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
        __ cmp(r0, Operand(ip));
        true_target()->Branch(eq);

        __ tst(r0, Operand(kSmiTagMask));
        false_target()->Branch(eq);

        // It can be an undetectable object.
        __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
        __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
        __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
        __ cmp(r0, Operand(1 << Map::kIsUndetectable));
      }

      cc_reg_ = eq;
      ASSERT(has_cc() && frame_->height() == original_height);
      return;
    }
  }

  // To make typeof testing for natives implemented in JavaScript really
  // efficient, we generate special code for expressions of the form:
  // 'typeof <expression> == <string>'.
  UnaryOperation* operation = left->AsUnaryOperation();
  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
      (operation != NULL && operation->op() == Token::TYPEOF) &&
      (right->AsLiteral() != NULL &&
       right->AsLiteral()->handle()->IsString())) {
    Handle<String> check(String::cast(*right->AsLiteral()->handle()));

    // Load the operand, move it to register r1.
    LoadTypeofExpression(operation->expression());
    frame_->EmitPop(r1);

    if (check->Equals(Heap::number_symbol())) {
      __ tst(r1, Operand(kSmiTagMask));
      true_target()->Branch(eq);
      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
      __ cmp(r1, ip);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::string_symbol())) {
      __ tst(r1, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));

      // It can be an undetectable string object.
      __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
      __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
      __ cmp(r2, Operand(1 << Map::kIsUndetectable));
      false_target()->Branch(eq);

      __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
      __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
      cc_reg_ = lt;

    } else if (check->Equals(Heap::boolean_symbol())) {
      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
      __ cmp(r1, ip);
      true_target()->Branch(eq);
      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
      __ cmp(r1, ip);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::undefined_symbol())) {
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(r1, ip);
      true_target()->Branch(eq);

      __ tst(r1, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      // It can be an undetectable object.
      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
      __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
      __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
      __ cmp(r2, Operand(1 << Map::kIsUndetectable));

      cc_reg_ = eq;

    } else if (check->Equals(Heap::function_symbol())) {
      __ tst(r1, Operand(kSmiTagMask));
      false_target()->Branch(eq);
      Register map_reg = r2;
      __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
      true_target()->Branch(eq);
      // Regular expressions are callable so typeof == 'function'.
      __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::object_symbol())) {
      __ tst(r1, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      __ LoadRoot(ip, Heap::kNullValueRootIndex);
      __ cmp(r1, ip);
      true_target()->Branch(eq);

      Register map_reg = r2;
      __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
      false_target()->Branch(eq);

      // It can be an undetectable object.
      __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
      __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
      __ cmp(r1, Operand(1 << Map::kIsUndetectable));
      false_target()->Branch(eq);

      __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
      __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
      false_target()->Branch(lt);
      __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
      cc_reg_ = le;

    } else {
      // Uncommon case: typeof testing against a string literal that is
      // never returned from the typeof operator.
      false_target()->Jump();
    }
    ASSERT(!has_valid_frame() ||
           (has_cc() && frame_->height() == original_height));
    return;
  }

  switch (op) {
    case Token::EQ:
      Comparison(eq, left, right, false);
      break;

    case Token::LT:
      Comparison(lt, left, right);
      break;

    case Token::GT:
      Comparison(gt, left, right);
      break;

    case Token::LTE:
      Comparison(le, left, right);
      break;

    case Token::GTE:
      Comparison(ge, left, right);
      break;

    case Token::EQ_STRICT:
      Comparison(eq, left, right, true);
      break;

    case Token::IN: {
      LoadAndSpill(left);
      LoadAndSpill(right);
      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
      frame_->EmitPush(r0);
      break;
    }

    case Token::INSTANCEOF: {
      LoadAndSpill(left);
      LoadAndSpill(right);
      InstanceofStub stub;
      frame_->CallStub(&stub, 2);
      // At this point if instanceof succeeded then r0 == 0.
      __ tst(r0, Operand(r0));
      cc_reg_ = eq;
      break;
    }

    default:
      UNREACHABLE();
  }
  ASSERT((has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::EmitKeyedLoad(bool is_global) {
  Comment cmnt(masm_, "[ Load from keyed Property");
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  RelocInfo::Mode rmode = is_global
                          ? RelocInfo::CODE_TARGET_CONTEXT
                          : RelocInfo::CODE_TARGET;
  frame_->CallCodeObject(ic, rmode, 0);
}


#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif


#undef __
#define __ ACCESS_MASM(masm)


Handle<String> Reference::GetName() {
  ASSERT(type_ == NAMED);
  Property* property = expression_->AsProperty();
  if (property == NULL) {
    // Global variable reference treated as a named property reference.
    VariableProxy* proxy = expression_->AsVariableProxy();
    ASSERT(proxy->AsVariable() != NULL);
    ASSERT(proxy->AsVariable()->is_global());
    return proxy->name();
  } else {
    Literal* raw_name = property->key()->AsLiteral();
    ASSERT(raw_name != NULL);
    return Handle<String>(String::cast(*raw_name->handle()));
  }
}


void Reference::GetValue() {
  ASSERT(cgen_->HasValidEntryRegisters());
  ASSERT(!is_illegal());
  ASSERT(!cgen_->has_cc());
  MacroAssembler* masm = cgen_->masm();
  Property* property = expression_->AsProperty();
  if (property != NULL) {
    cgen_->CodeForSourcePosition(property->position());
  }

  switch (type_) {
    case SLOT: {
      Comment cmnt(masm, "[ Load from Slot");
      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
      ASSERT(slot != NULL);
      cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
      break;
    }

    case NAMED: {
      VirtualFrame* frame = cgen_->frame();
      Comment cmnt(masm, "[ Load from named Property");
      Handle<String> name(GetName());
      Variable* var = expression_->AsVariableProxy()->AsVariable();
      Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
      // Setup the name register.
      __ mov(r2, Operand(name));
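      // The IC takes the property name in r2 and the receiver from the
      // frame; the result comes back in r0 and is pushed below.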
      ASSERT(var == NULL || var->is_global());
      RelocInfo::Mode rmode = (var == NULL)
                              ? RelocInfo::CODE_TARGET
                              : RelocInfo::CODE_TARGET_CONTEXT;
      frame->CallCodeObject(ic, rmode, 0);
      frame->EmitPush(r0);
      break;
    }

    case KEYED: {
      // TODO(181): Implement inlined version of array indexing once
      // loop nesting is properly tracked on ARM.
      ASSERT(property != NULL);
      Variable* var = expression_->AsVariableProxy()->AsVariable();
      ASSERT(var == NULL || var->is_global());
      cgen_->EmitKeyedLoad(var != NULL);
      cgen_->frame()->EmitPush(r0);
      break;
    }

    default:
      UNREACHABLE();
  }

  if (!persist_after_get_) {
    cgen_->UnloadReference(this);
  }
}


void Reference::SetValue(InitState init_state) {
  ASSERT(!is_illegal());
  ASSERT(!cgen_->has_cc());
  MacroAssembler* masm = cgen_->masm();
  VirtualFrame* frame = cgen_->frame();
  Property* property = expression_->AsProperty();
  if (property != NULL) {
    cgen_->CodeForSourcePosition(property->position());
  }

  switch (type_) {
    case SLOT: {
      Comment cmnt(masm, "[ Store to Slot");
      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
      cgen_->StoreToSlot(slot, init_state);
      cgen_->UnloadReference(this);
      break;
    }

    case NAMED: {
      Comment cmnt(masm, "[ Store to named Property");
      // Call the appropriate IC code.
      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
      Handle<String> name(GetName());

      frame->EmitPop(r0);
      frame->EmitPop(r1);
      __ mov(r2, Operand(name));
      frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
      frame->EmitPush(r0);
      set_unloaded();
      break;
    }

    case KEYED: {
      Comment cmnt(masm, "[ Store to keyed Property");
      Property* property = expression_->AsProperty();
      ASSERT(property != NULL);
      cgen_->CodeForSourcePosition(property->position());

      // Call IC code.
      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
      frame->EmitPop(r0);  // value
      frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
      frame->EmitPush(r0);
      cgen_->UnloadReference(this);
      break;
    }

    default:
      UNREACHABLE();
  }
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Clone the boilerplate in new space. Set the context to the
  // current context in cp.
  Label gc;

  // Pop the boilerplate function from the stack.
  __ pop(r3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize / kPointerSize,
                        r0,
                        r1,
                        r2,
                        &gc,
                        TAG_OBJECT);
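  // r0 now holds the tagged address of the new object; r1 and r2 were only
  // used as scratch registers by the allocator.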

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
  __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Clone the rest of the boilerplate fields. We don't have to update
  // the write barrier because the allocated object is in new space.
  for (int offset = kPointerSize;
       offset < JSFunction::kSize;
       offset += kPointerSize) {
    if (offset == JSFunction::kContextOffset) {
      __ str(cp, FieldMemOperand(r0, offset));
    } else {
      __ ldr(r1, FieldMemOperand(r3, offset));
      __ str(r1, FieldMemOperand(r0, offset));
    }
  }

  // Return result. The argument boilerplate has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ push(cp);
  __ push(r3);
  __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize),
                        r0,
                        r1,
                        r2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Setup the object header.
  __ LoadRoot(r2, Heap::kContextMapRootIndex);
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ mov(r2, Operand(length));
  __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));

  // Setup the fixed slots.
  __ mov(r1, Operand(Smi::FromInt(0)));
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the surrounding context.
  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into r3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4584 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
4585 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4586 __ cmp(r3, ip);
4587 __ b(eq, &slow_case);
4588
4589 // Allocate both the JS array and the elements array in one big
4590 // allocation. This avoids multiple limit checks.
4591 __ AllocateInNewSpace(size / kPointerSize,
4592 r0,
4593 r1,
4594 r2,
4595 &slow_case,
4596 TAG_OBJECT);
4597
4598 // Copy the JS array part.
4599 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
4600 if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
4601 __ ldr(r1, FieldMemOperand(r3, i));
4602 __ str(r1, FieldMemOperand(r0, i));
4603 }
4604 }
4605
  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ add(r2, r0, Operand(JSArray::kSize));
    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));

    // Copy the elements array.
    for (int i = 0; i < elements_size; i += kPointerSize) {
      __ ldr(r1, FieldMemOperand(r3, i));
      __ str(r1, FieldMemOperand(r2, i));
    }
  }

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
  __ TailCallRuntime(runtime, 3, 1);
}


// Count leading zeros in a 32 bit word. On ARMv5 and later it uses the clz
// instruction. On pre-ARMv5 hardware this routine gives the wrong answer
// for 0 (31 instead of 32).
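// As a worked example for the pre-ARMv5 path below, take source == 0x1234:
// the top 16 bits are clear, so 16 is added and the word becomes 0x12340000;
// the top 8 and top 4 bits are now non-zero, so nothing is added; the top 2
// bits are clear, so 2 is added and the word becomes 0x48d00000; the top bit
// is still clear, so 1 more is added, giving 19 leading zeros in total.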
static void CountLeadingZeros(
    MacroAssembler* masm,
    Register source,
    Register scratch,
    Register zeros) {
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  __ clz(zeros, source);  // Only supported on ARMv5 and later.
#else
  __ mov(zeros, Operand(0));
  __ mov(scratch, source);
  // Top 16.
  __ tst(scratch, Operand(0xffff0000));
  __ add(zeros, zeros, Operand(16), LeaveCC, eq);
  __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
  // Top 8.
  __ tst(scratch, Operand(0xff000000));
  __ add(zeros, zeros, Operand(8), LeaveCC, eq);
  __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
  // Top 4.
  __ tst(scratch, Operand(0xf0000000));
  __ add(zeros, zeros, Operand(4), LeaveCC, eq);
  __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
  // Top 2.
  __ tst(scratch, Operand(0xc0000000));
  __ add(zeros, zeros, Operand(2), LeaveCC, eq);
  __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
  // Top bit.
  __ tst(scratch, Operand(0x80000000u));
  __ add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}


// Takes a Smi and converts it to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
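// Worked example (a sketch of the generated code's effect): the Smi 5 is
// 1.25 * 2^2, so the biased exponent is 2 + 1023 = 1025 and the result words
// are 0x40140000 (sign, exponent and top of mantissa) and 0x00000000 (low
// mantissa word), which is the IEEE bit pattern of the double 5.0.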
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "ConvertToDoubleStub"; }

#ifdef DEBUG
  void Print() { PrintF("ConvertToDoubleStub\n"); }
#endif
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
  __ rsb(source_, source_, Operand(0), LeaveCC, ne);
  __ cmp(source_, Operand(1));
  __ b(gt, &not_special);

  // We have -1, 0 or 1, which we treat specially.
  __ cmp(source_, Operand(0));
  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, Operand(0));
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros. Uses mantissa as a scratch register on pre-ARMv5.
  // Gets the wrong answer for 0, but we already checked for that case above.
  CountLeadingZeros(masm, source_, mantissa, zeros_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
  __ orr(exponent,
         exponent,
         Operand(mantissa, LSL, HeapNumber::kExponentShift));
  // Shift up the source chopping the top bit off.
  __ add(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ mov(source_, Operand(source_, LSL, zeros_));
  // Compute lower part of fraction (the last 12 bits of the source).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}


// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
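// Worked example (a sketch): the smallest positive non-Smi int32 is
// 2^30 == 0x40000000. The generated code gives it the biased exponent
// 1023 + 30 == 0x41d, the implicit leading mantissa bit merges harmlessly
// into the low exponent bit, and the stored words are 0x41d00000 and
// 0x00000000, the IEEE bit pattern of the double 1073741824.0.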
class WriteInt32ToHeapNumberStub : public CodeStub {
 public:
  WriteInt32ToHeapNumberStub(Register the_int,
                             Register the_heap_number,
                             Register scratch)
      : the_int_(the_int),
        the_heap_number_(the_heap_number),
        scratch_(scratch) { }

 private:
  Register the_int_;
  Register the_heap_number_;
  Register scratch_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return WriteInt32ToHeapNumber; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return the_int_.code() +
           (the_heap_number_.code() << 4) +
           (scratch_.code() << 8);
  }

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }

#ifdef DEBUG
  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};


// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer, which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent. This test
  // has the neat side effect of setting the flags according to the sign.
  ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_. All non-Smi int32s have the
  // same exponent: a non-Smi integer is 1.xxx * 2^30, so the exponent is
  // 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ mov(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
  __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(ip, Operand(0));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = r5;
  __ cmp(r0, Operand(r1));
  __ b(ne, &not_identical);

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cc != eq || !never_nan_nan) {
    __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));

    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are identical, and since they are not both Smis, neither of them
    // is a Smi. If it's not a heap number, then return equal.
    if (cc == lt || cc == gt) {
      __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
      __ b(ge, slow);
    } else {
      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
      __ b(eq, &heap_number);
      // Comparing JS objects with <=, >= is complicated.
      if (cc != eq) {
        __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
        __ b(ge, slow);
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false! See ECMAScript 11.8.5.
        if (cc == le || cc == ge) {
          __ cmp(r4, Operand(ODDBALL_TYPE));
          __ b(ne, &return_equal);
          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
          __ cmp(r0, Operand(r2));
          __ b(ne, &return_equal);
          if (cc == le) {
            // undefined <= undefined should fail.
            __ mov(r0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ mov(r0, Operand(LESS));
          }
          __ mov(pc, Operand(lr));  // Return.
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cc == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == gt) {
    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
  }
  __ mov(pc, Operand(lr));  // Return.

  if (cc != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless. For the others here is some code to check
    // for NaN.
    if (cc != lt && cc != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if
      // it's not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
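      // For example, +Infinity has the bit pattern 0x7ff00000 00000000 (all
      // exponent bits set, mantissa all zero) and compares equal to itself,
      // while the quiet NaN 0x7ff80000 00000000 has a non-zero mantissa and
      // must not.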
      // Read top bits of double representation (second word of value).
      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ and_(r3, r2, Operand(exp_mask_reg));
      __ cmp(r3, Operand(exp_mask_reg));
      __ b(ne, &return_equal);

      // Shift out the sign bit and all exponent bits, retaining only the
      // mantissa.
      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
      // Or with all low-bits of mantissa.
      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
      __ orr(r0, r3, Operand(r2), SetCC);
      // For equal we already have the right value in r0: Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN). For <= and >= we need to load r0 with the failing
      // value if it's a NaN.
      if (cc != eq) {
        // An all-zero mantissa means Infinity, which compares equal.
        __ mov(pc, Operand(lr), LeaveCC, eq);  // Return equal.
        if (cc == le) {
          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
        }
      }
      __ mov(pc, Operand(lr));  // Return.
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  Label rhs_is_smi;
  __ tst(r0, Operand(kSmiTagMask));
  __ b(eq, &rhs_is_smi);

  // Lhs is a Smi. Check whether the rhs is a heap number.
  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi, then strict equality cannot
    // succeed. Return non-equal (r0 is already not zero).
    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs (r1) is a smi, rhs (r0) is a number.
  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert lhs to a double in d7.
    CpuFeatures::Scope scope(VFP3);
    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
    __ vmov(s15, r7);
    __ vcvt(d7, s15);
    // Load the double from rhs, tagged HeapNumber r0, to d6.
    __ sub(r7, r0, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
  } else {
    __ push(lr);
    // Convert lhs to a double in r2, r3.
    __ mov(r7, Operand(r1));
    ConvertToDoubleStub stub1(r3, r2, r7, r6);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
    // Load rhs to a double in r0, r1.
    __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
    __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
    __ pop(lr);
  }

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi, then strict equality cannot
    // succeed. Return non-equal.
    __ mov(r0, Operand(1), LeaveCC, ne);  // Non-zero indicates not equal.
    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs (r0) is a smi, lhs (r1) is a heap number.
  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert rhs to a double in d6.
    CpuFeatures::Scope scope(VFP3);
    // Load the double from lhs, tagged HeapNumber r1, to d7.
    __ sub(r7, r1, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
    __ vmov(s13, r7);
    __ vcvt(d6, s13);
  } else {
    __ push(lr);
    // Load lhs to a double in r2, r3.
    __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
    __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
    // Convert rhs to a double in r0, r1.
    __ mov(r7, Operand(r0));
    ConvertToDoubleStub stub2(r1, r0, r7, r6);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }
  // Fall through to both_loaded_as_doubles.
}


void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  Register rhs_exponent = exp_first ? r0 : r1;
  Register lhs_exponent = exp_first ? r2 : r3;
  Register rhs_mantissa = exp_first ? r1 : r0;
  Register lhs_mantissa = exp_first ? r3 : r2;
  Label one_is_nan, neither_is_nan;
  Label lhs_not_nan_exp_mask_is_loaded;

  Register exp_mask_reg = r5;

  __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
  __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
  __ cmp(r4, Operand(exp_mask_reg));
  __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
  __ mov(r4,
         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
         SetCC);
  __ b(ne, &one_is_nan);
  __ cmp(lhs_mantissa, Operand(0));
  __ b(ne, &one_is_nan);

  __ bind(lhs_not_nan);
  __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
  __ bind(&lhs_not_nan_exp_mask_is_loaded);
  __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
  __ cmp(r4, Operand(exp_mask_reg));
  __ b(ne, &neither_is_nan);
  __ mov(r4,
         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
         SetCC);
  __ b(ne, &one_is_nan);
  __ cmp(rhs_mantissa, Operand(0));
  __ b(eq, &neither_is_nan);

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in r0 to make the comparison fail.
  if (cc == lt || cc == le) {
    __ mov(r0, Operand(GREATER));
  } else {
    __ mov(r0, Operand(LESS));
  }
  __ mov(pc, Operand(lr));  // Return.

  __ bind(&neither_is_nan);
}


// See comment at call site.
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  Register rhs_exponent = exp_first ? r0 : r1;
  Register lhs_exponent = exp_first ? r2 : r3;
  Register rhs_mantissa = exp_first ? r1 : r0;
  Register lhs_mantissa = exp_first ? r3 : r2;

  // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
  if (cc == eq) {
    // Doubles are not equal unless they have the same bit pattern.
    // Exception: 0 and -0.
    __ cmp(rhs_mantissa, Operand(lhs_mantissa));
    __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
    // Return non-zero if the numbers are unequal.
    __ mov(pc, Operand(lr), LeaveCC, ne);

    __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
    // If exponents are equal then return 0.
    __ mov(pc, Operand(lr), LeaveCC, eq);

    // Exponents are unequal. The only way we can return that the numbers
    // are equal is if one is -0 and the other is 0. We already dealt
    // with the case where both are -0 or both are 0.
    // We start by seeing if the mantissas (that are equal) or the bottom
    // 31 bits of the lhs exponent are non-zero. If so we return not
    // equal.
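    // Shifting the exponent word left by one discards the sign bit, so for
    // lhs == -0.0 (exponent word 0x80000000, mantissa word 0) the test below
    // produces zero and we fall through; the rhs is then equal exactly when
    // its own exponent word, minus the sign bit, is zero too.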
    __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
    __ mov(r0, Operand(r4), LeaveCC, ne);
    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return conditionally.
    // Now they are equal if and only if the rhs exponent is zero in its
    // low 31 bits.
    __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
    __ mov(pc, Operand(lr));
  } else {
    // Call a native function to do a comparison between two non-NaNs.
    // Call C routine that may not cause GC or other trouble.
    __ mov(r5, Operand(ExternalReference::compare_doubles()));
    __ Jump(r5);  // Tail call.
  }
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
  // If either operand is a JSObject or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_JS_OBJECT_TYPE.
  __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero).
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ mov(pc, Operand(lr));  // Return.

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
  ASSERT(kSymbolTag != 0);
  __ and_(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(ne, &return_not_equal);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ cmp(r2, r3);
  __ b(ne, slow);  // First was a heap number, second wasn't. Go slow case.

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ sub(r7, r0, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
    __ sub(r7, r1, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
  } else {
    __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
    __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
    __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
    __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
  }
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
  // r2 is object type of r0.
  // Ensure that no non-strings have the symbol bit set.
  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
  ASSERT(kSymbolTag != 0);
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(eq, slow);
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
  __ tst(r3, Operand(kIsSymbolMask));
  __ b(eq, slow);

  // Both are symbols. We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(1));   // Non-zero indicates not equal.
  __ mov(pc, Operand(lr));  // Return.
}


// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  ASSERT_EQ(0, kSmiTag);
  ASSERT_EQ(0, Smi::FromInt(0));
  __ and_(r2, r0, Operand(r1));
  __ tst(r2, Operand(kSmiTagMask));
  __ b(ne, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison. If VFP3 is supported the double values of the numbers have
  // been loaded into d7 and d6. Otherwise, the double values have been loaded
  // into r0, r1, r2, and r3.
  EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7, if
  // VFP3 is supported, or in r0, r1, r2, and r3.
  if (CpuFeatures::IsSupported(VFP3)) {
    __ bind(&lhs_not_nan);
    CpuFeatures::Scope scope(VFP3);
    // ARMv7 VFP3 instructions to implement double precision comparison.
    __ vcmp(d7, d6);
    __ vmrs(pc);  // Move vector status bits to normal status bits.
    Label nan;
    __ b(vs, &nan);
    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
    __ mov(r0, Operand(LESS), LeaveCC, lt);
    __ mov(r0, Operand(GREATER), LeaveCC, gt);
    __ mov(pc, Operand(lr));

    __ bind(&nan);
    // If one of the sides was a NaN then the v flag is set. Load r0 with
    // whatever it takes to make the comparison fail, since comparisons with
    // NaN always fail.
    if (cc_ == lt || cc_ == le) {
      __ mov(r0, Operand(GREATER));
    } else {
      __ mov(r0, Operand(LESS));
    }
    __ mov(pc, Operand(lr));
  } else {
    // Checks for NaN in the doubles we have loaded. Can return the answer or
    // fall through if neither is a NaN. Also binds lhs_not_nan.
    EmitNanCheck(masm, &lhs_not_nan, cc_);
    // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
    // answer. Never falls through.
    EmitTwoNonNanDoubleComparison(masm, cc_);
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in r0 and r1.
  if (strict_) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm);
  }

  Label check_for_symbols;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to check_for_symbols.
  // In this case r2 will contain the type of r0. Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
                             &both_loaded_as_doubles,
                             &check_for_symbols,
                             &flat_string_check);

  __ bind(&check_for_symbols);
  // In the strict case, EmitStrictTwoHeapObjectCompare has already taken
  // care of symbols.
  if (cc_ == eq && !strict_) {
    // Either jumps to slow or returns the answer. Assumes that r2 is the type
    // of r0 on entry.
    EmitCheckForSymbols(masm, &flat_string_check);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);

  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                     r1,
                                                     r0,
                                                     r2,
                                                     r3,
                                                     r4,
                                                     r5);
  // Never falls through to here.

  __ bind(&slow);

  __ push(r1);
  __ push(r0);
  // Figure out which native to call and set up the arguments.
  Builtins::JavaScript native;
  if (cc_ == eq) {
    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result.
    if (cc_ == lt || cc_ == le) {
      ncr = GREATER;
    } else {
      ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ mov(r0, Operand(Smi::FromInt(ncr)));
    __ push(r0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_JS);
}


// Allocates a heap number or jumps to the label if the young space is full and
// a scavenge is needed.
static void AllocateHeapNumber(
    MacroAssembler* masm,
    Label* need_gc,       // Jump here if young space is full.
    Register result,      // The tagged address of the new heap number.
    Register scratch1,    // A scratch register.
    Register scratch2) {  // Another scratch register.
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
                        result,
                        scratch1,
                        scratch2,
                        need_gc,
                        TAG_OBJECT);

  // Get heap number map and store it in the allocated object.
  __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
  __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
}


// We fall into this code if the operands were Smis, but the result was
// not (e.g. overflow). We branch into this code (to the not_smi label) if
// the operands were not both Smi. The operands are in r0 and r1. In order
// to call the C-implemented binary fp operation routines we need to end up
// with the double precision floating point operands in r0 and r1 (for the
// value in r1) and r2 and r3 (for the value in r0).
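// As set up by ConvertToDoubleStub above, the low (mantissa) word of each
// double travels in the lower-numbered register of its pair. For instance,
// 2.0 (IEEE bits 0x40000000 00000000) would be passed as r0 == 0x00000000
// and r1 == 0x40000000.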
static void HandleBinaryOpSlowCases(MacroAssembler* masm,
                                    Label* not_smi,
                                    const Builtins::JavaScript& builtin,
                                    Token::Value operation,
                                    OverwriteMode mode) {
  Label slow, do_the_call;
  Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
  // Smi-smi case (overflow).
  // Since both are Smis there is no heap number to overwrite, so allocate.
  // The new heap number is in r5. r6 and r7 are scratch.
  AllocateHeapNumber(masm, &slow, r5, r6, r7);

  // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
  // using registers d7 and d6 for the double values.
  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
                          Token::MOD != operation;
  if (use_fp_registers) {
    CpuFeatures::Scope scope(VFP3);
    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
    __ vmov(s15, r7);
    __ vcvt(d7, s15);
    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
    __ vmov(s13, r7);
    __ vcvt(d6, s13);
  } else {
    // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
    __ mov(r7, Operand(r0));
    ConvertToDoubleStub stub1(r3, r2, r7, r6);
    __ push(lr);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
    // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
    __ mov(r7, Operand(r1));
    ConvertToDoubleStub stub2(r1, r0, r7, r6);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }

  __ jmp(&do_the_call);  // Tail call.  No return.

  // We jump to here if something goes wrong (one param is not a number of any
  // sort or new-space allocation fails).
  __ bind(&slow);

  // Push arguments to the stack.
  __ push(r1);
  __ push(r0);

  if (Token::ADD == operation) {
    // Test for string arguments before calling runtime.
    // r1 : first argument
    // r0 : second argument
    // sp[0] : second argument
    // sp[4] : first argument

    Label not_strings, not_string1, string1;
    __ tst(r1, Operand(kSmiTagMask));
    __ b(eq, &not_string1);
    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
    __ b(ge, &not_string1);

    // First argument is a string, test second.
    __ tst(r0, Operand(kSmiTagMask));
    __ b(eq, &string1);
    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
    __ b(ge, &string1);

    // First and second argument are strings.
    StringAddStub stub(NO_STRING_CHECK_IN_STUB);
    __ TailCallStub(&stub);

    // Only first argument is a string.
    __ bind(&string1);
    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);

    // First argument was not a string, test second.
    __ bind(&not_string1);
    __ tst(r0, Operand(kSmiTagMask));
    __ b(eq, &not_strings);
    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
    __ b(ge, &not_strings);

    // Only second argument is a string.
    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);

    __ bind(&not_strings);
  }

  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.

  // We branch here if at least one of r0 and r1 is not a Smi.
  __ bind(not_smi);
  if (mode == NO_OVERWRITE) {
    // In the case where there is no chance of an overwritable float we may as
    // well do the allocation immediately while r0 and r1 are untouched.
    AllocateHeapNumber(masm, &slow, r5, r6, r7);
  }

  // Move r0 to a double in r2-r3.
  __ tst(r0, Operand(kSmiTagMask));
  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
  __ b(ne, &slow);
  if (mode == OVERWRITE_RIGHT) {
    __ mov(r5, Operand(r0));  // Overwrite this heap number.
  }
  if (use_fp_registers) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from tagged HeapNumber r0 to d7.
    __ sub(r7, r0, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
  } else {
    // Calling convention says that second double is in r2 and r3.
    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
  }
  __ jmp(&finished_loading_r0);
  __ bind(&r0_is_smi);
  if (mode == OVERWRITE_RIGHT) {
    // We can't overwrite a Smi so get address of new heap number into r5.
    AllocateHeapNumber(masm, &slow, r5, r6, r7);
  }

  if (use_fp_registers) {
    CpuFeatures::Scope scope(VFP3);
    // Convert smi in r0 to double in d7.
    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
    __ vmov(s15, r7);
    __ vcvt(d7, s15);
  } else {
    // Write Smi from r0 to r3 and r2 in double format.
    __ mov(r7, Operand(r0));
    ConvertToDoubleStub stub3(r3, r2, r7, r6);
    __ push(lr);
    __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }

  __ bind(&finished_loading_r0);

  // Move r1 to a double in r0-r1.
  __ tst(r1, Operand(kSmiTagMask));
  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
  __ b(ne, &slow);
  if (mode == OVERWRITE_LEFT) {
    __ mov(r5, Operand(r1));  // Overwrite this heap number.
  }
  if (use_fp_registers) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from tagged HeapNumber r1 to d6.
    __ sub(r7, r1, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
  } else {
    // Calling convention says that first double is in r0 and r1.
    __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
    __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
  }
  __ jmp(&finished_loading_r1);
  __ bind(&r1_is_smi);
  if (mode == OVERWRITE_LEFT) {
    // We can't overwrite a Smi so get address of new heap number into r5.
    AllocateHeapNumber(masm, &slow, r5, r6, r7);
  }

  if (use_fp_registers) {
    CpuFeatures::Scope scope(VFP3);
    // Convert smi in r1 to double in d6.
    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
    __ vmov(s13, r7);
    __ vcvt(d6, s13);
  } else {
    // Write Smi from r1 to r1 and r0 in double format.
    __ mov(r7, Operand(r1));
    ConvertToDoubleStub stub4(r1, r0, r7, r6);
    __ push(lr);
    __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }

  __ bind(&finished_loading_r1);

  __ bind(&do_the_call);
  // If we are inlining the operation using VFP3 instructions for
  // add, subtract, multiply, or divide, the arguments are in d6 and d7.
  if (use_fp_registers) {
    CpuFeatures::Scope scope(VFP3);
    // ARMv7 VFP3 instructions to implement
    // double precision, add, subtract, multiply, divide.

    if (Token::MUL == operation) {
      __ vmul(d5, d6, d7);
    } else if (Token::DIV == operation) {
      __ vdiv(d5, d6, d7);
    } else if (Token::ADD == operation) {
      __ vadd(d5, d6, d7);
    } else if (Token::SUB == operation) {
      __ vsub(d5, d6, d7);
    } else {
      UNREACHABLE();
    }
    __ sub(r0, r5, Operand(kHeapObjectTag));
    __ vstr(d5, r0, HeapNumber::kValueOffset);
    __ add(r0, r0, Operand(kHeapObjectTag));
    __ mov(pc, lr);
    return;
  }

  // If we did not inline the operation, then the arguments are in:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).
  // r5: Address of heap number for result.

  __ push(lr);  // For later.
  __ push(r5);  // Address of heap number that is answer.
  __ AlignStack(0);
  // Call C routine that may not cause GC or other trouble.
  __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
  __ Call(r5);
  __ pop(r4);  // Address of heap number.
  __ cmp(r4, Operand(Smi::FromInt(0)));
  __ pop(r4, eq);  // Conditional pop instruction to get rid of alignment push.
  // Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
  // Double returned in fp coprocessor register 0 and 1, encoded as register
  // cr8. Offsets must be divisible by 4 for coprocessor so we need to
  // subtract the tag from r4.
  __ sub(r5, r4, Operand(kHeapObjectTag));
  __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
#else
  // Double returned in registers 0 and 1.
  __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
  __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
#endif
  __ mov(r0, Operand(r4));
  // And we are done.
  __ pop(pc);
}


// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Fastest for doubles that are in the ranges
// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
// almost to the range of signed int32 values that are not Smis. Jumps to the
// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
// (excluding the endpoints).
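// Worked example for the non-VFP path (a sketch): the double 2^30 has the
// word pair 0x41d00000:00000000. Its biased exponent field, 0x41d, matches
// non_smi_exponent exactly, so the code below restores the implicit 1 bit,
// shifts the mantissa up by 10, and (the remaining shift amount in dest
// being 0) delivers 0x40000000 directly.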
static void GetInt32(MacroAssembler* masm,
                     Register source,
                     Register dest,
                     Register scratch,
                     Register scratch2,
                     Label* slow) {
  Label right_exponent, done;
  // Get exponent word.
  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
  // Load dest with zero. We use this either for the final shift or
  // for the answer.
  __ mov(dest, Operand(0));
  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
  // the exponent that we are fastest at and also the highest exponent we can
  // handle here.
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ cmp(scratch2, Operand(non_smi_exponent));
  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
  __ b(eq, &right_exponent);
  // If the exponent is higher than that then go to slow case. This catches
  // numbers that don't fit in a signed int32, infinities and NaNs.
  __ b(gt, slow);

  // We know the exponent is smaller than 30 (biased). If it is less than
  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
  // it rounds to zero.
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
  // Dest already has a Smi zero.
  __ b(lt, &done);
  if (!CpuFeatures::IsSupported(VFP3)) {
    // We have a shifted exponent between 0 and 30 in scratch2.
    __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
    // We now have the exponent in dest. Subtract from 30 to get
    // how much to shift down.
    __ rsb(dest, dest, Operand(30));
  }
  __ bind(&right_exponent);
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // ARMv7 VFP3 instructions implementing double precision to integer
    // conversion using round to zero.
    __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    __ vmov(d7, scratch2, scratch);
    __ vcvt(s15, d7);
    __ vmov(dest, s15);
  } else {
    // Get the top bits of the mantissa.
    __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
    // Put back the implicit 1.
    __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
    // Put sign in zero flag.
    __ tst(scratch, Operand(HeapNumber::kSignMask));
    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Shift down 22 bits to get the most significant 10 bits of the lower
    // mantissa word and append them to the bits already in scratch2.
    __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
    // Move down according to the exponent.
    __ mov(dest, Operand(scratch, LSR, dest));
    // Fix sign if sign bit was set.
    __ rsb(dest, dest, Operand(0), LeaveCC, ne);
  }
  __ bind(&done);
}


// For bitwise ops where the inputs are not both Smis we here try to determine
// whether both inputs are either Smis or at least heap numbers that can be
// represented by a 32 bit signed value. We truncate towards zero as required
// by the ES spec. If this is the case we do the bitwise op and see if the
// result is a Smi. If so, great, otherwise we try to find a heap number to
// write the answer into (either by allocating or by overwriting).
// On entry the operands are in r0 and r1. On exit the answer is in r0.
void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
  Label slow, result_not_a_smi;
  Label r0_is_smi, r1_is_smi;
  Label done_checking_r0, done_checking_r1;

  __ tst(r1, Operand(kSmiTagMask));
  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
  __ b(ne, &slow);
  GetInt32(masm, r1, r3, r5, r4, &slow);
  __ jmp(&done_checking_r1);
  __ bind(&r1_is_smi);
  __ mov(r3, Operand(r1, ASR, 1));
  __ bind(&done_checking_r1);

  __ tst(r0, Operand(kSmiTagMask));
  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
  __ b(ne, &slow);
  GetInt32(masm, r0, r2, r5, r4, &slow);
  __ jmp(&done_checking_r0);
  __ bind(&r0_is_smi);
  __ mov(r2, Operand(r0, ASR, 1));
  __ bind(&done_checking_r0);

  // r0 and r1: Original operands (Smi or heap numbers).
  // r2 and r3: Signed int32 operands.
  switch (op_) {
    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
    case Token::SAR:
      // Use only the 5 least significant bits of the shift count.
      __ and_(r2, r2, Operand(0x1f));
      __ mov(r2, Operand(r3, ASR, r2));
      break;
    case Token::SHR:
      // Use only the 5 least significant bits of the shift count.
      __ and_(r2, r2, Operand(0x1f));
      __ mov(r2, Operand(r3, LSR, r2), SetCC);
      // SHR is special because it is required to produce a positive answer.
      // The code below for writing into heap numbers isn't capable of writing
      // the register as an unsigned int so we go to slow case if we hit this
      // case.
      __ b(mi, &slow);
      break;
    case Token::SHL:
      // Use only the 5 least significant bits of the shift count.
      __ and_(r2, r2, Operand(0x1f));
      __ mov(r2, Operand(r3, LSL, r2));
      break;
    default: UNREACHABLE();
  }
  // Check that the *signed* result fits in a Smi.
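  // Adding 0x40000000 sets the sign bit exactly when r2 lies outside the
  // 32-bit Smi range [-0x40000000, 0x3fffffff]: values at or above 2^30 wrap
  // negative, and values below -2^30 stay negative.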
5781 __ add(r3, r2, Operand(0x40000000), SetCC);
5782 __ b(mi, &result_not_a_smi);
5783 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5784 __ Ret();
5785
5786 Label have_to_allocate, got_a_heap_number;
5787 __ bind(&result_not_a_smi);
5788 switch (mode_) {
5789 case OVERWRITE_RIGHT: {
5790 __ tst(r0, Operand(kSmiTagMask));
5791 __ b(eq, &have_to_allocate);
5792 __ mov(r5, Operand(r0));
5793 break;
5794 }
5795 case OVERWRITE_LEFT: {
5796 __ tst(r1, Operand(kSmiTagMask));
5797 __ b(eq, &have_to_allocate);
5798 __ mov(r5, Operand(r1));
5799 break;
5800 }
5801 case NO_OVERWRITE: {
5802 // Get a new heap number in r5. r6 and r7 are scratch.
5803 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5804 }
5805 default: break;
5806 }
5807 __ bind(&got_a_heap_number);
5808 // r2: Answer as signed int32.
5809 // r5: Heap number to write answer into.
5810
5811 // Nothing can go wrong now, so move the heap number to r0, which is the
5812 // result.
5813 __ mov(r0, Operand(r5));
5814
5815 // Tail call that writes the int32 in r2 to the heap number in r0, using
5816 // r3 as scratch. r0 is preserved and returned.
5817 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
5818 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
5819
5820 if (mode_ != NO_OVERWRITE) {
5821 __ bind(&have_to_allocate);
5822 // Get a new heap number in r5. r6 and r7 are scratch.
5823 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5824 __ jmp(&got_a_heap_number);
5825 }
5826
5827 // If all else failed then we go to the runtime system.
5828 __ bind(&slow);
5829 __ push(r1); // Push the original operands back on the stack for the builtin.
5830 __ push(r0);
5831 switch (op_) {
5832 case Token::BIT_OR:
5833 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
5834 break;
5835 case Token::BIT_AND:
5836 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
5837 break;
5838 case Token::BIT_XOR:
5839 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
5840 break;
5841 case Token::SAR:
5842 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
5843 break;
5844 case Token::SHR:
5845 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
5846 break;
5847 case Token::SHL:
5848 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
5849 break;
5850 default:
5851 UNREACHABLE();
5852 }
5853}
5854
5855
5856// Returns true if we can multiply by x with at most two shifts and an add.
5857// This answers yes for all integers from 2 to 10.
5858static bool IsEasyToMultiplyBy(int x) {
5859 if (x < 2) return false; // Avoid special cases.
5860 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
5861 if (IsPowerOf2(x)) return true; // Simple shift.
5862 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
5863 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
5864 return false;
5865}
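// Illustrative examples (comment only):
//   x = 8  -> power of two: a single shift.
//   x = 10 -> binary 1010, pop count 2: (src + (src << 2)) << 1.
//   x = 7  -> 7 + 1 is a power of two: (src << 3) - src, done with rsb.
//   x = 11 -> pop count 3 and 12 is not a power of two: not easy, so false.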
5866
5867
5868// Can multiply by anything that IsEasyToMultiplyBy returns true for.
5869// Source and destination may be the same register. This routine does
5870// not set carry and overflow the way a mul instruction would.
5871static void MultiplyByKnownInt(MacroAssembler* masm,
5872 Register source,
5873 Register destination,
5874 int known_int) {
5875 if (IsPowerOf2(known_int)) {
5876 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
5877 } else if (PopCountLessThanEqual2(known_int)) {
5878 int first_bit = BitPosition(known_int);
5879 int second_bit = BitPosition(known_int ^ (1 << first_bit));
5880 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
5881 if (first_bit != 0) {
5882 __ mov(destination, Operand(destination, LSL, first_bit));
5883 }
5884 } else {
5885 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
5886 int the_bit = BitPosition(known_int + 1);
5887 __ rsb(destination, source, Operand(source, LSL, the_bit));
5888 }
5889}
5890
5891
5892// This function (as opposed to MultiplyByKnownInt) takes the known int in
5893// a register for the cases where it doesn't know a good trick, and may
5894// deliver a result that needs shifting.
5895static void MultiplyByKnownInt2(
5896 MacroAssembler* masm,
5897 Register result,
5898 Register source,
5899 Register known_int_register, // Smi tagged.
5900 int known_int,
5901 int* required_shift) { // Including Smi tag shift
5902 switch (known_int) {
5903 case 3:
5904 __ add(result, source, Operand(source, LSL, 1));
5905 *required_shift = 1;
5906 break;
5907 case 5:
5908 __ add(result, source, Operand(source, LSL, 2));
5909 *required_shift = 1;
5910 break;
5911 case 6:
5912 __ add(result, source, Operand(source, LSL, 1));
5913 *required_shift = 2;
5914 break;
5915 case 7:
5916 __ rsb(result, source, Operand(source, LSL, 3));
5917 *required_shift = 1;
5918 break;
5919 case 9:
5920 __ add(result, source, Operand(source, LSL, 3));
5921 *required_shift = 1;
5922 break;
5923 case 10:
5924 __ add(result, source, Operand(source, LSL, 2));
5925 *required_shift = 2;
5926 break;
5927 default:
5928 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
5929 __ mul(result, source, known_int_register);
5930 *required_shift = 0;
5931 }
5932}
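// Illustrative trace (comment only): for known_int == 10 the code emits
// result = source + (source << 2), i.e. source * 5, and reports
// *required_shift == 2; the caller's later shift by 2 yields source * 20,
// the Smi-tagged form of source * 10 (the tag shift is included).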
5933
5934
5935const char* GenericBinaryOpStub::GetName() {
5936 if (name_ != NULL) return name_;
5937 const int len = 100;
5938 name_ = Bootstrapper::AllocateAutoDeletedArray(len);
5939 if (name_ == NULL) return "OOM";
5940 const char* op_name = Token::Name(op_);
5941 const char* overwrite_name;
5942 switch (mode_) {
5943 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
5944 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
5945 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
5946 default: overwrite_name = "UnknownOverwrite"; break;
5947 }
5948
5949 OS::SNPrintF(Vector<char>(name_, len),
5950 "GenericBinaryOpStub_%s_%s%s",
5951 op_name,
5952 overwrite_name,
5953 specialized_on_rhs_ ? "_ConstantRhs" : "");  // Never pass NULL for %s.
5954 return name_;
5955}
5956
5957
5958
5959void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
5960 // r1 : x
5961 // r0 : y
5962 // result : r0
5963
5964 // All ops need to know whether we are dealing with two Smis. Set up r2 to
5965 // tell us that.
5966 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
5967
5968 switch (op_) {
5969 case Token::ADD: {
5970 Label not_smi;
5971 // Fast path.
5972 ASSERT(kSmiTag == 0); // Adjust code below.
5973 __ tst(r2, Operand(kSmiTagMask));
5974 __ b(ne, &not_smi);
5975 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
5976 // Return if no overflow.
5977 __ Ret(vc);
5978 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
5979
5980 HandleBinaryOpSlowCases(masm,
5981 &not_smi,
5982 Builtins::ADD,
5983 Token::ADD,
5984 mode_);
5985 break;
5986 }
5987
5988 case Token::SUB: {
5989 Label not_smi;
5990 // Fast path.
5991 ASSERT(kSmiTag == 0); // Adjust code below.
5992 __ tst(r2, Operand(kSmiTagMask));
5993 __ b(ne, &not_smi);
5994 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
5995 // Return if no overflow.
5996 __ Ret(vc);
5997 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
5998
5999 HandleBinaryOpSlowCases(masm,
6000 &not_smi,
6001 Builtins::SUB,
6002 Token::SUB,
6003 mode_);
6004 break;
6005 }
6006
6007 case Token::MUL: {
6008 Label not_smi, slow;
6009 ASSERT(kSmiTag == 0); // adjust code below
6010 __ tst(r2, Operand(kSmiTagMask));
6011 __ b(ne, &not_smi);
6012 // Remove tag from one operand (but keep sign), so that result is Smi.
6013 __ mov(ip, Operand(r0, ASR, kSmiTagSize));
6014 // Do multiplication
6015 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
6016 // Go slow on overflows (overflow bit is not set).
6017 __ mov(ip, Operand(r3, ASR, 31));
6018 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
6019 __ b(ne, &slow);
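      // Illustrative trace (comment only): smull leaves the 64-bit product
      // in r2:r3. It fits in 32 bits exactly when the high word equals the
      // sign extension of the low word, e.g. x = 3 (r1 = 6), y = 4 (ip = 4)
      // gives r3 = 24 = Smi(12) and r2 = 0 = 24 >> 31, so no overflow.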
6020 // Go slow on zero result to handle -0.
6021 __ tst(r3, Operand(r3));
6022 __ mov(r0, Operand(r3), LeaveCC, ne);
6023 __ Ret(ne);
6024 // We need -0 if we were multiplying a negative number with 0 to get 0.
6025 // We know one of them was zero.
6026 __ add(r2, r0, Operand(r1), SetCC);
6027 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
6028 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
6029 // Slow case. We fall through here if we multiplied a negative number
6030 // with 0, because that would mean we should produce -0.
6031 __ bind(&slow);
6032
6033 HandleBinaryOpSlowCases(masm,
6034 &not_smi,
6035 Builtins::MUL,
6036 Token::MUL,
6037 mode_);
6038 break;
6039 }
6040
6041 case Token::DIV:
6042 case Token::MOD: {
6043 Label not_smi;
6044 if (specialized_on_rhs_) {
6045 Label smi_is_unsuitable;
6046 __ BranchOnNotSmi(r1, &not_smi);
6047 if (IsPowerOf2(constant_rhs_)) {
6048 if (op_ == Token::MOD) {
6049 __ and_(r0,
6050 r1,
6051 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
6052 SetCC);
6053 // We now have the answer, but if the input was negative we also
6054 // have the sign bit. Our work is done if the result is
6055 // positive or zero:
6056 __ Ret(pl);
6057 // A mod of a negative left hand side must return a negative number.
6058 // Unfortunately if the answer is 0 then we must return -0. And we
6059 // already optimistically trashed r0 so we may need to restore it.
6060 __ eor(r0, r0, Operand(0x80000000u), SetCC);
6061 // Next two instructions are conditional on the answer being -0.
6062 __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
6063 __ b(eq, &smi_is_unsuitable);
6064 // We need to subtract the dividend. Eg. -3 % 4 == -3.
6065 __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
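          // Illustrative trace (comment only), for constant_rhs_ == 4 with
          // r1 = Smi(-3) = 0xfffffffa: masking with 0x80000007 leaves
          // 0x80000002 (mi set), flipping the sign bit gives Smi(1)
          // (non-zero, so not the -0 case), and subtracting Smi(4) produces
          // Smi(-3), matching JavaScript's -3 % 4 == -3.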
6066 } else {
6067 ASSERT(op_ == Token::DIV);
6068 __ tst(r1,
6069 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
6070 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
6071 int shift = 0;
6072 int d = constant_rhs_;
6073 while ((d & 1) == 0) {
6074 d >>= 1;
6075 shift++;
6076 }
6077 __ mov(r0, Operand(r1, LSR, shift));
6078 __ bic(r0, r0, Operand(kSmiTagMask));
6079 }
6080 } else {
6081 // Not a power of 2.
6082 __ tst(r1, Operand(0x80000000u));
6083 __ b(ne, &smi_is_unsuitable);
6084 // Find a fixed point reciprocal of the divisor so we can divide by
6085 // multiplying.
6086 double divisor = 1.0 / constant_rhs_;
6087 int shift = 32;
6088 double scale = 4294967296.0; // 1 << 32.
6089 uint32_t mul;
6090 // Maximise the precision of the fixed point reciprocal.
6091 while (true) {
6092 mul = static_cast<uint32_t>(scale * divisor);
6093 if (mul >= 0x7fffffff) break;
6094 scale *= 2.0;
6095 shift++;
6096 }
6097 mul++;
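        // Illustrative trace (comment only), for constant_rhs_ == 10: the
        // loop settles on shift == 35 with mul == 0xcccccccd
        // (ceil(2^35 / 10)). umull keeps the high word, r2 = (r1 * mul) >> 32,
        // and the LSR #(35 - 31) below makes it (r1 * mul) >> 36. Since r1 is
        // the Smi-tagged lhs (2 * x), this is (x * mul) >> 35, the classic
        // magic-number form of x / 10.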
6098 __ mov(r2, Operand(mul));
6099 __ umull(r3, r2, r2, r1);
6100 __ mov(r2, Operand(r2, LSR, shift - 31));
6101 // r2 is r1 / rhs. r2 is not Smi tagged.
6102 // r0 is still the known rhs. r0 is Smi tagged.
6103 // r1 is still the unkown lhs. r1 is Smi tagged.
6104 int required_r4_shift = 0; // Including the Smi tag shift of 1.
6105 // r4 = r2 * r0.
6106 MultiplyByKnownInt2(masm,
6107 r4,
6108 r2,
6109 r0,
6110 constant_rhs_,
6111 &required_r4_shift);
6112 // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
6113 if (op_ == Token::DIV) {
6114 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
6115 __ b(ne, &smi_is_unsuitable); // There was a remainder.
6116 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
6117 } else {
6118 ASSERT(op_ == Token::MOD);
6119 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
6120 }
6121 }
6122 __ Ret();
6123 __ bind(&smi_is_unsuitable);
6124 } else {
6125 __ jmp(&not_smi);
6126 }
6127 HandleBinaryOpSlowCases(masm,
6128 &not_smi,
6129 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
6130 op_,
6131 mode_);
6132 break;
6133 }
6134
6135 case Token::BIT_OR:
6136 case Token::BIT_AND:
6137 case Token::BIT_XOR:
6138 case Token::SAR:
6139 case Token::SHR:
6140 case Token::SHL: {
6141 Label slow;
6142 ASSERT(kSmiTag == 0); // adjust code below
6143 __ tst(r2, Operand(kSmiTagMask));
6144 __ b(ne, &slow);
6145 switch (op_) {
6146 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
6147 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
6148 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
6149 case Token::SAR:
6150 // Remove tags from right operand.
6151 __ GetLeastBitsFromSmi(r2, r0, 5);
6152 __ mov(r0, Operand(r1, ASR, r2));
6153 // Smi tag result.
6154 __ bic(r0, r0, Operand(kSmiTagMask));
6155 break;
6156 case Token::SHR:
6157 // Remove tags from operands. We can't do this on a 31 bit number
6158 // because then the 0s get shifted into bit 30 instead of bit 31.
6159 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
6160 __ GetLeastBitsFromSmi(r2, r0, 5);
6161 __ mov(r3, Operand(r3, LSR, r2));
6162 // Unsigned shift is not allowed to produce a negative number, so
6163 // check the sign bit and the sign bit after Smi tagging.
6164 __ tst(r3, Operand(0xc0000000));
6165 __ b(ne, &slow);
6166 // Smi tag result.
6167 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
6168 break;
6169 case Token::SHL:
6170 // Remove tags from operands.
6171 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
6172 __ GetLeastBitsFromSmi(r2, r0, 5);
6173 __ mov(r3, Operand(r3, LSL, r2));
6174 // Check that the signed result fits in a Smi.
6175 __ add(r2, r3, Operand(0x40000000), SetCC);
6176 __ b(mi, &slow);
6177 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
6178 break;
6179 default: UNREACHABLE();
6180 }
6181 __ Ret();
6182 __ bind(&slow);
6183 HandleNonSmiBitwiseOp(masm);
6184 break;
6185 }
6186
6187 default: UNREACHABLE();
6188 }
6189 // This code should be unreachable.
6190 __ stop("Unreachable");
6191}
6192
6193
6194void StackCheckStub::Generate(MacroAssembler* masm) {
6195 // Do tail-call to runtime routine. Runtime routines expect at least one
6196 // argument, so give it a Smi.
6197 __ mov(r0, Operand(Smi::FromInt(0)));
6198 __ push(r0);
6199 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
6200
6201 __ StubReturn(1);
6202}
6203
6204
6205void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
6206 Label slow, done;
6207
6208 if (op_ == Token::SUB) {
6209 // Check whether the value is a smi.
6210 Label try_float;
6211 __ tst(r0, Operand(kSmiTagMask));
6212 __ b(ne, &try_float);
6213
6214 // Go slow case if the value of the expression is zero
6215 // to make sure that we switch between 0 and -0.
6216 __ cmp(r0, Operand(0));
6217 __ b(eq, &slow);
6218
6219 // The value of the expression is a smi that is not zero. Try
6220 // optimistic subtraction '0 - value'.
6221 __ rsb(r1, r0, Operand(0), SetCC);
6222 __ b(vs, &slow);
6223
6224 __ mov(r0, Operand(r1)); // Set r0 to result.
6225 __ b(&done);
6226
6227 __ bind(&try_float);
6228 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
6229 __ b(ne, &slow);
6230 // r0 is a heap number. Get a new heap number in r1.
6231 if (overwrite_) {
6232 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6233 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
6234 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6235 } else {
6236 AllocateHeapNumber(masm, &slow, r1, r2, r3);
6237 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
6238 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6239 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
6240 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
6241 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
6242 __ mov(r0, Operand(r1));
6243 }
6244 } else if (op_ == Token::BIT_NOT) {
6245 // Check if the operand is a heap number.
6246 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
6247 __ b(ne, &slow);
6248
6249 // Convert the heap number in r0 to an untagged integer in r1.
6250 GetInt32(masm, r0, r1, r2, r3, &slow);
6251
6252 // Do the bitwise operation (move negated) and check if the result
6253 // fits in a smi.
6254 Label try_float;
6255 __ mvn(r1, Operand(r1));
6256 __ add(r2, r1, Operand(0x40000000), SetCC);
6257 __ b(mi, &try_float);
6258 __ mov(r0, Operand(r1, LSL, kSmiTagSize));
6259 __ b(&done);
6260
6261 __ bind(&try_float);
6262 if (!overwrite_) {
6263 // Allocate a fresh heap number, but don't overwrite r0 until
6264 // we're sure we can do it without going through the slow case
6265 // that needs the value in r0.
6266 AllocateHeapNumber(masm, &slow, r2, r3, r4);
6267 __ mov(r0, Operand(r2));
6268 }
6269
6270 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
6271 // have to set up a frame.
6272 WriteInt32ToHeapNumberStub stub(r1, r0, r2);
6273 __ push(lr);
6274 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
6275 __ pop(lr);
6276 } else {
6277 UNIMPLEMENTED();
6278 }
6279
6280 __ bind(&done);
6281 __ StubReturn(1);
6282
6283 // Handle the slow case by jumping to the JavaScript builtin.
6284 __ bind(&slow);
6285 __ push(r0);
6286 switch (op_) {
6287 case Token::SUB:
6288 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
6289 break;
6290 case Token::BIT_NOT:
6291 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
6292 break;
6293 default:
6294 UNREACHABLE();
6295 }
6296}
6297
6298
6299void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
6300 // r0 holds the exception.
6301
6302 // Adjust this code if not the case.
6303 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6304
6305 // Drop the sp to the top of the handler.
6306 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6307 __ ldr(sp, MemOperand(r3));
6308
6309 // Restore the next handler and frame pointer, discard handler state.
6310 ASSERT(StackHandlerConstants::kNextOffset == 0);
6311 __ pop(r2);
6312 __ str(r2, MemOperand(r3));
6313 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6314 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
6315
6316 // Before returning we restore the context from the frame pointer if
6317 // not NULL. The frame pointer is NULL in the exception handler of a
6318 // JS entry frame.
6319 __ cmp(fp, Operand(0));
6320 // Set cp to NULL if fp is NULL.
6321 __ mov(cp, Operand(0), LeaveCC, eq);
6322 // Restore cp otherwise.
6323 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6324#ifdef DEBUG
6325 if (FLAG_debug_code) {
6326 __ mov(lr, Operand(pc));
6327 }
6328#endif
6329 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6330 __ pop(pc);
6331}
6332
6333
6334void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
6335 UncatchableExceptionType type) {
6336 // Adjust this code if not the case.
6337 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6338
6339 // Drop sp to the top stack handler.
6340 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6341 __ ldr(sp, MemOperand(r3));
6342
6343 // Unwind the handlers until the ENTRY handler is found.
6344 Label loop, done;
6345 __ bind(&loop);
6346 // Load the type of the current stack handler.
6347 const int kStateOffset = StackHandlerConstants::kStateOffset;
6348 __ ldr(r2, MemOperand(sp, kStateOffset));
6349 __ cmp(r2, Operand(StackHandler::ENTRY));
6350 __ b(eq, &done);
6351 // Fetch the next handler in the list.
6352 const int kNextOffset = StackHandlerConstants::kNextOffset;
6353 __ ldr(sp, MemOperand(sp, kNextOffset));
6354 __ jmp(&loop);
6355 __ bind(&done);
6356
6357 // Set the top handler address to next handler past the current ENTRY handler.
6358 ASSERT(StackHandlerConstants::kNextOffset == 0);
6359 __ pop(r2);
6360 __ str(r2, MemOperand(r3));
6361
6362 if (type == OUT_OF_MEMORY) {
6363 // Set external caught exception to false.
6364 ExternalReference external_caught(Top::k_external_caught_exception_address);
6365 __ mov(r0, Operand(false));
6366 __ mov(r2, Operand(external_caught));
6367 __ str(r0, MemOperand(r2));
6368
6369 // Set pending exception and r0 to out of memory exception.
6370 Failure* out_of_memory = Failure::OutOfMemoryException();
6371 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
6372 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
6373 __ str(r0, MemOperand(r2));
6374 }
6375
6376 // Stack layout at this point. See also StackHandlerConstants.
6377 // sp -> state (ENTRY)
6378 // fp
6379 // lr
6380
6381 // Discard handler state (r2 is not used) and restore frame pointer.
6382 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6383 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
6384 // Before returning we restore the context from the frame pointer if
6385 // not NULL. The frame pointer is NULL in the exception handler of a
6386 // JS entry frame.
6387 __ cmp(fp, Operand(0));
6388 // Set cp to NULL if fp is NULL.
6389 __ mov(cp, Operand(0), LeaveCC, eq);
6390 // Restore cp otherwise.
6391 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6392#ifdef DEBUG
6393 if (FLAG_debug_code) {
6394 __ mov(lr, Operand(pc));
6395 }
6396#endif
6397 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6398 __ pop(pc);
6399}
6400
6401
6402void CEntryStub::GenerateCore(MacroAssembler* masm,
6403 Label* throw_normal_exception,
6404 Label* throw_termination_exception,
6405 Label* throw_out_of_memory_exception,
6406 bool do_gc,
6407 bool always_allocate) {
6408 // r0: result parameter for PerformGC, if any
6409 // r4: number of arguments including receiver (C callee-saved)
6410 // r5: pointer to builtin function (C callee-saved)
6411 // r6: pointer to the first argument (C callee-saved)
6412
6413 if (do_gc) {
6414 // Passing r0.
6415 ExternalReference gc_reference = ExternalReference::perform_gc_function();
6416 __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
6417 }
6418
6419 ExternalReference scope_depth =
6420 ExternalReference::heap_always_allocate_scope_depth();
6421 if (always_allocate) {
6422 __ mov(r0, Operand(scope_depth));
6423 __ ldr(r1, MemOperand(r0));
6424 __ add(r1, r1, Operand(1));
6425 __ str(r1, MemOperand(r0));
6426 }
6427
6428 // Call C built-in.
6429 // r0 = argc, r1 = argv
6430 __ mov(r0, Operand(r4));
6431 __ mov(r1, Operand(r6));
6432
6433 // TODO(1242173): To let the GC traverse the return address of the exit
6434 // frames, we need to know where the return address is. Right now,
6435 // we push it on the stack to be able to find it again, but we never
6436 // restore from it in case of changes, which makes it impossible to
6437 // support moving the C entry code stub. This should be fixed, but currently
6438 // this is OK because the CEntryStub gets generated so early in the V8 boot
6439 // sequence that it is not moving ever.
6440 masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
6441 masm->push(lr);
6442 masm->Jump(r5);
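  // Illustrative note (comment only): reading pc on ARM yields the address
  // of the current instruction plus 8, and the add/push/Jump above are four
  // bytes each, so lr = pc + 8 + 4 is exactly the address of the instruction
  // following the Jump, which is where the C built-in returns to.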
6443
6444 if (always_allocate) {
6445 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
6446 // though (contain the result).
6447 __ mov(r2, Operand(scope_depth));
6448 __ ldr(r3, MemOperand(r2));
6449 __ sub(r3, r3, Operand(1));
6450 __ str(r3, MemOperand(r2));
6451 }
6452
6453 // Check for a failure result.
6454 Label failure_returned;
6455 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
6456 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
6457 __ add(r2, r0, Operand(1));
6458 __ tst(r2, Operand(kFailureTagMask));
6459 __ b(eq, &failure_returned);
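  // Illustrative note (comment only): the ASSERT above guarantees that the
  // low kFailureTagSize bits of kFailureTag are all ones, so adding 1 to a
  // failure-tagged word clears exactly those bits; eq therefore identifies
  // failure results without disturbing r0.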
6460
6461 // Exit C frame and return.
6462 // r0:r1: result
6463 // sp: stack pointer
6464 // fp: frame pointer
6465 __ LeaveExitFrame(mode_);
6466
6467 // Check if we should retry or throw an exception.
6468 Label retry;
6469 __ bind(&failure_returned);
6470 ASSERT(Failure::RETRY_AFTER_GC == 0);
6471 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
6472 __ b(eq, &retry);
6473
6474 // Special handling of out of memory exceptions.
6475 Failure* out_of_memory = Failure::OutOfMemoryException();
6476 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
6477 __ b(eq, throw_out_of_memory_exception);
6478
6479 // Retrieve the pending exception and clear the variable.
6480 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
6481 __ ldr(r3, MemOperand(ip));
6482 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6483 __ ldr(r0, MemOperand(ip));
6484 __ str(r3, MemOperand(ip));
6485
6486 // Special handling of termination exceptions which are uncatchable
6487 // by javascript code.
6488 __ cmp(r0, Operand(Factory::termination_exception()));
6489 __ b(eq, throw_termination_exception);
6490
6491 // Handle normal exception.
6492 __ jmp(throw_normal_exception);
6493
6494 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
6495}
6496
6497
6498void CEntryStub::Generate(MacroAssembler* masm) {
6499 // Called from JavaScript; parameters are on stack as if calling JS function
6500 // r0: number of arguments including receiver
6501 // r1: pointer to builtin function
6502 // fp: frame pointer (restored after C call)
6503 // sp: stack pointer (restored as callee's sp after C call)
6504 // cp: current context (C callee-saved)
6505
6506 // Result returned in r0 or r0+r1 by default.
6507
6508 // NOTE: Invocations of builtins may return failure objects
6509 // instead of a proper result. The builtin entry handles
6510 // this by performing a garbage collection and retrying the
6511 // builtin once.
6512
6513 // Enter the exit frame that transitions from JavaScript to C++.
6514 __ EnterExitFrame(mode_);
6515
6516 // r4: number of arguments (C callee-saved)
6517 // r5: pointer to builtin function (C callee-saved)
6518 // r6: pointer to first argument (C callee-saved)
6519
6520 Label throw_normal_exception;
6521 Label throw_termination_exception;
6522 Label throw_out_of_memory_exception;
6523
6524 // Call into the runtime system.
6525 GenerateCore(masm,
6526 &throw_normal_exception,
6527 &throw_termination_exception,
6528 &throw_out_of_memory_exception,
6529 false,
6530 false);
6531
6532 // Do space-specific GC and retry runtime call.
6533 GenerateCore(masm,
6534 &throw_normal_exception,
6535 &throw_termination_exception,
6536 &throw_out_of_memory_exception,
6537 true,
6538 false);
6539
6540 // Do full GC and retry runtime call one final time.
6541 Failure* failure = Failure::InternalError();
6542 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
6543 GenerateCore(masm,
6544 &throw_normal_exception,
6545 &throw_termination_exception,
6546 &throw_out_of_memory_exception,
6547 true,
6548 true);
6549
6550 __ bind(&throw_out_of_memory_exception);
6551 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
6552
6553 __ bind(&throw_termination_exception);
6554 GenerateThrowUncatchable(masm, TERMINATION);
6555
6556 __ bind(&throw_normal_exception);
6557 GenerateThrowTOS(masm);
6558}
6559
6560
6561void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
6562 // r0: code entry
6563 // r1: function
6564 // r2: receiver
6565 // r3: argc
6566 // [sp+0]: argv
6567
6568 Label invoke, exit;
6569
6570 // Called from C, so do not pop argc and args on exit (preserve sp)
6571 // No need to save register-passed args
6572 // Save callee-saved registers (incl. cp and fp), sp, and lr
6573 __ stm(db_w, sp, kCalleeSaved | lr.bit());
6574
6575 // Get address of argv, see stm above.
6576 // r0: code entry
6577 // r1: function
6578 // r2: receiver
6579 // r3: argc
6580 __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
6581
6582 // Push a frame with special values setup to mark it as an entry frame.
6583 // r0: code entry
6584 // r1: function
6585 // r2: receiver
6586 // r3: argc
6587 // r4: argv
6588 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
6589 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
6590 __ mov(r7, Operand(Smi::FromInt(marker)));
6591 __ mov(r6, Operand(Smi::FromInt(marker)));
6592 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
6593 __ ldr(r5, MemOperand(r5));
6594 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
6595
6596 // Setup frame pointer for the frame to be pushed.
6597 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
6598
6599 // Call a faked try-block that does the invoke.
6600 __ bl(&invoke);
6601
6602 // Caught exception: Store result (exception) in the pending
6603 // exception field in the JSEnv and return a failure sentinel.
6604 // Coming in here the fp will be invalid because the PushTryHandler below
6605 // sets it to 0 to signal the existence of the JSEntry frame.
6606 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6607 __ str(r0, MemOperand(ip));
6608 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
6609 __ b(&exit);
6610
6611 // Invoke: Link this frame into the handler chain.
6612 __ bind(&invoke);
6613 // Must preserve r0-r4, r5-r7 are available.
6614 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
6615 // If an exception not caught by another handler occurs, this handler
6616 // returns control to the code after the bl(&invoke) above, which
6617 // restores all kCalleeSaved registers (including cp and fp) to their
6618 // saved values before returning a failure to C.
6619
6620 // Clear any pending exceptions.
6621 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
6622 __ ldr(r5, MemOperand(ip));
6623 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6624 __ str(r5, MemOperand(ip));
6625
6626 // Invoke the function by calling through JS entry trampoline builtin.
6627 // Notice that we cannot store a reference to the trampoline code directly in
6628 // this stub, because runtime stubs are not traversed when doing GC.
6629
6630 // Expected registers by Builtins::JSEntryTrampoline
6631 // r0: code entry
6632 // r1: function
6633 // r2: receiver
6634 // r3: argc
6635 // r4: argv
6636 if (is_construct) {
6637 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
6638 __ mov(ip, Operand(construct_entry));
6639 } else {
6640 ExternalReference entry(Builtins::JSEntryTrampoline);
6641 __ mov(ip, Operand(entry));
6642 }
6643 __ ldr(ip, MemOperand(ip)); // deref address
6644
6645 // Branch and link to JSEntryTrampoline. We don't use the double underscore
6646 // macro for the add instruction because we don't want the coverage tool
6647 // inserting instructions here after we read the pc.
6648 __ mov(lr, Operand(pc));
6649 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
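  // Illustrative note (comment only): `mov lr, pc` reads pc as its own
  // address plus 8, i.e. the instruction just after the add above, so the
  // trampoline returns to the handler-unlinking code that follows.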
6650
6651 // Unlink this frame from the handler chain. When reading the
6652 // address of the next handler, there is no need to use the address
6653 // displacement since the current stack pointer (sp) points directly
6654 // to the stack handler.
6655 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
6656 __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
6657 __ str(r3, MemOperand(ip));
6658 // No need to restore registers
6659 __ add(sp, sp, Operand(StackHandlerConstants::kSize));
6660
6661
6662 __ bind(&exit); // r0 holds result
6663 // Restore the top frame descriptors from the stack.
6664 __ pop(r3);
6665 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
6666 __ str(r3, MemOperand(ip));
6667
6668 // Reset the stack to the callee saved registers.
6669 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
6670
6671 // Restore callee-saved registers and return.
6672#ifdef DEBUG
6673 if (FLAG_debug_code) {
6674 __ mov(lr, Operand(pc));
6675 }
6676#endif
6677 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
6678}
6679
6680
6681// This stub performs an instanceof, calling the builtin function if
6682// necessary. Uses r1 for the object, r0 for the function that it may
6683// be an instance of (these are fetched from the stack).
6684void InstanceofStub::Generate(MacroAssembler* masm) {
6685 // Get the object - slow case for smis (we may need to throw an exception
6686 // depending on the rhs).
6687 Label slow, loop, is_instance, is_not_instance;
6688 __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6689 __ BranchOnSmi(r0, &slow);
6690
6691 // Check that the left hand is a JS object and put map in r3.
6692 __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
6693 __ b(lt, &slow);
6694 __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
6695 __ b(gt, &slow);
6696
6697 // Get the prototype of the function (r4 is result, r2 is scratch).
6698 __ ldr(r1, MemOperand(sp, 0));
6699 __ TryGetFunctionPrototype(r1, r4, r2, &slow);
6700
6701 // Check that the function prototype is a JS object.
6702 __ BranchOnSmi(r4, &slow);
6703 __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
6704 __ b(lt, &slow);
6705 __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
6706 __ b(gt, &slow);
6707
6708 // Register mapping: r3 is object map and r4 is function prototype.
6709 // Get prototype of object into r2.
6710 __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
6711
6712 // Loop through the prototype chain looking for the function prototype.
6713 __ bind(&loop);
6714 __ cmp(r2, Operand(r4));
6715 __ b(eq, &is_instance);
6716 __ LoadRoot(ip, Heap::kNullValueRootIndex);
6717 __ cmp(r2, ip);
6718 __ b(eq, &is_not_instance);
6719 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
6720 __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
6721 __ jmp(&loop);
6722
6723 __ bind(&is_instance);
6724 __ mov(r0, Operand(Smi::FromInt(0)));
6725 __ pop();
6726 __ pop();
6727 __ mov(pc, Operand(lr)); // Return.
6728
6729 __ bind(&is_not_instance);
6730 __ mov(r0, Operand(Smi::FromInt(1)));
6731 __ pop();
6732 __ pop();
6733 __ mov(pc, Operand(lr)); // Return.
6734
6735 // Slow-case. Tail call builtin.
6736 __ bind(&slow);
6737 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
6738}
6739
6740
6741void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
6742 // Check if the calling frame is an arguments adaptor frame.
6743 Label adaptor;
6744 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6745 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6746 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6747 __ b(eq, &adaptor);
6748
6749 // Nothing to do: The formal number of parameters has already been
6750 // passed in register r0 by calling function. Just return it.
6751 __ Jump(lr);
6752
6753 // Arguments adaptor case: Read the arguments length from the
6754 // adaptor frame and return it.
6755 __ bind(&adaptor);
6756 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6757 __ Jump(lr);
6758}
6759
6760
6761void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6762 // The displacement is the offset of the last parameter (if any)
6763 // relative to the frame pointer.
6764 static const int kDisplacement =
6765 StandardFrameConstants::kCallerSPOffset - kPointerSize;
6766
6767 // Check that the key is a smi.
6768 Label slow;
6769 __ BranchOnNotSmi(r1, &slow);
6770
6771 // Check if the calling frame is an arguments adaptor frame.
6772 Label adaptor;
6773 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6774 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6775 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6776 __ b(eq, &adaptor);
6777
6778 // Check index against formal parameters count limit passed in
6779 // through register r0. Use unsigned comparison to get negative
6780 // check for free.
6781 __ cmp(r1, r0);
6782 __ b(cs, &slow);
6783
6784 // Read the argument from the stack and return it.
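  // Illustrative note (comment only): r3 = r0 - r1 is the Smi-tagged number
  // of slots between the indexed argument and the last parameter, so shifting
  // by kPointerSizeLog2 - kSmiTagSize converts it to a byte offset from fp,
  // and kDisplacement (the last parameter's offset, declared above) completes
  // the address.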
6785 __ sub(r3, r0, r1);
6786 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
6787 __ ldr(r0, MemOperand(r3, kDisplacement));
6788 __ Jump(lr);
6789
6790 // Arguments adaptor case: Check index against actual arguments
6791 // limit found in the arguments adaptor frame. Use unsigned
6792 // comparison to get negative check for free.
6793 __ bind(&adaptor);
6794 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6795 __ cmp(r1, r0);
6796 __ b(cs, &slow);
6797
6798 // Read the argument from the adaptor frame and return it.
6799 __ sub(r3, r0, r1);
6800 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
6801 __ ldr(r0, MemOperand(r3, kDisplacement));
6802 __ Jump(lr);
6803
6804 // Slow-case: Handle non-smi or out-of-bounds access to arguments
6805 // by calling the runtime system.
6806 __ bind(&slow);
6807 __ push(r1);
6808 __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
6809}
6810
6811
6812void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
6813 // sp[0] : number of parameters
6814 // sp[4] : receiver displacement
6815 // sp[8] : function
6816
6817 // Check if the calling frame is an arguments adaptor frame.
6818 Label adaptor_frame, try_allocate, runtime;
6819 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6820 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6821 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6822 __ b(eq, &adaptor_frame);
6823
6824 // Get the length from the frame.
6825 __ ldr(r1, MemOperand(sp, 0));
6826 __ b(&try_allocate);
6827
6828 // Patch the arguments.length and the parameters pointer.
6829 __ bind(&adaptor_frame);
6830 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6831 __ str(r1, MemOperand(sp, 0));
6832 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
6833 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
6834 __ str(r3, MemOperand(sp, 1 * kPointerSize));
6835
6836 // Try the new space allocation. Start out with computing the size
6837 // of the arguments object and the elements array (in words, not
6838 // bytes because AllocateInNewSpace expects words).
6839 Label add_arguments_object;
6840 __ bind(&try_allocate);
6841 __ cmp(r1, Operand(0));
6842 __ b(eq, &add_arguments_object);
6843 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
6844 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
6845 __ bind(&add_arguments_object);
6846 __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
6847
6848 // Do the allocation of both objects in one go.
6849 __ AllocateInNewSpace(r1, r0, r2, r3, &runtime, TAG_OBJECT);
6850
6851 // Get the arguments boilerplate from the current (global) context.
6852 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
6853 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
6854 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
6855 __ ldr(r4, MemOperand(r4, offset));
6856
6857 // Copy the JS object part.
6858 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
6859 __ ldr(r3, FieldMemOperand(r4, i));
6860 __ str(r3, FieldMemOperand(r0, i));
6861 }
6862
6863 // Setup the callee in-object property.
6864 ASSERT(Heap::arguments_callee_index == 0);
6865 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
6866 __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
6867
6868 // Get the length (smi tagged) and set that as an in-object property too.
6869 ASSERT(Heap::arguments_length_index == 1);
6870 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
6871 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
6872
6873 // If there are no actual arguments, we're done.
6874 Label done;
6875 __ cmp(r1, Operand(0));
6876 __ b(eq, &done);
6877
6878 // Get the parameters pointer from the stack and untag the length.
6879 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
6880 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
6881
6882 // Setup the elements pointer in the allocated arguments object and
6883 // initialize the header in the elements fixed array.
6884 __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
6885 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
6886 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
6887 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
6888 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
6889
6890 // Copy the fixed array slots.
6891 Label loop;
6892 // Setup r4 to point to the first array slot.
6893 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6894 __ bind(&loop);
6895 // Pre-decrement r2 with kPointerSize on each iteration.
6896 // Pre-decrement in order to skip receiver.
6897 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
6898 // Post-increment r4 with kPointerSize on each iteration.
6899 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
6900 __ sub(r1, r1, Operand(1));
6901 __ cmp(r1, Operand(0));
6902 __ b(ne, &loop);
6903
6904 // Return and remove the on-stack parameters.
6905 __ bind(&done);
6906 __ add(sp, sp, Operand(3 * kPointerSize));
6907 __ Ret();
6908
6909 // Do the runtime call to allocate the arguments object.
6910 __ bind(&runtime);
6911 __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
6912}
6913
6914
6915void CallFunctionStub::Generate(MacroAssembler* masm) {
6916 Label slow;
6917
6918 // If the receiver might be a value (string, number or boolean), check
6919 // for this and box it if it is.
6920 if (ReceiverMightBeValue()) {
6921 // Get the receiver from the stack.
6922 // function, receiver [, arguments]
6923 Label receiver_is_value, receiver_is_js_object;
6924 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
6925
6926 // Check if receiver is a smi (which is a number value).
6927 __ BranchOnSmi(r1, &receiver_is_value);
6928
6929 // Check if the receiver is a valid JS object.
6930 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
6931 __ b(ge, &receiver_is_js_object);
6932
6933 // Call the runtime to box the value.
6934 __ bind(&receiver_is_value);
6935 __ EnterInternalFrame();
6936 __ push(r1);
6937 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
6938 __ LeaveInternalFrame();
6939 __ str(r0, MemOperand(sp, argc_ * kPointerSize));
6940
6941 __ bind(&receiver_is_js_object);
6942 }
6943
6944 // Get the function to call from the stack.
6945 // function, receiver [, arguments]
6946 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
6947
6948 // Check that the function is really a JavaScript function.
6949 // r1: pushed function (to be verified)
6950 __ BranchOnSmi(r1, &slow);
6951 // Get the map of the function object.
6952 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
6953 __ b(ne, &slow);
6954
6955 // Fast-case: Invoke the function now.
6956 // r1: pushed function
6957 ParameterCount actual(argc_);
6958 __ InvokeFunction(r1, actual, JUMP_FUNCTION);
6959
6960 // Slow-case: Non-function called.
6961 __ bind(&slow);
6962 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
6963 // of the original receiver from the call site).
6964 __ str(r1, MemOperand(sp, argc_ * kPointerSize));
6965 __ mov(r0, Operand(argc_)); // Setup the number of arguments.
6966 __ mov(r2, Operand(0));
6967 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
6968 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
6969 RelocInfo::CODE_TARGET);
6970}
6971
6972
6973const char* CompareStub::GetName() {
6974 switch (cc_) {
6975 case lt: return "CompareStub_LT";
6976 case gt: return "CompareStub_GT";
6977 case le: return "CompareStub_LE";
6978 case ge: return "CompareStub_GE";
6979 case ne: {
6980 if (strict_) {
6981 if (never_nan_nan_) {
6982 return "CompareStub_NE_STRICT_NO_NAN";
6983 } else {
6984 return "CompareStub_NE_STRICT";
6985 }
6986 } else {
6987 if (never_nan_nan_) {
6988 return "CompareStub_NE_NO_NAN";
6989 } else {
6990 return "CompareStub_NE";
6991 }
6992 }
6993 }
6994 case eq: {
6995 if (strict_) {
6996 if (never_nan_nan_) {
6997 return "CompareStub_EQ_STRICT_NO_NAN";
6998 } else {
6999 return "CompareStub_EQ_STRICT";
7000 }
7001 } else {
7002 if (never_nan_nan_) {
7003 return "CompareStub_EQ_NO_NAN";
7004 } else {
7005 return "CompareStub_EQ";
7006 }
7007 }
7008 }
7009 default: return "CompareStub";
7010 }
7011}
7012
7013
7014int CompareStub::MinorKey() {
7015 // Encode the three parameters in a unique 16 bit value.
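  // Illustrative note (comment only): ARM condition codes occupy the top
  // four bits of an instruction word, so cc_ >> 26 is a small multiple of
  // four; its two clear low bits leave room for the strict_ bit and the
  // never_nan_nan_ bit without collisions.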
7016 ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
7017 int nnn_value = (never_nan_nan_ ? 2 : 0);
7018 if (cc_ != eq) nnn_value = 0; // Avoid duplicate stubs.
7019 return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
7020}
7021
7022
7023void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
7024 Register dest,
7025 Register src,
7026 Register count,
7027 Register scratch,
7028 bool ascii) {
7029 Label loop;
7030 Label done;
7031 // This loop just copies one character at a time, as it is only used for very
7032 // short strings.
7033 if (!ascii) {
7034 __ add(count, count, Operand(count), SetCC);
7035 } else {
7036 __ cmp(count, Operand(0));
7037 }
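  // Illustrative note (comment only): for two-byte strings the character
  // count is doubled so the byte loop below copies count * 2 bytes; both the
  // SetCC add and the cmp set the flags, letting the early-out branch below
  // fire for empty strings in either mode.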
7038 __ b(eq, &done);
7039
7040 __ bind(&loop);
7041 __ ldrb(scratch, MemOperand(src, 1, PostIndex));
7042 // Perform the sub between the load and the dependent store to give the
7043 // load time to complete.
7044 __ sub(count, count, Operand(1), SetCC);
7045 __ strb(scratch, MemOperand(dest, 1, PostIndex));
7046 // Loop unless this was the last iteration (count has reached zero).
7047 __ b(gt, &loop);
7048
7049 __ bind(&done);
7050}
7051
7052
7053enum CopyCharactersFlags {
7054 COPY_ASCII = 1,
7055 DEST_ALWAYS_ALIGNED = 2
7056};
7057
7058
7059void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
7060 Register dest,
7061 Register src,
7062 Register count,
7063 Register scratch1,
7064 Register scratch2,
7065 Register scratch3,
7066 Register scratch4,
7067 Register scratch5,
7068 int flags) {
7069 bool ascii = (flags & COPY_ASCII) != 0;
7070 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
7071
7072 if (dest_always_aligned && FLAG_debug_code) {
7073 // Check that destination is actually word aligned if the flag says
7074 // that it is.
7075 __ tst(dest, Operand(kPointerAlignmentMask));
7076 __ Check(eq, "Destination of copy not aligned.");
7077 }
7078
7079 const int kReadAlignment = 4;
7080 const int kReadAlignmentMask = kReadAlignment - 1;
7081 // Ensure that reading an entire aligned word containing the last character
7082 // of a string will not read outside the allocated area (because we pad up
7083 // to kObjectAlignment).
7084 ASSERT(kObjectAlignment >= kReadAlignment);
7085 // Assumes word reads and writes are little endian.
7086 // Nothing to do for zero characters.
7087 Label done;
7088 if (!ascii) {
7089 __ add(count, count, Operand(count), SetCC);
7090 } else {
7091 __ cmp(count, Operand(0));
7092 }
7093 __ b(eq, &done);
7094
7095 // Assume that you cannot read (or write) unaligned.
7096 Label byte_loop;
7097 // Must copy at least eight bytes, otherwise just do it one byte at a time.
7098 __ cmp(count, Operand(8));
7099 __ add(count, dest, Operand(count));
7100 Register limit = count; // Read until src equals this.
7101 __ b(lt, &byte_loop);
7102
7103 if (!dest_always_aligned) {
7104 // Align dest by byte copying. Copies between zero and three bytes.
7105 __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
7106 Label dest_aligned;
7107 __ b(eq, &dest_aligned);
7108 __ cmp(scratch4, Operand(2));
7109 __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
7110 __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
7111 __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
7112 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
7113 __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
7114 __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
7115 __ bind(&dest_aligned);
7116 }
7117
7118 Label simple_loop;
7119
7120 __ sub(scratch4, dest, Operand(src));
7121 __ and_(scratch4, scratch4, Operand(0x03), SetCC);
7122 __ b(eq, &simple_loop);
7123 // The shift register holds the number of bits in a source word that
7124 // must be combined with bits in the next source word in order
7125 // to create a destination word.
7126
7127 // Complex loop for src/dst that are not aligned the same way.
7128 {
7129 Label loop;
7130 __ mov(scratch4, Operand(scratch4, LSL, 3));
7131 Register left_shift = scratch4;
7132 __ and_(src, src, Operand(~3)); // Round down to load previous word.
7133 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
7134 // Store the "shift" most significant bits of scratch in the least
7135 // significant bits (i.e., shift down by (32-shift)).
7136 __ rsb(scratch2, left_shift, Operand(32));
7137 Register right_shift = scratch2;
7138 __ mov(scratch1, Operand(scratch1, LSR, right_shift));
7139
7140 __ bind(&loop);
7141 __ ldr(scratch3, MemOperand(src, 4, PostIndex));
7142 __ sub(scratch5, limit, Operand(dest));
7143 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
7144 __ str(scratch1, MemOperand(dest, 4, PostIndex));
7145 __ mov(scratch1, Operand(scratch3, LSR, right_shift));
7146 // Loop if four or more bytes left to copy.
7147 // Compare to eight, because we did the subtract before increasing dst.
7148 __ sub(scratch5, scratch5, Operand(8), SetCC);
7149 __ b(ge, &loop);
7150 }
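  // Illustrative trace (comment only): with src one byte past a word
  // boundary and dest aligned, left_shift == 24 and right_shift == 8. Each
  // loaded source word is shifted right by 8 (dropping the byte before the
  // copy region, keeping three useful bytes), and the next word's first
  // byte is ORed in at the top via LSL #24, producing one aligned
  // destination word per iteration (little endian assumed, as noted above).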
7151 // There are now between zero and three bytes left to copy (scratch5 holds
7152 // that count negated), and between one and three bytes already read into
7153 // scratch1 (scratch4 holds eight times that number). We may have read past
7154 // the end of the string, but because objects are aligned, we have not read
7155 // past the end of the object.
7156 // Find the minimum of remaining characters to move and preloaded characters
7157 // and write those as bytes.
7158 __ add(scratch5, scratch5, Operand(4), SetCC);
7159 __ b(eq, &done);
7160 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
7161 // Move minimum of bytes read and bytes left to copy to scratch4.
7162 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
7163 // Between one and three (value in scratch5) characters already read into
7164 // scratch ready to write.
7165 __ cmp(scratch5, Operand(2));
7166 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
7167 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
7168 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
7169 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
7170 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
7171 // Copy any remaining bytes.
7172 __ b(&byte_loop);
7173
7174 // Simple loop.
7175 // Copy words from src to dst, until less than four bytes left.
7176 // Both src and dest are word aligned.
7177 __ bind(&simple_loop);
7178 {
7179 Label loop;
7180 __ bind(&loop);
7181 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
7182 __ sub(scratch3, limit, Operand(dest));
7183 __ str(scratch1, MemOperand(dest, 4, PostIndex));
7184 // Compare to 8, not 4, because we do the subtraction before increasing
7185 // dest.
7186 __ cmp(scratch3, Operand(8));
7187 __ b(ge, &loop);
7188 }
7189
7190 // Copy bytes from src to dst until dst hits limit.
7191 __ bind(&byte_loop);
7192 __ cmp(dest, Operand(limit));
7193 __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
7194 __ b(ge, &done);
7195 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
7196 __ b(&byte_loop);
7197
7198 __ bind(&done);
7199}
7200
7201
7202void SubStringStub::Generate(MacroAssembler* masm) {
7203 Label runtime;
7204
7205 // Stack frame on entry.
7206 // lr: return address
7207 // sp[0]: to
7208 // sp[4]: from
7209 // sp[8]: string
7210
7211 // This stub is called from the native-call %_SubString(...), so
7212 // nothing can be assumed about the arguments. It is tested that:
7213 // "string" is a sequential string,
7214 // both "from" and "to" are smis, and
7215 // 0 <= from <= to <= string.length.
7216 // If any of these assumptions fail, we call the runtime system.
7217
7218 static const int kToOffset = 0 * kPointerSize;
7219 static const int kFromOffset = 1 * kPointerSize;
7220 static const int kStringOffset = 2 * kPointerSize;
7221
7222
7223 // Check bounds and smi-ness.
7224 __ ldr(r7, MemOperand(sp, kToOffset));
7225 __ ldr(r6, MemOperand(sp, kFromOffset));
7226 ASSERT_EQ(0, kSmiTag);
7227 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
7228 // I.e., arithmetic shift right by one un-smi-tags.
7229 __ mov(r2, Operand(r7, ASR, 1), SetCC);
7230 __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
7231 // If either r2 or r6 had the smi tag bit set, then carry is set now.
7232 __ b(cs, &runtime); // Either "from" or "to" is not a smi.
7233 __ b(mi, &runtime); // From is negative.
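  // Illustrative note (comment only): ASR #1 shifts the Smi tag bit out
  // into the carry flag, so carry set means "not a Smi". The second mov is
  // conditional on cc, which keeps a carry set by the first shift intact;
  // the single cs test above thus covers both operands.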
7234
7235 __ sub(r2, r2, Operand(r3), SetCC);
7236 __ b(mi, &runtime); // Fail if from > to.
7237 // Handle sub-strings of length 2 and less in the runtime system.
7238 __ cmp(r2, Operand(2));
7239 __ b(le, &runtime);
7240
7241 // r2: length
7242 // r6: from (smi)
7243 // r7: to (smi)
7244
7245 // Make sure first argument is a sequential (or flat) string.
7246 __ ldr(r5, MemOperand(sp, kStringOffset));
7247 ASSERT_EQ(0, kSmiTag);
7248 __ tst(r5, Operand(kSmiTagMask));
7249 __ b(eq, &runtime);
7250 Condition is_string = masm->IsObjectStringType(r5, r1);
7251 __ b(NegateCondition(is_string), &runtime);
7252
7253 // r1: instance type
7254 // r2: length
7255 // r5: string
7256 // r6: from (smi)
7257 // r7: to (smi)
7258 Label seq_string;
7259 __ and_(r4, r1, Operand(kStringRepresentationMask));
7260 ASSERT(kSeqStringTag < kConsStringTag);
7261 ASSERT(kExternalStringTag > kConsStringTag);
7262 __ cmp(r4, Operand(kConsStringTag));
7263 __ b(gt, &runtime); // External strings go to runtime.
7264 __ b(lt, &seq_string); // Sequential strings are handled directly.
7265
7266 // Cons string. Try to recurse (once) on the first substring.
7267 // (This adds a little more generality than necessary to handle flattened
7268 // cons strings, but not much).
7269 __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
7270 __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
7271 __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
7272 __ tst(r1, Operand(kStringRepresentationMask));
7273 ASSERT_EQ(0, kSeqStringTag);
7274 __ b(ne, &runtime); // Cons and External strings go to runtime.
7275
7276 // Definitely a sequential string.
7277 __ bind(&seq_string);
7278
7279 // r1: instance type.
7280 // r2: length
7281 // r5: string
7282 // r6: from (smi)
7283 // r7: to (smi)
7284 __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
7285 __ cmp(r4, Operand(r7, ASR, 1));
7286 __ b(lt, &runtime); // Fail if to > length.
7287
7288 // r1: instance type.
7289 // r2: result string length.
7290 // r5: string.
7291 // r6: from offset (smi)
7292 // Check for flat ascii string.
7293 Label non_ascii_flat;
7294 __ tst(r1, Operand(kStringEncodingMask));
7295 ASSERT_EQ(0, kTwoByteStringTag);
7296 __ b(eq, &non_ascii_flat);
7297
7298 // Allocate the result.
7299 __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
7300
7301 // r0: result string.
7302 // r2: result string length.
7303 // r5: string.
7304 // r6: from offset (smi)
7305 // Locate first character of result.
7306 __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
7307 // Locate 'from' character of string.
7308 __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
7309 __ add(r5, r5, Operand(r6, ASR, 1));
7310
7311 // r0: result string.
7312 // r1: first character of result string.
7313 // r2: result string length.
7314 // r5: first character of sub string to copy.
  ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
  GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                             COPY_ASCII | DEST_ALWAYS_ALIGNED);
  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii_flat);
  // r2: result string length.
  // r5: string.
  // r6: from offset (smi)
  // Check for flat two byte string.

  // Allocate the result.
  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);

  // r0: result string.
  // r2: result string length.
  // r5: string.
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Locate 'from' character of string.
  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // As "from" is a smi it is already twice the character index, which is
  // exactly the byte offset since two-byte characters are two bytes wide.
  __ add(r5, r5, Operand(r6));

  // r0: result string.
  // r1: first character of result.
  // r2: result length.
  // r5: first character of string to copy.
  ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
  GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                             DEST_ALWAYS_ALIGNED);
  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
}


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4) {
  Label compare_lengths;
  // Find minimum length and length difference.
  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
  Register length_delta = scratch3;
  __ mov(scratch1, scratch2, LeaveCC, gt);
  Register min_length = scratch1;
  __ tst(min_length, Operand(min_length));
  __ b(eq, &compare_lengths);

  // Set up registers so that we only need to increment one register
  // in the loop.
  __ add(scratch2, min_length,
         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ add(left, left, Operand(scratch2));
  __ add(right, right, Operand(scratch2));
  // Registers left and right point to the min_length character of the strings.
  __ rsb(min_length, min_length, Operand(-1));
  Register index = min_length;
  // Index starts at -min_length.
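  // The loop below increments index before each pair of loads, so the
  // effective indices run from -min_length up to -1, walking both strings
  // forward from their first compared character. When the increment reaches
  // zero the eq condition both skips the (conditional) loads and exits the
  // loop, so exactly min_length characters are compared without a separate
  // counter register.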

  {
    // Compare loop.
    Label loop;
    __ bind(&loop);
    // Compare characters.
    __ add(index, index, Operand(1), SetCC);
    __ ldrb(scratch2, MemOperand(left, index), ne);
    __ ldrb(scratch4, MemOperand(right, index), ne);
    // Skip to compare lengths with eq condition true.
    __ b(eq, &compare_lengths);
    __ cmp(scratch2, scratch4);
    __ b(eq, &loop);
    // Fallthrough with eq condition false.
  }
  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
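  // Reached either with eq set (all min_length characters matched, or
  // min_length was zero) or with the flags of the failing character
  // comparison. In the eq case the SetCC mov below re-derives the flags
  // from length_delta, so the conditional moves that follow select the
  // result correctly on both paths.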
  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // Use zero length_delta as result.
  __ mov(r0, Operand(length_delta), SetCC, eq);
  // Fall through to here if characters compare not-equal.
  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
  __ Ret();
}


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  sp[0]: right string
  //  sp[4]: left string
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right

  Label not_same;
  __ cmp(r0, r1);
  __ b(ne, &not_same);
  ASSERT_EQ(0, EQUAL);
  ASSERT_EQ(0, kSmiTag);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&not_same);

  // Check that both objects are sequential ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);

  // Compare flat ascii strings natively. Remove arguments from stack first.
  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
}


void StringAddStub::Generate(MacroAssembler* masm) {
  Label string_add_runtime;
  // Stack on entry:
  // sp[0]: second argument.
  // sp[4]: first argument.

  // Load the two arguments.
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.

  // Make sure that both arguments are strings if not known in advance.
  if (string_check_) {
    ASSERT_EQ(0, kSmiTag);
    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
    // Load instance types.
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
    ASSERT_EQ(0, kStringTag);
    // If either is not a string, go to runtime.
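    // The second tst below is conditional on eq, so it only executes when
    // the first operand passed the check; ne at the branch therefore means
    // at least one of the two operands is not a string.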
    __ tst(r4, Operand(kIsNotStringMask));
    __ tst(r5, Operand(kIsNotStringMask), eq);
    __ b(ne, &string_add_runtime);
  }

  // Both arguments are strings.
  // r0: first string
  // r1: second string
  // r4: first string instance type (if string_check_)
  // r5: second string instance type (if string_check_)
  {
    Label strings_not_empty;
    // Check if either of the strings is empty. In that case return the other.
    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
    __ cmp(r2, Operand(0));  // Test if first string is empty.
    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
    __ cmp(r3, Operand(0), ne);  // Else test if second string is empty.
    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
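    // Falling through here means one of the strings was empty: r0 already
    // holds the result (the second string if the first was empty, otherwise
    // the unchanged first string). Note the conditional execution above:
    // the second cmp only runs when the first string was non-empty.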

    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
    __ add(sp, sp, Operand(2 * kPointerSize));
    __ Ret();

    __ bind(&strings_not_empty);
  }

  // Both strings are non-empty.
  // r0: first string
  // r1: second string
  // r2: length of first string
  // r3: length of second string
  // r4: first string instance type (if string_check_)
  // r5: second string instance type (if string_check_)
  // Look at the length of the result of adding the two strings.
  Label string_add_flat_result;
  // Adding two lengths can't overflow.
  ASSERT(String::kMaxLength * 2 > String::kMaxLength);
  __ add(r6, r2, Operand(r3));
  // Use the runtime system when adding two one-character strings, as it
  // contains optimizations for this specific case using the symbol table.
  __ cmp(r6, Operand(2));
  __ b(eq, &string_add_runtime);
  // Check if resulting string will be flat.
  __ cmp(r6, Operand(String::kMinNonFlatLength));
  __ b(lt, &string_add_flat_result);
  // Handle exceptionally long strings in the runtime system.
  ASSERT((String::kMaxLength & 0x80000000) == 0);
  ASSERT(IsPowerOf2(String::kMaxLength + 1));
  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
  __ cmp(r6, Operand(String::kMaxLength + 1));
  __ b(hs, &string_add_runtime);

  // If result is not supposed to be flat, allocate a cons string object.
  // If both strings are ascii the result is an ascii cons string.
  if (!string_check_) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  Label non_ascii, allocated;
  ASSERT_EQ(0, kTwoByteStringTag);
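  // Since the two-byte tag is zero, the tests below leave ne exactly when
  // both strings are ascii: the second tst only executes if the first
  // string's encoding bit was set. An eq result therefore means at least
  // one string is two-byte.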
  __ tst(r4, Operand(kStringEncodingMask));
  __ tst(r5, Operand(kStringEncodingMask), ne);
  __ b(eq, &non_ascii);

  // Allocate an ASCII cons string.
  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
  __ bind(&allocated);
  // Fill the fields of the cons string.
  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
  __ mov(r0, Operand(r7));
  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii);
  // Allocate a two byte cons string.
  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
  __ jmp(&allocated);

  // Handle creating a flat result. First check that both strings are
  // sequential and that they have the same encoding.
  // r0: first string
  // r1: second string
  // r2: length of first string
  // r3: length of second string
  // r4: first string instance type (if string_check_)
  // r5: second string instance type (if string_check_)
  // r6: sum of lengths.
  __ bind(&string_add_flat_result);
  if (!string_check_) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  // Check that both strings are sequential.
  ASSERT_EQ(0, kSeqStringTag);
  __ tst(r4, Operand(kStringRepresentationMask));
  __ tst(r5, Operand(kStringRepresentationMask), eq);
  __ b(ne, &string_add_runtime);
  // Now check if both strings have the same encoding (ASCII/Two-byte).
  // r0: first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: sum of lengths.
  Label non_ascii_string_add_flat_result;
  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
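  // The exclusive or of the two instance types has the encoding bit set
  // exactly when the encodings differ, in which case we bail out below.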
  __ eor(r7, r4, Operand(r5));
  __ tst(r7, Operand(kStringEncodingMask));
  __ b(ne, &string_add_runtime);
  // And see if it's ASCII or two-byte.
  __ tst(r4, Operand(kStringEncodingMask));
  __ b(eq, &non_ascii_string_add_flat_result);

  // Both strings are sequential ASCII strings. We also know that they are
  // short (since the sum of the lengths is less than kMinNonFlatLength).
  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
  // Locate first character of result.
  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Locate first character of first argument.
  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // r0: first character of first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: first character of result.
  // r7: result string.
  GenerateCopyCharacters(masm, r6, r0, r2, r4, true);

  // Load second argument and locate first character.
  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // r1: first character of second string.
  // r3: length of second string.
  // r6: next character of result.
  // r7: result string.
  GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
  __ mov(r0, Operand(r7));
  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii_string_add_flat_result);
  // Both strings are sequential two byte strings.
  // r0: first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: sum of lengths.
  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
  // r0: first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r7: result string.

  // Locate first character of result.
  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Locate first character of first argument.
  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // r0: first character of first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: first character of result.
  // r7: result string.
  GenerateCopyCharacters(masm, r6, r0, r2, r4, false);

  // Locate first character of second argument.
  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // r1: first character of second string.
  // r3: length of second string.
  // r6: next character of result (after copy of first string).
  // r7: result string.
  GenerateCopyCharacters(masm, r6, r1, r3, r4, false);

  __ mov(r0, Operand(r7));
  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Just jump to runtime to add the two strings.
  __ bind(&string_add_runtime);
  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
}


#undef __

} }  // namespace v8::internal