// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "jump-target-light-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
#include "virtual-frame-arm-inl.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm_)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);


// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order due to the stack.
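  // (SaveRegisters pushed them in order of increasing register index, so the
  // pops here have to walk the indices from highest to lowest.)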
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      true_target_(NULL),
      false_target_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           JumpTarget* true_target,
                           JumpTarget* false_target)
    : owner_(owner),
      true_target_(true_target),
      false_target_(false_target),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// CodeGenerator implementation

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      loop_nesting_(0),
      function_return_is_shadowed_(false) {
}


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());
  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");

  // Initialize state.
  info_ = info;
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;

  // Adjust for function-level loop nesting.
  ASSERT_EQ(0, loop_nesting_);
  loop_nesting_ = info->loop_nesting();

  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    if (info->mode() == CompilationInfo::PRIMARY) {
      frame_->Enter();
      // tos: code slot

      // Allocate space for locals and initialize them.  This also checks
      // for stack overflow.
      frame_->AllocateStackSlots();

      VirtualFrame::SpilledScope spilled_scope(frame_);
      int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
      if (heap_slots > 0) {
        // Allocate local context.
        // Get outer context and create a new context based on it.
        __ ldr(r0, frame_->Function());
        frame_->EmitPush(r0);
        if (heap_slots <= FastNewContextStub::kMaximumSlots) {
          FastNewContextStub stub(heap_slots);
          frame_->CallStub(&stub, 1);
        } else {
          frame_->CallRuntime(Runtime::kNewContext, 1);
        }

#ifdef DEBUG
        JumpTarget verified_true;
        __ cmp(r0, cp);
        verified_true.Branch(eq);
        __ stop("NewContext: r0 is expected to be the same as cp");
        verified_true.Bind();
#endif
        // Update context local.
        __ str(cp, frame_->Context());
      }

      // TODO(1241774): Improve this code:
      // 1) only needed if we have a context
      // 2) no need to recompute context ptr every single time
      // 3) don't copy parameter operand code from SlotOperand!
      {
        Comment cmnt2(masm_, "[ copy context parameters into .context");
        // Note that iteration order is relevant here!  If we have the same
        // parameter twice (e.g., function (x, y, x)) and that parameter
        // needs to be copied into the context, it is the value of the last
        // argument passed for that parameter that must be copied.  This is
        // a rare case, so we don't check for it; instead we rely on the
        // copying order: such a parameter is copied repeatedly into the
        // same context location, and thus the last value is what is seen
        // inside the function.
        for (int i = 0; i < scope()->num_parameters(); i++) {
          Variable* par = scope()->parameter(i);
          Slot* slot = par->slot();
          if (slot != NULL && slot->type() == Slot::CONTEXT) {
            ASSERT(!scope()->is_global_scope());  // No params in global scope.
            __ ldr(r1, frame_->ParameterAt(i));
            // Loads r2 with context; used below in RecordWrite.
            __ str(r1, SlotOperand(slot, r2));
            // Load the offset into r3.
            int slot_offset =
                FixedArray::kHeaderSize + slot->index() * kPointerSize;
            __ mov(r3, Operand(slot_offset));
            __ RecordWrite(r2, r3, r1);
          }
        }
      }

      // Store the arguments object.  This must happen after context
      // initialization because the arguments object may be stored in
      // the context.
      if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
        StoreArgumentsObject(true);
      }

      // Initialize ThisFunction reference if present.
      if (scope()->is_function_scope() && scope()->function() != NULL) {
        __ mov(ip, Operand(Factory::the_hole_value()));
        frame_->EmitPush(ip);
        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
      }
    } else {
      // When used as the secondary compiler for splitting, r1, cp,
      // fp, and lr have been pushed on the stack.  Adjust the virtual
      // frame to match this state.
      frame_->Adjust(4);

      // Bind all the bailout labels to the beginning of the function.
      List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
      for (int i = 0; i < bailouts->length(); i++) {
        __ bind(bailouts->at(i)->label());
      }
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.SetExpectedHeight();
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements).  In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state.  Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatementsAndSpill(info->function()->body());
    }
  }

  // Generate the return sequence if necessary.
  if (has_valid_frame() || function_return_.is_linked()) {
    if (!function_return_.is_linked()) {
      CodeForReturnPosition(info->function());
    }
    // exit
    // r0: result
    // sp: stack pointer
    // fp: frame pointer
    // cp: callee's context
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);

    function_return_.Bind();
    if (FLAG_trace) {
      // Push the return value on the stack as the parameter.
      // Runtime::TraceExit returns the parameter as it is.
      frame_->EmitPush(r0);
      frame_->CallRuntime(Runtime::kTraceExit, 1);
    }

#ifdef DEBUG
    // Add a label for checking the size of the code used for returning.
    Label check_exit_codesize;
    masm_->bind(&check_exit_codesize);
#endif
    // Make sure that the constant pool is not emitted inside of the return
    // sequence.
    { Assembler::BlockConstPoolScope block_const_pool(masm_);
      // Tear down the frame which will restore the caller's frame pointer and
      // the link register.
      frame_->Exit();

      // Here we use masm_-> instead of the __ macro to prevent the code
      // coverage tool from instrumenting it, as we rely on the code size here.
      int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
      masm_->add(sp, sp, Operand(sp_delta));
      masm_->Jump(lr);

#ifdef DEBUG
      // Check that the size of the code used for returning matches what is
      // expected by the debugger.  If the sp_delta above cannot be encoded
      // in the add instruction, the add will generate two instructions.
      int return_sequence_length =
          masm_->InstructionsGeneratedSince(&check_exit_codesize);
      CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
            return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
#endif
    }
  }

  // Adjust for function-level loop nesting.
  ASSERT(loop_nesting_ == info->loop_nesting());
  loop_nesting_ = 0;

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context. However it is safe to dereference be-
      // cause the function context of a function context is itself. Before
      // deleting this mov we should try to create a counter-example first,
      // though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}


MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { CodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (eg, a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      frame_->SpillAll();
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    VirtualFrame::SpilledScope scope(frame_);
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Jump();
    materialize_true.Bind();
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    VirtualFrame::SpilledScope scope(frame_);
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
      frame_->EmitPush(r0);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
      frame_->EmitPush(r0);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::LoadGlobal() {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, GlobalObject());
  frame_->EmitPush(reg);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(scratch,
         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(scratch);
}


ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
  ASSERT(scope()->arguments_shadow() != NULL);
  // We don't want to do lazy arguments allocation for functions that
  // have heap-allocated contexts, because it interferes with the
  // uninitialized const tracking in the context objects.
  return (scope()->num_heap_slots() > 0)
      ? EAGER_ARGUMENTS_ALLOCATION
      : LAZY_ARGUMENTS_ALLOCATION;
}


void CodeGenerator::StoreArgumentsObject(bool initial) {
  VirtualFrame::SpilledScope spilled_scope(frame_);

  ArgumentsAllocationMode mode = ArgumentsMode();
  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);

  Comment cmnt(masm_, "[ store arguments object");
  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
    // When using lazy arguments allocation, we store the hole value
    // as a sentinel indicating that the arguments object hasn't been
    // allocated yet.
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    frame_->EmitPush(ip);
  } else {
    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
    __ ldr(r2, frame_->Function());
    // The receiver is below the arguments, the return address, and the
    // frame pointer on the stack.
    const int kReceiverDisplacement = 2 + scope()->num_parameters();
    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
    frame_->Adjust(3);
    __ Push(r2, r1, r0);
    frame_->CallStub(&stub, 3);
    frame_->EmitPush(r0);
  }

  Variable* arguments = scope()->arguments()->var();
  Variable* shadow = scope()->arguments_shadow()->var();
  ASSERT(arguments != NULL && arguments->slot() != NULL);
  ASSERT(shadow != NULL && shadow->slot() != NULL);
  JumpTarget done;
  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
    // We have to skip storing into the arguments slot if it has
    // already been written to.  This can happen if a function
    // has a local variable named 'arguments'.
    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
    frame_->EmitPop(r0);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(r0, ip);
    done.Branch(ne);
  }
  StoreToSlot(arguments->slot(), NOT_CONST_INIT);
  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
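  // E.g. 'typeof some_undeclared_global' must evaluate to "undefined" rather
  // than throw a reference error, which is why globals are read below with a
  // plain (non-contextual) property load.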
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValue();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
    frame_->SpillAll();
  } else {
    // Anything else can be handled normally.
    LoadAndSpill(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    Load(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    VirtualFrame::SpilledScope spilled_scope(frame_);
    LoadAndSpill(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  int size = ref->size();
  ref->set_unloaded();
  if (size == 0) return;

  // Pop a reference from the stack while preserving TOS.
  VirtualFrame::RegisterAllocationScope scope(this);
  Comment cmnt(masm_, "[ UnloadReference");
  if (size > 0) {
    Register tos = frame_->PopToRegister();
    frame_->Drop(size);
    frame_->EmitPush(tos);
  }
}


// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  // Note: The generated code snippet does not change stack variables.
  //       Only the condition code should be set.
  frame_->EmitPop(r0);

  // Fast case checks

  // Check if the value is 'false'.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is 'true'.
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r0, ip);
  true_target->Branch(eq);

  // Check if the value is 'undefined'.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is a smi.
  __ cmp(r0, Operand(Smi::FromInt(0)));
  false_target->Branch(eq);
  __ tst(r0, Operand(kSmiTagMask));
  true_target->Branch(eq);
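  // Zero was handled above, so any remaining smi is a non-zero number and
  // therefore 'true'; heap numbers, strings and other heap objects fall
  // through to the runtime call below.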

  // Slow case: call the runtime.
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kToBool, 1);
  // Convert the result (r0) to a condition code.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           int constant_rhs) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  // sp[0] : y
  // sp[1] : x
  // result : r0

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      frame_->EmitPop(r0);  // r0 : y
      frame_->EmitPop(r1);  // r1 : x
      GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs);
      frame_->CallStub(&stub, 0);
      break;
    }

    case Token::COMMA:
      frame_->EmitPop(r0);
      // Simply discard left value.
      frame_->Drop();
      break;

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
                                                OverwriteMode overwrite_mode,
                                                int constant_rhs) {
  // top of virtual frame: y
  // 2nd elt. on virtual frame : x
  // result : top of virtual frame

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:  // fall through.
    case Token::SUB:  // fall through.
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      Register rhs = frame_->PopToRegister();
      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
      {
        VirtualFrame::SpilledScope spilled_scope(frame_);
        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
        frame_->CallStub(&stub, 0);
      }
      frame_->EmitPush(r0);
      break;
    }

    case Token::COMMA: {
      Register scratch = frame_->PopToRegister();
      // Simply discard left value.
      frame_->Drop();
      frame_->EmitPush(scratch);
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode,
                             Register tos)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode),
        tos_register_(tos) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
  Register tos_register_;
};


void DeferredInlineSmiOperation::Generate() {
  Register lhs = r1;
  Register rhs = r0;
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
      if (reversed_) {
        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (reversed_) {
        if (tos_register_.is(r0)) {
          __ mov(r1, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r1));
          __ mov(r0, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      } else {
        if (tos_register_.is(r1)) {
          __ mov(r0, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r0));
          __ mov(r1, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      }
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (!reversed_) {
        if (tos_register_.is(r1)) {
          __ mov(r0, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r0));
          __ mov(r1, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      } else {
        UNREACHABLE();  // Should have been handled in SmiOperation.
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
  __ CallStub(&stub);
  // The generic stub returns its value in r0, but that's not
  // necessarily what we want.  We want whatever the inlined code
  // expected, which is that the answer is in the same register as
  // the operand was.
  __ Move(tos_register_, r0);
}

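// Returns true if x has at most two bits set.  'x &= x - 1' clears the
// lowest set bit, so after doing that once the remaining value has at most
// one bit set exactly when the original population count was <= 2.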
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
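// (Only meaningful for non-zero x; with x == 0 the loops below would not
// terminate.)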
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  int int_value = Smi::cast(*value)->value();

  bool something_to_inline;
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::BIT_AND:
    case Token::BIT_OR:
    case Token::BIT_XOR: {
      something_to_inline = true;
      break;
    }
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    default: {
      something_to_inline = false;
      break;
    }
  }

  if (!something_to_inline) {
    if (!reversed) {
      // Push the rhs onto the virtual frame by putting it in a TOS register.
      Register rhs = frame_->GetTOSRegister();
      __ mov(rhs, Operand(value));
      frame_->EmitPush(rhs);
      VirtualFrameBinaryOperation(op, mode, int_value);
    } else {
      // Pop the rhs, then push lhs and rhs in the right order.  Only performs
      // at most one pop, the rest takes place in TOS registers.
      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
      __ mov(lhs, Operand(value));
      frame_->EmitPush(lhs);
      frame_->EmitPush(rhs);
      VirtualFrameBinaryOperation(op, mode, kUnknownIntValue);
    }
    return;
  }

  // We move the top of stack to a register (normally no move is involved).
  Register tos = frame_->PopToRegister();
  // All other registers are spilled.  The deferred code expects one argument
  // in a register and all other values are flushed to the stack.  The
  // answer is returned in the same register that the top of stack argument was
  // in.
  frame_->SpillAll();

  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      __ add(tos, tos, Operand(value), SetCC);
      deferred->Branch(vs);
      __ tst(tos, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      if (reversed) {
        __ rsb(tos, tos, Operand(value), SetCC);
      } else {
        __ sub(tos, tos, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      __ tst(tos, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }


    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      __ tst(tos, Operand(kSmiTagMask));
      deferred->Branch(ne);
      switch (op) {
        case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
        case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
        case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
        default: UNREACHABLE();
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      ASSERT(!reversed);
      Register scratch = VirtualFrame::scratch0();
      Register scratch2 = VirtualFrame::scratch1();
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
      uint32_t problematic_mask = kSmiTagMask;
      // For unsigned shift by zero all negative smis are problematic.
      if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
      __ tst(tos, Operand(problematic_mask));
      deferred->Branch(ne);  // Go slow for problematic input.
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            int adjusted_shift = shift_value - kSmiTagSize;
            ASSERT(adjusted_shift >= 0);
            if (adjusted_shift != 0) {
              __ mov(scratch, Operand(tos, LSL, adjusted_shift));
              // Check that the *signed* result fits in a smi.
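              // (Adding 0x40000000 sets the N flag exactly when the value is
              // outside the range [-2^30, 2^30 - 1] that can be smi-tagged.)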
              __ add(scratch2, scratch, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
            } else {
              // Check that the *signed* result fits in a smi.
              __ add(scratch2, tos, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(tos, LSL, kSmiTagSize));
            }
          }
          break;
        }
        case Token::SHR: {
          if (shift_value != 0) {
            __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Remove tag.
            // LSR by immediate 0 means shifting 32 bits.
            __ mov(scratch, Operand(scratch, LSR, shift_value));
            if (shift_value == 1) {
              // Check that the *unsigned* result fits in a smi: neither of
              // the two high-order bits can be set.
              //  - 0x80000000: the high bit would be lost when smi tagging.
              //  - 0x40000000: this number would convert to negative when
              //    smi tagging.
              // These two cases can only happen with shifts by 0 or 1 when
              // handed a valid smi.
              __ tst(scratch, Operand(0xc0000000));
              deferred->Branch(ne);
            }
            __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
          }
          break;
        }
        case Token::SAR: {
          // In the ARM instruction set, ASR by immediate 0 means shifting 32
          // bits.
          if (shift_value != 0) {
            // Do the shift and the tag removal in one operation.  If the shift
            // is 31 bits (the highest possible value) then we emit the
            // instruction as a shift by 0 which means shift arithmetically by
            // 32.
            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
            // Put tag back.
            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
          }
          break;
        }
        default: UNREACHABLE();
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::MOD: {
      ASSERT(!reversed);
      ASSERT(int_value >= 2);
      ASSERT(IsPowerOf2(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
      mask = (int_value << kSmiTagSize) - 1;
      __ and_(tos, tos, Operand(mask));
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::MUL: {
      ASSERT(IsEasyToMultiplyBy(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi.  It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
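      // For instance, with int_value == 3 this mask would come out as
      // 0xE0000001: any non-smi, any negative smi and any smi of 2^28 or
      // more would take the deferred path.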
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);
      MultiplyByKnownInt(masm_, tos, tos, int_value);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    default:
      UNREACHABLE();
      break;
  }
}


void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  VirtualFrame::RegisterAllocationScope scope(this);

  if (left != NULL) Load(left);
  if (right != NULL) Load(right);

  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  Register lhs;
  Register rhs;

  // We load the top two stack positions into registers chosen by the virtual
  // frame.  This should keep the register shuffling to a minimum.
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
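  // E.g. 'x > y' is compiled as 'y < x', and 'x <= y' as 'y >= x'.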
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    lhs = frame_->PopToRegister();
    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
  } else {
    rhs = frame_->PopToRegister();
    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
  }

  ASSERT(rhs.is(r0) || rhs.is(r1));
  ASSERT(lhs.is(r0) || lhs.is(r1));

  // Now we have the two sides in r0 and r1.  We flush any other registers
  // because the stub doesn't know about register allocation.
  frame_->SpillAll();
  Register scratch = VirtualFrame::scratch0();
  __ orr(scratch, lhs, Operand(rhs));
  __ tst(scratch, Operand(kSmiTagMask));
  JumpTarget smi;
  smi.Branch(eq);

  // Perform non-smi comparison by stub.
  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
  // We call with 0 args because there are 0 on the stack.
  if (!rhs.is(r0)) {
    __ Swap(rhs, lhs, ip);
  }

  CompareStub stub(cc, strict);
  frame_->CallStub(&stub, 0);
  __ cmp(r0, Operand(0));
  JumpTarget exit;
  exit.Jump();

  // Do smi comparisons by pointer comparison.
  smi.Bind();
  __ cmp(lhs, Operand(rhs));

  exit.Bind();
  cc_reg_ = cc;
}


// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      CallFunctionFlags flags,
                                      int position) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    LoadAndSpill(args->at(i));
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.
  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
  CallFunctionStub call_function(arg_count, in_loop, flags);
  frame_->CallStub(&call_function, arg_count + 1);

  // Restore context and pop function from the stack.
  __ ldr(cp, frame_->Context());
  frame_->Drop();  // discard the TOS
}


void CodeGenerator::CallApplyLazy(Expression* applicand,
                                  Expression* receiver,
                                  VariableProxy* arguments,
                                  int position) {
  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).
  // If the arguments object of the scope has not been allocated,
  // and x.apply is Function.prototype.apply, this optimization
  // just copies y and the arguments of the current function on the
  // stack, as receiver and arguments, and calls x.
  // In the implementation comments, we call x the applicand
  // and y the receiver.
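  // A typical source pattern this targets (illustrative only):
  //   function f() { return x.apply(y, arguments); }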
1364 VirtualFrame::SpilledScope spilled_scope(frame_);
1365
1366 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
1367 ASSERT(arguments->IsArguments());
1368
1369 // Load applicand.apply onto the stack. This will usually
1370 // give us a megamorphic load site. Not super, but it works.
1371 LoadAndSpill(applicand);
1372 Handle<String> name = Factory::LookupAsciiSymbol("apply");
Leon Clarkef7060e22010-06-03 12:02:55 +01001373 frame_->Dup();
Steve Block6ded16b2010-05-10 14:33:55 +01001374 frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
1375 frame_->EmitPush(r0);
1376
1377 // Load the receiver and the existing arguments object onto the
1378 // expression stack. Avoid allocating the arguments object here.
1379 LoadAndSpill(receiver);
1380 LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
1381
1382 // Emit the source position information after having loaded the
1383 // receiver and the arguments.
1384 CodeForSourcePosition(position);
1385 // Contents of the stack at this point:
1386 // sp[0]: arguments object of the current function or the hole.
1387 // sp[1]: receiver
1388 // sp[2]: applicand.apply
1389 // sp[3]: applicand.
1390
1391 // Check if the arguments object has been lazily allocated
1392 // already. If so, just use that instead of copying the arguments
1393 // from the stack. This also deals with cases where a local variable
1394 // named 'arguments' has been introduced.
1395 __ ldr(r0, MemOperand(sp, 0));
1396
1397 Label slow, done;
1398 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
1399 __ cmp(ip, r0);
1400 __ b(ne, &slow);
1401
1402 Label build_args;
1403 // Get rid of the arguments object probe.
1404 frame_->Drop();
1405 // Stack now has 3 elements on it.
1406 // Contents of stack at this point:
1407 // sp[0]: receiver
1408 // sp[1]: applicand.apply
1409 // sp[2]: applicand.
1410
1411 // Check that the receiver really is a JavaScript object.
1412 __ ldr(r0, MemOperand(sp, 0));
1413 __ BranchOnSmi(r0, &build_args);
1414 // We allow all JSObjects including JSFunctions. As long as
1415 // JS_FUNCTION_TYPE is the last instance type and it is right
1416 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
1417 // bound.
1418 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1419 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1420 __ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE);
1421 __ b(lt, &build_args);
1422
1423 // Check that applicand.apply is Function.prototype.apply.
1424 __ ldr(r0, MemOperand(sp, kPointerSize));
1425 __ BranchOnSmi(r0, &build_args);
1426 __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
1427 __ b(ne, &build_args);
1428 __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
1429 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
1430 __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
1431 __ cmp(r1, Operand(apply_code));
1432 __ b(ne, &build_args);
1433
1434 // Check that applicand is a function.
1435 __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1436 __ BranchOnSmi(r1, &build_args);
1437 __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
1438 __ b(ne, &build_args);
1439
1440 // Copy the arguments to this function possibly from the
1441 // adaptor frame below it.
1442 Label invoke, adapted;
1443 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1444 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1445 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1446 __ b(eq, &adapted);
1447
1448 // No arguments adaptor frame. Copy fixed number of arguments.
1449 __ mov(r0, Operand(scope()->num_parameters()));
1450 for (int i = 0; i < scope()->num_parameters(); i++) {
1451 __ ldr(r2, frame_->ParameterAt(i));
1452 __ push(r2);
1453 }
1454 __ jmp(&invoke);
1455
1456  // Arguments adaptor frame present. Copy arguments from there, but
1457  // cap the number of arguments copied to avoid stack overflows.
1458 __ bind(&adapted);
1459 static const uint32_t kArgumentsLimit = 1 * KB;
1460 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1461 __ mov(r0, Operand(r0, LSR, kSmiTagSize));
1462 __ mov(r3, r0);
1463 __ cmp(r0, Operand(kArgumentsLimit));
1464 __ b(gt, &build_args);
1465
1466 // Loop through the arguments pushing them onto the execution
1467 // stack. We don't inform the virtual frame of the push, so we don't
1468 // have to worry about getting rid of the elements from the virtual
1469 // frame.
1470 Label loop;
1471 // r3 is a small non-negative integer, due to the test above.
1472 __ cmp(r3, Operand(0));
1473 __ b(eq, &invoke);
1474 // Compute the address of the first argument.
1475 __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
1476 __ add(r2, r2, Operand(kPointerSize));
1477 __ bind(&loop);
1478 // Post-decrement argument address by kPointerSize on each iteration.
1479 __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
1480 __ push(r4);
1481 __ sub(r3, r3, Operand(1), SetCC);
1482 __ b(gt, &loop);
1483
1484 // Invoke the function.
1485 __ bind(&invoke);
1486 ParameterCount actual(r0);
1487 __ InvokeFunction(r1, actual, CALL_FUNCTION);
1488 // Drop applicand.apply and applicand from the stack, and push
1489 // the result of the function call, but leave the spilled frame
1490 // unchanged, with 3 elements, so it is correct when we compile the
1491 // slow-case code.
1492 __ add(sp, sp, Operand(2 * kPointerSize));
1493 __ push(r0);
1494 // Stack now has 1 element:
1495 // sp[0]: result
1496 __ jmp(&done);
1497
1498 // Slow-case: Allocate the arguments object since we know it isn't
1499 // there, and fall-through to the slow-case where we call
1500 // applicand.apply.
1501 __ bind(&build_args);
1502  // Stack now has 3 elements, because we jumped here from a point where:
1503 // sp[0]: receiver
1504 // sp[1]: applicand.apply
1505 // sp[2]: applicand.
1506 StoreArgumentsObject(false);
1507
1508 // Stack and frame now have 4 elements.
1509 __ bind(&slow);
1510
1511 // Generic computation of x.apply(y, args) with no special optimization.
1512 // Flip applicand.apply and applicand on the stack, so
1513 // applicand looks like the receiver of the applicand.apply call.
1514 // Then process it as a normal function call.
1515 __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
1516 __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
Leon Clarkef7060e22010-06-03 12:02:55 +01001517 __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
Steve Block6ded16b2010-05-10 14:33:55 +01001518
1519 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
1520 frame_->CallStub(&call_function, 3);
1521 // The function and its two arguments have been dropped.
1522 frame_->Drop(); // Drop the receiver as well.
1523 frame_->EmitPush(r0);
1524 // Stack now has 1 element:
1525 // sp[0]: result
1526 __ bind(&done);
1527
1528 // Restore the context register after a call.
1529 __ ldr(cp, frame_->Context());
1530}
1531
1532
Steve Blocka7e24c12009-10-30 11:49:00 +00001533void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
Steve Block6ded16b2010-05-10 14:33:55 +01001534 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001535 ASSERT(has_cc());
1536 Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
1537 target->Branch(cc);
1538 cc_reg_ = al;
1539}
1540
1541
1542void CodeGenerator::CheckStack() {
Steve Block6ded16b2010-05-10 14:33:55 +01001543 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blockd0582a62009-12-15 09:54:21 +00001544 Comment cmnt(masm_, "[ check stack");
1545 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
1546 // Put the lr setup instruction in the delay slot. kInstrSize is added to
1547 // the implicit 8 byte offset that always applies to operations with pc and
1548 // gives a return address 12 bytes down.
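  // That is: lr = (address of the add below + 8) + kInstrSize, which is the
  // instruction right after the pc-modifying mov below, so the stub returns
  // past the whole check sequence.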
1549 masm_->add(lr, pc, Operand(Assembler::kInstrSize));
1550 masm_->cmp(sp, Operand(ip));
1551 StackCheckStub stub;
1552 // Call the stub if lower.
1553 masm_->mov(pc,
1554 Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
1555 RelocInfo::CODE_TARGET),
1556 LeaveCC,
1557 lo);
Steve Blocka7e24c12009-10-30 11:49:00 +00001558}
1559
1560
1561void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
1562#ifdef DEBUG
1563 int original_height = frame_->height();
1564#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001565 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001566 for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
1567 VisitAndSpill(statements->at(i));
1568 }
1569 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1570}
1571
1572
1573void CodeGenerator::VisitBlock(Block* node) {
1574#ifdef DEBUG
1575 int original_height = frame_->height();
1576#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001577 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001578 Comment cmnt(masm_, "[ Block");
1579 CodeForStatementPosition(node);
Kristian Monsen25f61362010-05-21 11:50:48 +01001580 node->break_target()->SetExpectedHeight();
Steve Blocka7e24c12009-10-30 11:49:00 +00001581 VisitStatementsAndSpill(node->statements());
1582 if (node->break_target()->is_linked()) {
1583 node->break_target()->Bind();
1584 }
1585 node->break_target()->Unuse();
1586 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1587}
1588
1589
1590void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
Steve Block3ce2e202009-11-05 08:53:23 +00001591 frame_->EmitPush(cp);
Steve Block6ded16b2010-05-10 14:33:55 +01001592 frame_->EmitPush(Operand(pairs));
1593 frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
1594
1595 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001596 frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
1597 // The result is discarded.
1598}
1599
1600
1601void CodeGenerator::VisitDeclaration(Declaration* node) {
1602#ifdef DEBUG
1603 int original_height = frame_->height();
1604#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00001605 Comment cmnt(masm_, "[ Declaration");
1606 Variable* var = node->proxy()->var();
1607 ASSERT(var != NULL); // must have been resolved
1608 Slot* slot = var->slot();
1609
1610 // If it was not possible to allocate the variable at compile time,
1611 // we need to "declare" it at runtime to make sure it actually
1612 // exists in the local context.
1613 if (slot != NULL && slot->type() == Slot::LOOKUP) {
1614 // Variables with a "LOOKUP" slot were introduced as non-locals
1615 // during variable resolution and must have mode DYNAMIC.
1616 ASSERT(var->is_dynamic());
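    // This typically happens for declarations in scopes that contain calls
    // to 'eval' or that are nested inside a 'with' statement (illustrative).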
1617 // For now, just do a runtime call.
1618 frame_->EmitPush(cp);
Steve Block6ded16b2010-05-10 14:33:55 +01001619 frame_->EmitPush(Operand(var->name()));
Steve Blocka7e24c12009-10-30 11:49:00 +00001620 // Declaration nodes are always declared in only two modes.
1621 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1622 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
Steve Block6ded16b2010-05-10 14:33:55 +01001623 frame_->EmitPush(Operand(Smi::FromInt(attr)));
Steve Blocka7e24c12009-10-30 11:49:00 +00001624 // Push initial value, if any.
1625 // Note: For variables we must not push an initial value (such as
1626 // 'undefined') because we may have a (legal) redeclaration and we
1627 // must not destroy the current value.
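    // (Illustrative: given 'var x = 1; var x;' the second declaration must
    // not reset x to undefined.)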
1628 if (node->mode() == Variable::CONST) {
Steve Block6ded16b2010-05-10 14:33:55 +01001629 frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
Steve Blocka7e24c12009-10-30 11:49:00 +00001630 } else if (node->fun() != NULL) {
Steve Block6ded16b2010-05-10 14:33:55 +01001631 Load(node->fun());
Steve Blocka7e24c12009-10-30 11:49:00 +00001632 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01001633 frame_->EmitPush(Operand(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00001634 }
Steve Block6ded16b2010-05-10 14:33:55 +01001635
1636 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001637 frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1638 // Ignore the return value (declarations are statements).
Steve Block6ded16b2010-05-10 14:33:55 +01001639
Steve Blocka7e24c12009-10-30 11:49:00 +00001640 ASSERT(frame_->height() == original_height);
1641 return;
1642 }
1643
1644 ASSERT(!var->is_global());
1645
1646 // If we have a function or a constant, we need to initialize the variable.
1647 Expression* val = NULL;
1648 if (node->mode() == Variable::CONST) {
1649 val = new Literal(Factory::the_hole_value());
1650 } else {
1651 val = node->fun(); // NULL if we don't have a function
1652 }
1653
1654 if (val != NULL) {
Steve Block6ded16b2010-05-10 14:33:55 +01001655 // Set initial value.
1656 Reference target(this, node->proxy());
1657 Load(val);
1658 target.SetValue(NOT_CONST_INIT);
1659
Steve Blocka7e24c12009-10-30 11:49:00 +00001660 // Get rid of the assigned value (declarations are statements).
1661 frame_->Drop();
1662 }
1663 ASSERT(frame_->height() == original_height);
1664}
1665
1666
1667void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
1668#ifdef DEBUG
1669 int original_height = frame_->height();
1670#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001671 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001672 Comment cmnt(masm_, "[ ExpressionStatement");
1673 CodeForStatementPosition(node);
1674 Expression* expression = node->expression();
1675 expression->MarkAsStatement();
1676 LoadAndSpill(expression);
1677 frame_->Drop();
1678 ASSERT(frame_->height() == original_height);
1679}
1680
1681
1682void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
1683#ifdef DEBUG
1684 int original_height = frame_->height();
1685#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001686 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001687 Comment cmnt(masm_, "// EmptyStatement");
1688 CodeForStatementPosition(node);
1689 // nothing to do
1690 ASSERT(frame_->height() == original_height);
1691}
1692
1693
1694void CodeGenerator::VisitIfStatement(IfStatement* node) {
1695#ifdef DEBUG
1696 int original_height = frame_->height();
1697#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001698 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001699 Comment cmnt(masm_, "[ IfStatement");
1700 // Generate different code depending on which parts of the if statement
1701 // are present or not.
1702 bool has_then_stm = node->HasThenStatement();
1703 bool has_else_stm = node->HasElseStatement();
1704
1705 CodeForStatementPosition(node);
1706
1707 JumpTarget exit;
1708 if (has_then_stm && has_else_stm) {
1709 Comment cmnt(masm_, "[ IfThenElse");
1710 JumpTarget then;
1711 JumpTarget else_;
1712 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001713 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001714 if (frame_ != NULL) {
1715 Branch(false, &else_);
1716 }
1717 // then
1718 if (frame_ != NULL || then.is_linked()) {
1719 then.Bind();
1720 VisitAndSpill(node->then_statement());
1721 }
1722 if (frame_ != NULL) {
1723 exit.Jump();
1724 }
1725 // else
1726 if (else_.is_linked()) {
1727 else_.Bind();
1728 VisitAndSpill(node->else_statement());
1729 }
1730
1731 } else if (has_then_stm) {
1732 Comment cmnt(masm_, "[ IfThen");
1733 ASSERT(!has_else_stm);
1734 JumpTarget then;
1735 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001736 LoadConditionAndSpill(node->condition(), &then, &exit, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001737 if (frame_ != NULL) {
1738 Branch(false, &exit);
1739 }
1740 // then
1741 if (frame_ != NULL || then.is_linked()) {
1742 then.Bind();
1743 VisitAndSpill(node->then_statement());
1744 }
1745
1746 } else if (has_else_stm) {
1747 Comment cmnt(masm_, "[ IfElse");
1748 ASSERT(!has_then_stm);
1749 JumpTarget else_;
1750 // if (!cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001751 LoadConditionAndSpill(node->condition(), &exit, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001752 if (frame_ != NULL) {
1753 Branch(true, &exit);
1754 }
1755 // else
1756 if (frame_ != NULL || else_.is_linked()) {
1757 else_.Bind();
1758 VisitAndSpill(node->else_statement());
1759 }
1760
1761 } else {
1762 Comment cmnt(masm_, "[ If");
1763 ASSERT(!has_then_stm && !has_else_stm);
1764 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001765 LoadConditionAndSpill(node->condition(), &exit, &exit, false);
Steve Blocka7e24c12009-10-30 11:49:00 +00001766 if (frame_ != NULL) {
1767 if (has_cc()) {
1768 cc_reg_ = al;
1769 } else {
1770 frame_->Drop();
1771 }
1772 }
1773 }
1774
1775 // end
1776 if (exit.is_linked()) {
1777 exit.Bind();
1778 }
1779 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1780}
1781
1782
1783void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
Steve Block6ded16b2010-05-10 14:33:55 +01001784 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001785 Comment cmnt(masm_, "[ ContinueStatement");
1786 CodeForStatementPosition(node);
1787 node->target()->continue_target()->Jump();
1788}
1789
1790
1791void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
Steve Block6ded16b2010-05-10 14:33:55 +01001792 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001793 Comment cmnt(masm_, "[ BreakStatement");
1794 CodeForStatementPosition(node);
1795 node->target()->break_target()->Jump();
1796}
1797
1798
1799void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
Steve Block6ded16b2010-05-10 14:33:55 +01001800 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001801 Comment cmnt(masm_, "[ ReturnStatement");
1802
1803 CodeForStatementPosition(node);
1804 LoadAndSpill(node->expression());
1805 if (function_return_is_shadowed_) {
1806 frame_->EmitPop(r0);
1807 function_return_.Jump();
1808 } else {
1809 // Pop the result from the frame and prepare the frame for
1810 // returning thus making it easier to merge.
1811 frame_->EmitPop(r0);
1812 frame_->PrepareForReturn();
1813
1814 function_return_.Jump();
1815 }
1816}
1817
1818
1819void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1820#ifdef DEBUG
1821 int original_height = frame_->height();
1822#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001823 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001824 Comment cmnt(masm_, "[ WithEnterStatement");
1825 CodeForStatementPosition(node);
1826 LoadAndSpill(node->expression());
1827 if (node->is_catch_block()) {
1828 frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1829 } else {
1830 frame_->CallRuntime(Runtime::kPushContext, 1);
1831 }
1832#ifdef DEBUG
1833 JumpTarget verified_true;
Steve Block6ded16b2010-05-10 14:33:55 +01001834 __ cmp(r0, cp);
Steve Blocka7e24c12009-10-30 11:49:00 +00001835 verified_true.Branch(eq);
1836 __ stop("PushContext: r0 is expected to be the same as cp");
1837 verified_true.Bind();
1838#endif
1839 // Update context local.
1840 __ str(cp, frame_->Context());
1841 ASSERT(frame_->height() == original_height);
1842}
1843
1844
1845void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1846#ifdef DEBUG
1847 int original_height = frame_->height();
1848#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001849 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001850 Comment cmnt(masm_, "[ WithExitStatement");
1851 CodeForStatementPosition(node);
1852 // Pop context.
1853 __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
1854 // Update context local.
1855 __ str(cp, frame_->Context());
1856 ASSERT(frame_->height() == original_height);
1857}
1858
1859
1860void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
1861#ifdef DEBUG
1862 int original_height = frame_->height();
1863#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001864 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001865 Comment cmnt(masm_, "[ SwitchStatement");
1866 CodeForStatementPosition(node);
Kristian Monsen25f61362010-05-21 11:50:48 +01001867 node->break_target()->SetExpectedHeight();
Steve Blocka7e24c12009-10-30 11:49:00 +00001868
1869 LoadAndSpill(node->tag());
1870
1871 JumpTarget next_test;
1872 JumpTarget fall_through;
1873 JumpTarget default_entry;
1874 JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
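  // default_entry receives fall-through from the case textually preceding
  // the default clause; default_exit lets the default body (compiled last)
  // fall through into the case that follows it in source order.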
1875 ZoneList<CaseClause*>* cases = node->cases();
1876 int length = cases->length();
1877 CaseClause* default_clause = NULL;
1878
1879 for (int i = 0; i < length; i++) {
1880 CaseClause* clause = cases->at(i);
1881 if (clause->is_default()) {
1882 // Remember the default clause and compile it at the end.
1883 default_clause = clause;
1884 continue;
1885 }
1886
1887 Comment cmnt(masm_, "[ Case clause");
1888 // Compile the test.
1889 next_test.Bind();
1890 next_test.Unuse();
1891 // Duplicate TOS.
1892 __ ldr(r0, frame_->Top());
1893 frame_->EmitPush(r0);
1894 Comparison(eq, NULL, clause->label(), true);
1895 Branch(false, &next_test);
1896
1897 // Before entering the body from the test, remove the switch value from
1898 // the stack.
1899 frame_->Drop();
1900
1901 // Label the body so that fall through is enabled.
1902 if (i > 0 && cases->at(i - 1)->is_default()) {
1903 default_exit.Bind();
1904 } else {
1905 fall_through.Bind();
1906 fall_through.Unuse();
1907 }
1908 VisitStatementsAndSpill(clause->statements());
1909
1910 // If control flow can fall through from the body, jump to the next body
1911 // or the end of the statement.
1912 if (frame_ != NULL) {
1913 if (i < length - 1 && cases->at(i + 1)->is_default()) {
1914 default_entry.Jump();
1915 } else {
1916 fall_through.Jump();
1917 }
1918 }
1919 }
1920
1921 // The final "test" removes the switch value.
1922 next_test.Bind();
1923 frame_->Drop();
1924
1925 // If there is a default clause, compile it.
1926 if (default_clause != NULL) {
1927 Comment cmnt(masm_, "[ Default clause");
1928 default_entry.Bind();
1929 VisitStatementsAndSpill(default_clause->statements());
1930 // If control flow can fall out of the default and there is a case after
1931    // it, jump to that case's body.
1932 if (frame_ != NULL && default_exit.is_bound()) {
1933 default_exit.Jump();
1934 }
1935 }
1936
1937 if (fall_through.is_linked()) {
1938 fall_through.Bind();
1939 }
1940
1941 if (node->break_target()->is_linked()) {
1942 node->break_target()->Bind();
1943 }
1944 node->break_target()->Unuse();
1945 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1946}
1947
1948
Steve Block3ce2e202009-11-05 08:53:23 +00001949void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001950#ifdef DEBUG
1951 int original_height = frame_->height();
1952#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001953 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00001954 Comment cmnt(masm_, "[ DoWhileStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001955 CodeForStatementPosition(node);
Kristian Monsen25f61362010-05-21 11:50:48 +01001956 node->break_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00001957 JumpTarget body(JumpTarget::BIDIRECTIONAL);
Steve Block6ded16b2010-05-10 14:33:55 +01001958 IncrementLoopNesting();
Steve Blocka7e24c12009-10-30 11:49:00 +00001959
Steve Block3ce2e202009-11-05 08:53:23 +00001960 // Label the top of the loop for the backward CFG edge. If the test
1961 // is always true we can use the continue target, and if the test is
1962 // always false there is no need.
1963 ConditionAnalysis info = AnalyzeCondition(node->cond());
1964 switch (info) {
1965 case ALWAYS_TRUE:
Kristian Monsen25f61362010-05-21 11:50:48 +01001966 node->continue_target()->SetExpectedHeight();
Steve Blocka7e24c12009-10-30 11:49:00 +00001967 node->continue_target()->Bind();
Steve Block3ce2e202009-11-05 08:53:23 +00001968 break;
1969 case ALWAYS_FALSE:
Kristian Monsen25f61362010-05-21 11:50:48 +01001970 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00001971 break;
1972 case DONT_KNOW:
Kristian Monsen25f61362010-05-21 11:50:48 +01001973 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00001974 body.Bind();
1975 break;
1976 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001977
Steve Block3ce2e202009-11-05 08:53:23 +00001978 CheckStack(); // TODO(1222600): ignore if body contains calls.
1979 VisitAndSpill(node->body());
Steve Blocka7e24c12009-10-30 11:49:00 +00001980
Steve Blockd0582a62009-12-15 09:54:21 +00001981 // Compile the test.
Steve Block3ce2e202009-11-05 08:53:23 +00001982 switch (info) {
1983 case ALWAYS_TRUE:
1984 // If control can fall off the end of the body, jump back to the
1985 // top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001986 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001987 node->continue_target()->Jump();
Steve Blocka7e24c12009-10-30 11:49:00 +00001988 }
1989 break;
Steve Block3ce2e202009-11-05 08:53:23 +00001990 case ALWAYS_FALSE:
1991 // If we have a continue in the body, we only have to bind its
1992 // jump target.
1993 if (node->continue_target()->is_linked()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001994 node->continue_target()->Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001995 }
Steve Block3ce2e202009-11-05 08:53:23 +00001996 break;
1997 case DONT_KNOW:
1998 // We have to compile the test expression if it can be reached by
1999 // control flow falling out of the body or via continue.
2000 if (node->continue_target()->is_linked()) {
2001 node->continue_target()->Bind();
2002 }
2003 if (has_valid_frame()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002004 Comment cmnt(masm_, "[ DoWhileCondition");
2005 CodeForDoWhileConditionPosition(node);
2006 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002007 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00002008          // An invalid frame here indicates that control did not
2009 // fall out of the test expression.
2010 Branch(true, &body);
Steve Blocka7e24c12009-10-30 11:49:00 +00002011 }
2012 }
2013 break;
Steve Blocka7e24c12009-10-30 11:49:00 +00002014 }
2015
2016 if (node->break_target()->is_linked()) {
2017 node->break_target()->Bind();
2018 }
Steve Block6ded16b2010-05-10 14:33:55 +01002019 DecrementLoopNesting();
Steve Block3ce2e202009-11-05 08:53:23 +00002020 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2021}
2022
2023
2024void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
2025#ifdef DEBUG
2026 int original_height = frame_->height();
2027#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002028 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00002029 Comment cmnt(masm_, "[ WhileStatement");
2030 CodeForStatementPosition(node);
2031
2032 // If the test is never true and has no side effects there is no need
2033 // to compile the test or body.
2034 ConditionAnalysis info = AnalyzeCondition(node->cond());
2035 if (info == ALWAYS_FALSE) return;
2036
Kristian Monsen25f61362010-05-21 11:50:48 +01002037 node->break_target()->SetExpectedHeight();
Steve Block6ded16b2010-05-10 14:33:55 +01002038 IncrementLoopNesting();
Steve Block3ce2e202009-11-05 08:53:23 +00002039
2040 // Label the top of the loop with the continue target for the backward
2041 // CFG edge.
Kristian Monsen25f61362010-05-21 11:50:48 +01002042 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00002043 node->continue_target()->Bind();
2044
2045 if (info == DONT_KNOW) {
2046 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00002047 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00002048 if (has_valid_frame()) {
2049 // A NULL frame indicates that control did not fall out of the
2050 // test expression.
2051 Branch(false, node->break_target());
2052 }
2053 if (has_valid_frame() || body.is_linked()) {
2054 body.Bind();
2055 }
2056 }
2057
2058 if (has_valid_frame()) {
2059 CheckStack(); // TODO(1222600): ignore if body contains calls.
2060 VisitAndSpill(node->body());
2061
2062 // If control flow can fall out of the body, jump back to the top.
2063 if (has_valid_frame()) {
2064 node->continue_target()->Jump();
2065 }
2066 }
2067 if (node->break_target()->is_linked()) {
2068 node->break_target()->Bind();
2069 }
Steve Block6ded16b2010-05-10 14:33:55 +01002070 DecrementLoopNesting();
Steve Block3ce2e202009-11-05 08:53:23 +00002071 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2072}
2073
2074
2075void CodeGenerator::VisitForStatement(ForStatement* node) {
2076#ifdef DEBUG
2077 int original_height = frame_->height();
2078#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002079 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00002080 Comment cmnt(masm_, "[ ForStatement");
2081 CodeForStatementPosition(node);
2082 if (node->init() != NULL) {
2083 VisitAndSpill(node->init());
2084 }
2085
2086 // If the test is never true there is no need to compile the test or
2087 // body.
2088 ConditionAnalysis info = AnalyzeCondition(node->cond());
2089 if (info == ALWAYS_FALSE) return;
2090
Kristian Monsen25f61362010-05-21 11:50:48 +01002091 node->break_target()->SetExpectedHeight();
Steve Block6ded16b2010-05-10 14:33:55 +01002092 IncrementLoopNesting();
Steve Block3ce2e202009-11-05 08:53:23 +00002093
2094 // If there is no update statement, label the top of the loop with the
2095 // continue target, otherwise with the loop target.
2096 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2097 if (node->next() == NULL) {
Kristian Monsen25f61362010-05-21 11:50:48 +01002098 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00002099 node->continue_target()->Bind();
2100 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01002101 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00002102 loop.Bind();
2103 }
2104
2105 // If the test is always true, there is no need to compile it.
2106 if (info == DONT_KNOW) {
2107 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00002108 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00002109 if (has_valid_frame()) {
2110 Branch(false, node->break_target());
2111 }
2112 if (has_valid_frame() || body.is_linked()) {
2113 body.Bind();
2114 }
2115 }
2116
2117 if (has_valid_frame()) {
2118 CheckStack(); // TODO(1222600): ignore if body contains calls.
2119 VisitAndSpill(node->body());
2120
2121 if (node->next() == NULL) {
2122 // If there is no update statement and control flow can fall out
2123 // of the loop, jump directly to the continue label.
2124 if (has_valid_frame()) {
2125 node->continue_target()->Jump();
2126 }
2127 } else {
2128 // If there is an update statement and control flow can reach it
2129 // via falling out of the body of the loop or continuing, we
2130 // compile the update statement.
2131 if (node->continue_target()->is_linked()) {
2132 node->continue_target()->Bind();
2133 }
2134 if (has_valid_frame()) {
2135        // Record the source position of the statement, as this code, which
2136        // comes after the code for the body, actually belongs to the loop
2137        // statement and not to the body.
2138 CodeForStatementPosition(node);
2139 VisitAndSpill(node->next());
2140 loop.Jump();
2141 }
2142 }
2143 }
2144 if (node->break_target()->is_linked()) {
2145 node->break_target()->Bind();
2146 }
Steve Block6ded16b2010-05-10 14:33:55 +01002147 DecrementLoopNesting();
Steve Blocka7e24c12009-10-30 11:49:00 +00002148 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2149}
2150
2151
2152void CodeGenerator::VisitForInStatement(ForInStatement* node) {
2153#ifdef DEBUG
2154 int original_height = frame_->height();
2155#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002156 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002157 Comment cmnt(masm_, "[ ForInStatement");
2158 CodeForStatementPosition(node);
2159
2160 JumpTarget primitive;
2161 JumpTarget jsobject;
2162 JumpTarget fixed_array;
2163 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
2164 JumpTarget end_del_check;
2165 JumpTarget exit;
2166
2167 // Get the object to enumerate over (converted to JSObject).
2168 LoadAndSpill(node->enumerable());
2169
2170 // Both SpiderMonkey and kjs ignore null and undefined in contrast
2171 // to the specification. 12.6.4 mandates a call to ToObject.
2172 frame_->EmitPop(r0);
2173 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2174 __ cmp(r0, ip);
2175 exit.Branch(eq);
2176 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2177 __ cmp(r0, ip);
2178 exit.Branch(eq);
2179
2180 // Stack layout in body:
2181 // [iteration counter (Smi)]
2182 // [length of array]
2183 // [FixedArray]
2184 // [Map or 0]
2185 // [Object]
2186
2187 // Check if enumerable is already a JSObject
2188 __ tst(r0, Operand(kSmiTagMask));
2189 primitive.Branch(eq);
2190 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
2191 jsobject.Branch(hs);
2192
2193 primitive.Bind();
2194 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00002195 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00002196
2197 jsobject.Bind();
2198 // Get the set of properties (as a FixedArray or Map).
Steve Blockd0582a62009-12-15 09:54:21 +00002199 // r0: value to be iterated over
2200 frame_->EmitPush(r0); // Push the object being iterated over.
2201
2202 // Check cache validity in generated code. This is a fast case for
2203 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
2204 // guarantee cache validity, call the runtime system to check cache
2205 // validity or get the property names in a fixed array.
2206 JumpTarget call_runtime;
2207 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2208 JumpTarget check_prototype;
2209 JumpTarget use_cache;
2210 __ mov(r1, Operand(r0));
2211 loop.Bind();
2212 // Check that there are no elements.
2213 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
2214 __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
2215 __ cmp(r2, r4);
2216 call_runtime.Branch(ne);
2217 // Check that instance descriptors are not empty so that we can
2218 // check for an enum cache. Leave the map in r3 for the subsequent
2219 // prototype load.
2220 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2221 __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
2222 __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
2223 __ cmp(r2, ip);
2224 call_runtime.Branch(eq);
2225  // Check that there is an enum cache in the non-empty instance
2226 // descriptors. This is the case if the next enumeration index
2227 // field does not contain a smi.
2228 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
2229 __ tst(r2, Operand(kSmiTagMask));
2230 call_runtime.Branch(eq);
2231 // For all objects but the receiver, check that the cache is empty.
2232 // r4: empty fixed array root.
2233 __ cmp(r1, r0);
2234 check_prototype.Branch(eq);
2235 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
2236 __ cmp(r2, r4);
2237 call_runtime.Branch(ne);
2238 check_prototype.Bind();
2239 // Load the prototype from the map and loop if non-null.
2240 __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
2241 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2242 __ cmp(r1, ip);
2243 loop.Branch(ne);
2244 // The enum cache is valid. Load the map of the object being
2245 // iterated over and use the cache for the iteration.
2246 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
2247 use_cache.Jump();
2248
2249 call_runtime.Bind();
2250 // Call the runtime to get the property names for the object.
2251 frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
Steve Blocka7e24c12009-10-30 11:49:00 +00002252 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
2253
Steve Blockd0582a62009-12-15 09:54:21 +00002254 // If we got a map from the runtime call, we can do a fast
2255 // modification check. Otherwise, we got a fixed array, and we have
2256 // to do a slow check.
2257 // r0: map or fixed array (result from call to
2258 // Runtime::kGetPropertyNamesFast)
Steve Blocka7e24c12009-10-30 11:49:00 +00002259 __ mov(r2, Operand(r0));
2260 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
2261 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
2262 __ cmp(r1, ip);
2263 fixed_array.Branch(ne);
2264
Steve Blockd0582a62009-12-15 09:54:21 +00002265 use_cache.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00002266 // Get enum cache
Steve Blockd0582a62009-12-15 09:54:21 +00002267 // r0: map (either the result from a call to
2268 // Runtime::kGetPropertyNamesFast or has been fetched directly from
2269 // the object)
Steve Blocka7e24c12009-10-30 11:49:00 +00002270 __ mov(r1, Operand(r0));
2271 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
2272 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
2273 __ ldr(r2,
2274 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
2275
2276 frame_->EmitPush(r0); // map
2277 frame_->EmitPush(r2); // enum cache bridge cache
2278 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
2279 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
2280 frame_->EmitPush(r0);
2281 __ mov(r0, Operand(Smi::FromInt(0)));
2282 frame_->EmitPush(r0);
2283 entry.Jump();
2284
2285 fixed_array.Bind();
2286 __ mov(r1, Operand(Smi::FromInt(0)));
2287 frame_->EmitPush(r1); // insert 0 in place of Map
2288 frame_->EmitPush(r0);
2289
2290 // Push the length of the array and the initial index onto the stack.
2291 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
2292 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
2293 frame_->EmitPush(r0);
2294 __ mov(r0, Operand(Smi::FromInt(0))); // init index
2295 frame_->EmitPush(r0);
2296
2297 // Condition.
2298 entry.Bind();
2299 // sp[0] : index
2300 // sp[1] : array/enum cache length
2301 // sp[2] : array or enum cache
2302 // sp[3] : 0 or map
2303 // sp[4] : enumerable
2304 // Grab the current frame's height for the break and continue
2305 // targets only after all the state is pushed on the frame.
Kristian Monsen25f61362010-05-21 11:50:48 +01002306 node->break_target()->SetExpectedHeight();
2307 node->continue_target()->SetExpectedHeight();
Steve Blocka7e24c12009-10-30 11:49:00 +00002308
Kristian Monsen25f61362010-05-21 11:50:48 +01002309 // Load the current count to r0, load the length to r1.
Leon Clarkef7060e22010-06-03 12:02:55 +01002310 __ Ldrd(r0, r1, frame_->ElementAt(0));
Steve Block6ded16b2010-05-10 14:33:55 +01002311 __ cmp(r0, r1); // compare to the array length
Steve Blocka7e24c12009-10-30 11:49:00 +00002312 node->break_target()->Branch(hs);
2313
Steve Blocka7e24c12009-10-30 11:49:00 +00002314 // Get the i'th entry of the array.
2315 __ ldr(r2, frame_->ElementAt(2));
2316 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2317 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
2318
2319 // Get Map or 0.
2320 __ ldr(r2, frame_->ElementAt(3));
2321 // Check if this (still) matches the map of the enumerable.
2322 // If not, we have to filter the key.
2323 __ ldr(r1, frame_->ElementAt(4));
2324 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
2325 __ cmp(r1, Operand(r2));
2326 end_del_check.Branch(eq);
2327
2328 // Convert the entry to a string (or null if it isn't a property anymore).
2329 __ ldr(r0, frame_->ElementAt(4)); // push enumerable
2330 frame_->EmitPush(r0);
2331 frame_->EmitPush(r3); // push entry
Steve Blockd0582a62009-12-15 09:54:21 +00002332 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002333 __ mov(r3, Operand(r0));
2334
2335 // If the property has been removed while iterating, we just skip it.
2336 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2337 __ cmp(r3, ip);
2338 node->continue_target()->Branch(eq);
2339
2340 end_del_check.Bind();
2341 // Store the entry in the 'each' expression and take another spin in the
2342  // loop. r3: i'th entry of the enum cache (or string thereof)
2343 frame_->EmitPush(r3); // push entry
2344 { Reference each(this, node->each());
2345 if (!each.is_illegal()) {
2346 if (each.size() > 0) {
2347 __ ldr(r0, frame_->ElementAt(each.size()));
2348 frame_->EmitPush(r0);
Leon Clarked91b9f72010-01-27 17:25:45 +00002349 each.SetValue(NOT_CONST_INIT);
2350 frame_->Drop(2);
2351 } else {
2352 // If the reference was to a slot we rely on the convenient property
2353 // that it doesn't matter whether a value (eg, r3 pushed above) is
2354 // right on top of or right underneath a zero-sized reference.
2355 each.SetValue(NOT_CONST_INIT);
2356 frame_->Drop();
Steve Blocka7e24c12009-10-30 11:49:00 +00002357 }
2358 }
2359 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002360 // Body.
2361 CheckStack(); // TODO(1222600): ignore if body contains calls.
2362 VisitAndSpill(node->body());
2363
2364 // Next. Reestablish a spilled frame in case we are coming here via
2365 // a continue in the body.
2366 node->continue_target()->Bind();
2367 frame_->SpillAll();
2368 frame_->EmitPop(r0);
2369 __ add(r0, r0, Operand(Smi::FromInt(1)));
2370 frame_->EmitPush(r0);
2371 entry.Jump();
2372
2373 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
2374 // any frame.
2375 node->break_target()->Bind();
2376 frame_->Drop(5);
2377
2378 // Exit.
2379 exit.Bind();
2380 node->continue_target()->Unuse();
2381 node->break_target()->Unuse();
2382 ASSERT(frame_->height() == original_height);
2383}
2384
2385
Steve Block3ce2e202009-11-05 08:53:23 +00002386void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002387#ifdef DEBUG
2388 int original_height = frame_->height();
2389#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002390 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00002391 Comment cmnt(masm_, "[ TryCatchStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002392 CodeForStatementPosition(node);
2393
2394 JumpTarget try_block;
2395 JumpTarget exit;
2396
2397 try_block.Call();
2398 // --- Catch block ---
2399 frame_->EmitPush(r0);
2400
2401 // Store the caught exception in the catch variable.
Leon Clarkee46be812010-01-19 14:06:41 +00002402 Variable* catch_var = node->catch_var()->var();
2403 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
2404 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
Steve Blocka7e24c12009-10-30 11:49:00 +00002405
2406 // Remove the exception from the stack.
2407 frame_->Drop();
2408
2409 VisitStatementsAndSpill(node->catch_block()->statements());
2410 if (frame_ != NULL) {
2411 exit.Jump();
2412 }
2413
2414
2415 // --- Try block ---
2416 try_block.Bind();
2417
2418 frame_->PushTryHandler(TRY_CATCH_HANDLER);
2419 int handler_height = frame_->height();
2420
2421 // Shadow the labels for all escapes from the try block, including
2422 // returns. During shadowing, the original label is hidden as the
2423 // LabelShadow and operations on the original actually affect the
2424 // shadowing label.
2425 //
2426 // We should probably try to unify the escaping labels and the return
2427 // label.
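  // (Illustrative: a 'return' or 'break' inside the try block is such an
  // escape; it must unlink the try handler before control leaves the block.)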
2428 int nof_escapes = node->escaping_targets()->length();
2429 List<ShadowTarget*> shadows(1 + nof_escapes);
2430
2431 // Add the shadow target for the function return.
2432 static const int kReturnShadowIndex = 0;
2433 shadows.Add(new ShadowTarget(&function_return_));
2434 bool function_return_was_shadowed = function_return_is_shadowed_;
2435 function_return_is_shadowed_ = true;
2436 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2437
2438 // Add the remaining shadow targets.
2439 for (int i = 0; i < nof_escapes; i++) {
2440 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2441 }
2442
2443 // Generate code for the statements in the try block.
2444 VisitStatementsAndSpill(node->try_block()->statements());
2445
2446 // Stop the introduced shadowing and count the number of required unlinks.
2447 // After shadowing stops, the original labels are unshadowed and the
2448 // LabelShadows represent the formerly shadowing labels.
2449 bool has_unlinks = false;
2450 for (int i = 0; i < shadows.length(); i++) {
2451 shadows[i]->StopShadowing();
2452 has_unlinks = has_unlinks || shadows[i]->is_linked();
2453 }
2454 function_return_is_shadowed_ = function_return_was_shadowed;
2455
2456 // Get an external reference to the handler address.
2457 ExternalReference handler_address(Top::k_handler_address);
2458
2459 // If we can fall off the end of the try block, unlink from try chain.
2460 if (has_valid_frame()) {
2461 // The next handler address is on top of the frame. Unlink from
2462 // the handler list and drop the rest of this handler from the
2463 // frame.
2464 ASSERT(StackHandlerConstants::kNextOffset == 0);
2465 frame_->EmitPop(r1);
2466 __ mov(r3, Operand(handler_address));
2467 __ str(r1, MemOperand(r3));
2468 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2469 if (has_unlinks) {
2470 exit.Jump();
2471 }
2472 }
2473
2474 // Generate unlink code for the (formerly) shadowing labels that have been
2475 // jumped to. Deallocate each shadow target.
2476 for (int i = 0; i < shadows.length(); i++) {
2477 if (shadows[i]->is_linked()) {
2478      // Unlink from try chain.
2479 shadows[i]->Bind();
2480 // Because we can be jumping here (to spilled code) from unspilled
2481 // code, we need to reestablish a spilled frame at this block.
2482 frame_->SpillAll();
2483
2484 // Reload sp from the top handler, because some statements that we
2485 // break from (eg, for...in) may have left stuff on the stack.
2486 __ mov(r3, Operand(handler_address));
2487 __ ldr(sp, MemOperand(r3));
2488 frame_->Forget(frame_->height() - handler_height);
2489
2490 ASSERT(StackHandlerConstants::kNextOffset == 0);
2491 frame_->EmitPop(r1);
2492 __ str(r1, MemOperand(r3));
2493 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2494
2495 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2496 frame_->PrepareForReturn();
2497 }
2498 shadows[i]->other_target()->Jump();
2499 }
2500 }
2501
2502 exit.Bind();
2503 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2504}
2505
2506
Steve Block3ce2e202009-11-05 08:53:23 +00002507void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002508#ifdef DEBUG
2509 int original_height = frame_->height();
2510#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002511 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00002512 Comment cmnt(masm_, "[ TryFinallyStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002513 CodeForStatementPosition(node);
2514
2515 // State: Used to keep track of reason for entering the finally
2516 // block. Should probably be extended to hold information for
2517 // break/continue from within the try block.
2518 enum { FALLING, THROWING, JUMPING };
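  // FALLING: control fell off the end of the try block.  THROWING: an
  // exception was thrown in the try block.  JUMPING + i: the i'th shadowed
  // jump target (e.g. a return, break or continue) was taken.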
2519
2520 JumpTarget try_block;
2521 JumpTarget finally_block;
2522
2523 try_block.Call();
2524
2525 frame_->EmitPush(r0); // save exception object on the stack
2526 // In case of thrown exceptions, this is where we continue.
2527 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2528 finally_block.Jump();
2529
2530 // --- Try block ---
2531 try_block.Bind();
2532
2533 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2534 int handler_height = frame_->height();
2535
2536 // Shadow the labels for all escapes from the try block, including
2537 // returns. Shadowing hides the original label as the LabelShadow and
2538 // operations on the original actually affect the shadowing label.
2539 //
2540 // We should probably try to unify the escaping labels and the return
2541 // label.
2542 int nof_escapes = node->escaping_targets()->length();
2543 List<ShadowTarget*> shadows(1 + nof_escapes);
2544
2545 // Add the shadow target for the function return.
2546 static const int kReturnShadowIndex = 0;
2547 shadows.Add(new ShadowTarget(&function_return_));
2548 bool function_return_was_shadowed = function_return_is_shadowed_;
2549 function_return_is_shadowed_ = true;
2550 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2551
2552 // Add the remaining shadow targets.
2553 for (int i = 0; i < nof_escapes; i++) {
2554 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2555 }
2556
2557 // Generate code for the statements in the try block.
2558 VisitStatementsAndSpill(node->try_block()->statements());
2559
2560 // Stop the introduced shadowing and count the number of required unlinks.
2561 // After shadowing stops, the original labels are unshadowed and the
2562 // LabelShadows represent the formerly shadowing labels.
2563 int nof_unlinks = 0;
2564 for (int i = 0; i < shadows.length(); i++) {
2565 shadows[i]->StopShadowing();
2566 if (shadows[i]->is_linked()) nof_unlinks++;
2567 }
2568 function_return_is_shadowed_ = function_return_was_shadowed;
2569
2570 // Get an external reference to the handler address.
2571 ExternalReference handler_address(Top::k_handler_address);
2572
2573 // If we can fall off the end of the try block, unlink from the try
2574 // chain and set the state on the frame to FALLING.
2575 if (has_valid_frame()) {
2576 // The next handler address is on top of the frame.
2577 ASSERT(StackHandlerConstants::kNextOffset == 0);
2578 frame_->EmitPop(r1);
2579 __ mov(r3, Operand(handler_address));
2580 __ str(r1, MemOperand(r3));
2581 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2582
2583 // Fake a top of stack value (unneeded when FALLING) and set the
2584 // state in r2, then jump around the unlink blocks if any.
2585 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2586 frame_->EmitPush(r0);
2587 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2588 if (nof_unlinks > 0) {
2589 finally_block.Jump();
2590 }
2591 }
2592
2593 // Generate code to unlink and set the state for the (formerly)
2594 // shadowing targets that have been jumped to.
2595 for (int i = 0; i < shadows.length(); i++) {
2596 if (shadows[i]->is_linked()) {
2597 // If we have come from the shadowed return, the return value is
2598 // in (a non-refcounted reference to) r0. We must preserve it
2599 // until it is pushed.
2600 //
2601 // Because we can be jumping here (to spilled code) from
2602 // unspilled code, we need to reestablish a spilled frame at
2603 // this block.
2604 shadows[i]->Bind();
2605 frame_->SpillAll();
2606
2607 // Reload sp from the top handler, because some statements that
2608 // we break from (eg, for...in) may have left stuff on the
2609 // stack.
2610 __ mov(r3, Operand(handler_address));
2611 __ ldr(sp, MemOperand(r3));
2612 frame_->Forget(frame_->height() - handler_height);
2613
2614 // Unlink this handler and drop it from the frame. The next
2615 // handler address is currently on top of the frame.
2616 ASSERT(StackHandlerConstants::kNextOffset == 0);
2617 frame_->EmitPop(r1);
2618 __ str(r1, MemOperand(r3));
2619 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2620
2621 if (i == kReturnShadowIndex) {
2622 // If this label shadowed the function return, materialize the
2623 // return value on the stack.
2624 frame_->EmitPush(r0);
2625 } else {
2626 // Fake TOS for targets that shadowed breaks and continues.
2627 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2628 frame_->EmitPush(r0);
2629 }
2630 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2631 if (--nof_unlinks > 0) {
2632 // If this is not the last unlink block, jump around the next.
2633 finally_block.Jump();
2634 }
2635 }
2636 }
2637
2638 // --- Finally block ---
2639 finally_block.Bind();
2640
2641 // Push the state on the stack.
2642 frame_->EmitPush(r2);
2643
2644 // We keep two elements on the stack - the (possibly faked) result
2645 // and the state - while evaluating the finally block.
2646 //
2647 // Generate code for the statements in the finally block.
2648 VisitStatementsAndSpill(node->finally_block()->statements());
2649
2650 if (has_valid_frame()) {
2651 // Restore state and return value or faked TOS.
2652 frame_->EmitPop(r2);
2653 frame_->EmitPop(r0);
2654 }
2655
2656 // Generate code to jump to the right destination for all used
2657 // formerly shadowing targets. Deallocate each shadow target.
2658 for (int i = 0; i < shadows.length(); i++) {
2659 if (has_valid_frame() && shadows[i]->is_bound()) {
2660 JumpTarget* original = shadows[i]->other_target();
2661 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2662 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2663 JumpTarget skip;
2664 skip.Branch(ne);
2665 frame_->PrepareForReturn();
2666 original->Jump();
2667 skip.Bind();
2668 } else {
2669 original->Branch(eq);
2670 }
2671 }
2672 }
2673
2674 if (has_valid_frame()) {
2675 // Check if we need to rethrow the exception.
2676 JumpTarget exit;
2677 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2678 exit.Branch(ne);
2679
2680 // Rethrow exception.
2681 frame_->EmitPush(r0);
2682 frame_->CallRuntime(Runtime::kReThrow, 1);
2683
2684 // Done.
2685 exit.Bind();
2686 }
2687 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2688}
2689
2690
2691void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2692#ifdef DEBUG
2693 int original_height = frame_->height();
2694#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002695 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002696  Comment cmnt(masm_, "[ DebuggerStatement");
2697 CodeForStatementPosition(node);
2698#ifdef ENABLE_DEBUGGER_SUPPORT
Andrei Popescu402d9372010-02-26 13:31:12 +00002699 frame_->DebugBreak();
Steve Blocka7e24c12009-10-30 11:49:00 +00002700#endif
2701 // Ignore the return value.
2702 ASSERT(frame_->height() == original_height);
2703}
2704
2705
Steve Block6ded16b2010-05-10 14:33:55 +01002706void CodeGenerator::InstantiateFunction(
2707 Handle<SharedFunctionInfo> function_info) {
2708 VirtualFrame::SpilledScope spilled_scope(frame_);
2709 __ mov(r0, Operand(function_info));
Leon Clarkee46be812010-01-19 14:06:41 +00002710 // Use the fast case closure allocation code that allocates in new
2711 // space for nested functions that don't need literals cloning.
Steve Block6ded16b2010-05-10 14:33:55 +01002712 if (scope()->is_function_scope() && function_info->num_literals() == 0) {
Leon Clarkee46be812010-01-19 14:06:41 +00002713 FastNewClosureStub stub;
2714 frame_->EmitPush(r0);
2715 frame_->CallStub(&stub, 1);
2716 frame_->EmitPush(r0);
2717 } else {
2718 // Create a new closure.
2719 frame_->EmitPush(cp);
2720 frame_->EmitPush(r0);
2721 frame_->CallRuntime(Runtime::kNewClosure, 2);
2722 frame_->EmitPush(r0);
2723 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002724}
2725
2726
2727void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2728#ifdef DEBUG
2729 int original_height = frame_->height();
2730#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002731 Comment cmnt(masm_, "[ FunctionLiteral");
2732
Steve Block6ded16b2010-05-10 14:33:55 +01002733 // Build the function info and instantiate it.
2734 Handle<SharedFunctionInfo> function_info =
2735 Compiler::BuildFunctionInfo(node, script(), this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002736 // Check for stack-overflow exception.
2737 if (HasStackOverflow()) {
2738 ASSERT(frame_->height() == original_height);
2739 return;
2740 }
Steve Block6ded16b2010-05-10 14:33:55 +01002741 InstantiateFunction(function_info);
2742 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00002743}
2744
2745
Steve Block6ded16b2010-05-10 14:33:55 +01002746void CodeGenerator::VisitSharedFunctionInfoLiteral(
2747 SharedFunctionInfoLiteral* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002748#ifdef DEBUG
2749 int original_height = frame_->height();
2750#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002751 Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
2752 InstantiateFunction(node->shared_function_info());
2753 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00002754}
2755
2756
2757void CodeGenerator::VisitConditional(Conditional* node) {
2758#ifdef DEBUG
2759 int original_height = frame_->height();
2760#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002761 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002762 Comment cmnt(masm_, "[ Conditional");
2763 JumpTarget then;
2764 JumpTarget else_;
Steve Blockd0582a62009-12-15 09:54:21 +00002765 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002766 if (has_valid_frame()) {
2767 Branch(false, &else_);
2768 }
2769 if (has_valid_frame() || then.is_linked()) {
2770 then.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002771 LoadAndSpill(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002772 }
2773 if (else_.is_linked()) {
2774 JumpTarget exit;
2775 if (has_valid_frame()) exit.Jump();
2776 else_.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002777 LoadAndSpill(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002778 if (exit.is_linked()) exit.Bind();
2779 }
Steve Block6ded16b2010-05-10 14:33:55 +01002780 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00002781}
2782
2783
2784void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002785 if (slot->type() == Slot::LOOKUP) {
2786 ASSERT(slot->var()->is_dynamic());
2787
Steve Block6ded16b2010-05-10 14:33:55 +01002788 // JumpTargets do not yet support merging frames so the frame must be
2789 // spilled when jumping to these targets.
Steve Blocka7e24c12009-10-30 11:49:00 +00002790 JumpTarget slow;
2791 JumpTarget done;
2792
Kristian Monsen25f61362010-05-21 11:50:48 +01002793 // Generate fast case for loading from slots that correspond to
2794 // local/global variables or arguments unless they are shadowed by
2795 // eval-introduced bindings.
2796 EmitDynamicLoadFromSlotFastCase(slot,
2797 typeof_state,
2798 &slow,
2799 &done);
Steve Blocka7e24c12009-10-30 11:49:00 +00002800
2801 slow.Bind();
Steve Block6ded16b2010-05-10 14:33:55 +01002802 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002803 frame_->EmitPush(cp);
2804 __ mov(r0, Operand(slot->var()->name()));
2805 frame_->EmitPush(r0);
2806
2807 if (typeof_state == INSIDE_TYPEOF) {
2808 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2809 } else {
2810 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2811 }
2812
2813 done.Bind();
2814 frame_->EmitPush(r0);
2815
2816 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002817 Register scratch = VirtualFrame::scratch0();
2818 frame_->EmitPush(SlotOperand(slot, scratch));
Steve Blocka7e24c12009-10-30 11:49:00 +00002819 if (slot->var()->mode() == Variable::CONST) {
2820 // Const slots may contain 'the hole' value (the constant hasn't been
2821 // initialized yet) which needs to be converted into the 'undefined'
2822 // value.
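 // Illustrative example (not part of the original source):
 //   f(); const c = 1; function f() { return c; }
 // reads 'c' before its initializer has run, so the slot still holds
 // the hole and the read must yield 'undefined' instead.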
2823 Comment cmnt(masm_, "[ Unhole const");
Steve Block6ded16b2010-05-10 14:33:55 +01002824 frame_->EmitPop(scratch);
Steve Blocka7e24c12009-10-30 11:49:00 +00002825 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01002826 __ cmp(scratch, ip);
2827 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
2828 frame_->EmitPush(scratch);
Steve Blocka7e24c12009-10-30 11:49:00 +00002829 }
2830 }
2831}
2832
2833
Steve Block6ded16b2010-05-10 14:33:55 +01002834void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
2835 TypeofState state) {
2836 LoadFromSlot(slot, state);
2837
2838 // Bail out quickly if we're not using lazy arguments allocation.
2839 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
2840
2841 // ... or if the slot isn't a non-parameter arguments slot.
2842 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
2843
2844 VirtualFrame::SpilledScope spilled_scope(frame_);
2845
2846 // Load the value at the top of the stack into r0, but leave it on the
2847 // stack.
2848 __ ldr(r0, MemOperand(sp, 0));
2849
2850 // If the loaded value is the sentinel that indicates that we
2851 // haven't loaded the arguments object yet, we need to do it now.
2852 JumpTarget exit;
2853 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2854 __ cmp(r0, ip);
2855 exit.Branch(ne);
2856 frame_->Drop();
2857 StoreArgumentsObject(false);
2858 exit.Bind();
2859}
2860
2861
Leon Clarkee46be812010-01-19 14:06:41 +00002862void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
2863 ASSERT(slot != NULL);
2864 if (slot->type() == Slot::LOOKUP) {
Steve Block6ded16b2010-05-10 14:33:55 +01002865 VirtualFrame::SpilledScope spilled_scope(frame_);
Leon Clarkee46be812010-01-19 14:06:41 +00002866 ASSERT(slot->var()->is_dynamic());
2867
2868 // For now, just do a runtime call.
2869 frame_->EmitPush(cp);
2870 __ mov(r0, Operand(slot->var()->name()));
2871 frame_->EmitPush(r0);
2872
2873 if (init_state == CONST_INIT) {
2874 // Same as the case for a normal store, but it ignores the attributes
2875 // (e.g. READ_ONLY) of the context slot so that we can initialize
2876 // const properties (introduced via eval("const foo = (some
2877 // expr);")). Also, uses the current function context instead of
2878 // the top context.
2879 //
2880 // Note that we must declare foo upon entry to eval(), via a
2881 // context slot declaration, but we cannot initialize it at the
2882 // same time, because the const declaration may be at the end of
2883 // the eval code (sigh...) and the const variable may have been
2884 // used before (where its value is 'undefined'). Thus, we can only
2885 // do the initialization when we actually encounter the expression
2886 // and when the expression operands are defined and valid, and
2887 // thus we need the split into 2 operations: declaration of the
2888 // context slot followed by initialization.
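 // Illustrative example (not part of the original source):
 //   eval("use(foo); const foo = (some expr);")
 // 'foo' is declared when the eval code is entered, but this runtime
 // call only runs once execution reaches the const statement itself.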
2889 frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
2890 } else {
2891 frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
2892 }
2893 // Storing a variable must keep the (new) value on the expression
2894 // stack. This is necessary for compiling assignment expressions.
2895 frame_->EmitPush(r0);
2896
2897 } else {
2898 ASSERT(!slot->var()->is_dynamic());
Steve Block6ded16b2010-05-10 14:33:55 +01002899 Register scratch = VirtualFrame::scratch0();
2900 VirtualFrame::RegisterAllocationScope scope(this);
Leon Clarkee46be812010-01-19 14:06:41 +00002901
Steve Block6ded16b2010-05-10 14:33:55 +01002902 // The frame must be spilled when branching to this target.
Leon Clarkee46be812010-01-19 14:06:41 +00002903 JumpTarget exit;
Steve Block6ded16b2010-05-10 14:33:55 +01002904
Leon Clarkee46be812010-01-19 14:06:41 +00002905 if (init_state == CONST_INIT) {
2906 ASSERT(slot->var()->mode() == Variable::CONST);
2907 // Only the first const initialization must be executed (the slot
2908 // still contains 'the hole' value). When the assignment is
2909 // executed, the code is identical to a normal store (see below).
2910 Comment cmnt(masm_, "[ Init const");
Steve Block6ded16b2010-05-10 14:33:55 +01002911 __ ldr(scratch, SlotOperand(slot, scratch));
Leon Clarkee46be812010-01-19 14:06:41 +00002912 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01002913 __ cmp(scratch, ip);
2914 frame_->SpillAll();
Leon Clarkee46be812010-01-19 14:06:41 +00002915 exit.Branch(ne);
2916 }
2917
2918 // We must execute the store. Storing a variable must keep the
2919 // (new) value on the stack. This is necessary for compiling
2920 // assignment expressions.
2921 //
2922 // Note: We will reach here even with slot->var()->mode() ==
2923 // Variable::CONST because of const declarations which will
2924 // initialize consts to 'the hole' value and by doing so, end up
2925 // calling this code. The scratch register may be loaded with the
2926 // context; it is used below in RecordWrite.
Steve Block6ded16b2010-05-10 14:33:55 +01002927 Register tos = frame_->Peek();
2928 __ str(tos, SlotOperand(slot, scratch));
Leon Clarkee46be812010-01-19 14:06:41 +00002929 if (slot->type() == Slot::CONTEXT) {
2930 // Skip write barrier if the written value is a smi.
Steve Block6ded16b2010-05-10 14:33:55 +01002931 __ tst(tos, Operand(kSmiTagMask));
2932 // We don't use tos any more after here.
2933 VirtualFrame::SpilledScope spilled_scope(frame_);
Leon Clarkee46be812010-01-19 14:06:41 +00002934 exit.Branch(eq);
Steve Block6ded16b2010-05-10 14:33:55 +01002935 // scratch is loaded with context when calling SlotOperand above.
Leon Clarkee46be812010-01-19 14:06:41 +00002936 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
2937 __ mov(r3, Operand(offset));
Steve Block6ded16b2010-05-10 14:33:55 +01002938 // r1 could be identical with tos, but that doesn't matter.
2939 __ RecordWrite(scratch, r3, r1);
Leon Clarkee46be812010-01-19 14:06:41 +00002940 }
2941 // If we definitely did not jump over the assignment, we do not need
2942 // to bind the exit label. Doing so can defeat peephole
2943 // optimization.
2944 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
Steve Block6ded16b2010-05-10 14:33:55 +01002945 frame_->SpillAll();
Leon Clarkee46be812010-01-19 14:06:41 +00002946 exit.Bind();
2947 }
2948 }
2949}
2950
2951
Steve Blocka7e24c12009-10-30 11:49:00 +00002952void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
2953 TypeofState typeof_state,
Steve Blocka7e24c12009-10-30 11:49:00 +00002954 JumpTarget* slow) {
2955 // Check that no extension objects have been created by calls to
2956 // eval from the current scope to the global scope.
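 // Illustrative example (not part of the original source): in
 //   function f() { eval(src); function g() { return x; } }
 // the scope of 'f' calls eval and may therefore carry an extension
 // object that shadows the global 'x', so every such context between
 // the current scope and the global scope is checked below.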
Steve Block6ded16b2010-05-10 14:33:55 +01002957 Register tmp = frame_->scratch0();
2958 Register tmp2 = frame_->scratch1();
Steve Blocka7e24c12009-10-30 11:49:00 +00002959 Register context = cp;
2960 Scope* s = scope();
2961 while (s != NULL) {
2962 if (s->num_heap_slots() > 0) {
2963 if (s->calls_eval()) {
Steve Block6ded16b2010-05-10 14:33:55 +01002964 frame_->SpillAll();
Steve Blocka7e24c12009-10-30 11:49:00 +00002965 // Check that extension is NULL.
2966 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
2967 __ tst(tmp2, tmp2);
2968 slow->Branch(ne);
2969 }
2970 // Load next context in chain.
2971 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
2972 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2973 context = tmp;
2974 }
2975 // If no outer scope calls eval, we do not need to check more
2976 // context extensions.
2977 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2978 s = s->outer_scope();
2979 }
2980
2981 if (s->is_eval_scope()) {
Steve Block6ded16b2010-05-10 14:33:55 +01002982 frame_->SpillAll();
Steve Blocka7e24c12009-10-30 11:49:00 +00002983 Label next, fast;
Steve Block6ded16b2010-05-10 14:33:55 +01002984 __ Move(tmp, context);
Steve Blocka7e24c12009-10-30 11:49:00 +00002985 __ bind(&next);
2986 // Terminate at global context.
2987 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2988 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
2989 __ cmp(tmp2, ip);
2990 __ b(eq, &fast);
2991 // Check that extension is NULL.
2992 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2993 __ tst(tmp2, tmp2);
2994 slow->Branch(ne);
2995 // Load next context in chain.
2996 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
2997 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2998 __ b(&next);
2999 __ bind(&fast);
3000 }
3001
Steve Blocka7e24c12009-10-30 11:49:00 +00003002 // Load the global object.
3003 LoadGlobal();
Steve Block6ded16b2010-05-10 14:33:55 +01003004 // Set up the name register and call the load IC.
3005 frame_->CallLoadIC(slot->var()->name(),
3006 typeof_state == INSIDE_TYPEOF
3007 ? RelocInfo::CODE_TARGET
3008 : RelocInfo::CODE_TARGET_CONTEXT);
Steve Blocka7e24c12009-10-30 11:49:00 +00003009}
3010
3011
Kristian Monsen25f61362010-05-21 11:50:48 +01003012void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
3013 TypeofState typeof_state,
3014 JumpTarget* slow,
3015 JumpTarget* done) {
3016 // Generate fast-case code for variables that might be shadowed by
3017 // eval-introduced variables. Eval is used a lot without
3018 // introducing variables. In those cases, we do not want to
3019 // perform a runtime call for all variables in the scope
3020 // containing the eval.
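 // Illustrative example (not part of the original source): in
 //   function f(a) { eval("1"); return a; }
 // the eval cannot be proven not to introduce a shadowing 'a', so 'a'
 // is loaded through a fast path here with the runtime call kept as
 // the slow-path fallback.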
3021 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
3022 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
3023 frame_->SpillAll();
3024 done->Jump();
3025
3026 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
3027 frame_->SpillAll();
3028 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
3029 Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
3030 if (potential_slot != NULL) {
3031 // Generate fast case for locals that rewrite to slots.
3032 __ ldr(r0,
3033 ContextSlotOperandCheckExtensions(potential_slot,
3034 r1,
3035 r2,
3036 slow));
3037 if (potential_slot->var()->mode() == Variable::CONST) {
3038 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3039 __ cmp(r0, ip);
3040 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
3041 }
3042 done->Jump();
3043 } else if (rewrite != NULL) {
3044 // Generate fast case for argument loads.
3045 Property* property = rewrite->AsProperty();
3046 if (property != NULL) {
3047 VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
3048 Literal* key_literal = property->key()->AsLiteral();
3049 if (obj_proxy != NULL &&
3050 key_literal != NULL &&
3051 obj_proxy->IsArguments() &&
3052 key_literal->handle()->IsSmi()) {
3053 // Load arguments object if there are no eval-introduced
3054 // variables. Then load the argument from the arguments
3055 // object using keyed load.
3056 __ ldr(r0,
3057 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
3058 r1,
3059 r2,
3060 slow));
3061 frame_->EmitPush(r0);
3062 __ mov(r1, Operand(key_literal->handle()));
3063 frame_->EmitPush(r1);
3064 EmitKeyedLoad();
3065 done->Jump();
3066 }
3067 }
3068 }
3069 }
3070}
3071
3072
Steve Blocka7e24c12009-10-30 11:49:00 +00003073void CodeGenerator::VisitSlot(Slot* node) {
3074#ifdef DEBUG
3075 int original_height = frame_->height();
3076#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003077 Comment cmnt(masm_, "[ Slot");
Steve Block6ded16b2010-05-10 14:33:55 +01003078 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
3079 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003080}
3081
3082
3083void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
3084#ifdef DEBUG
3085 int original_height = frame_->height();
3086#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003087 Comment cmnt(masm_, "[ VariableProxy");
3088
3089 Variable* var = node->var();
3090 Expression* expr = var->rewrite();
3091 if (expr != NULL) {
3092 Visit(expr);
3093 } else {
3094 ASSERT(var->is_global());
3095 Reference ref(this, node);
Steve Block6ded16b2010-05-10 14:33:55 +01003096 ref.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00003097 }
Steve Block6ded16b2010-05-10 14:33:55 +01003098 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003099}
3100
3101
3102void CodeGenerator::VisitLiteral(Literal* node) {
3103#ifdef DEBUG
3104 int original_height = frame_->height();
3105#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003106 Comment cmnt(masm_, "[ Literal");
Steve Block6ded16b2010-05-10 14:33:55 +01003107 Register reg = frame_->GetTOSRegister();
3108 __ mov(reg, Operand(node->handle()));
3109 frame_->EmitPush(reg);
3110 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003111}
3112
3113
3114void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
3115#ifdef DEBUG
3116 int original_height = frame_->height();
3117#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003118 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003119 Comment cmnt(masm_, "[ RegExp Literal");
3120
3121 // Retrieve the literal array and check the allocated entry.
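 // Illustrative example (not part of the original source): for a literal
 // such as /ab+c/gi the materialized JSRegExp is cached in the function's
 // literal array at node->literal_index(), so the runtime call below only
 // happens the first time the literal is evaluated.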
3122
3123 // Load the function of this activation.
3124 __ ldr(r1, frame_->Function());
3125
3126 // Load the literals array of the function.
3127 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
3128
3129 // Load the literal at the ast saved index.
3130 int literal_offset =
3131 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
3132 __ ldr(r2, FieldMemOperand(r1, literal_offset));
3133
3134 JumpTarget done;
3135 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3136 __ cmp(r2, ip);
3137 done.Branch(ne);
3138
3139 // If the entry is undefined we call the runtime system to compute
3140 // the literal.
3141 frame_->EmitPush(r1); // literal array (0)
3142 __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
3143 frame_->EmitPush(r0); // literal index (1)
3144 __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
3145 frame_->EmitPush(r0);
3146 __ mov(r0, Operand(node->flags())); // RegExp flags (3)
3147 frame_->EmitPush(r0);
3148 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
3149 __ mov(r2, Operand(r0));
3150
3151 done.Bind();
3152 // Push the literal.
3153 frame_->EmitPush(r2);
Steve Block6ded16b2010-05-10 14:33:55 +01003154 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003155}
3156
3157
Steve Blocka7e24c12009-10-30 11:49:00 +00003158void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
3159#ifdef DEBUG
3160 int original_height = frame_->height();
3161#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003162 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003163 Comment cmnt(masm_, "[ ObjectLiteral");
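 // Illustrative example (not part of the original source): for
 //   var o = { a: 1, b: x, get c() { return 3; } };
 // the constant property 'a' comes with the cloned boilerplate, while
 // 'b' and the getter 'c' are installed one by one in the loop below.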
3164
Steve Blocka7e24c12009-10-30 11:49:00 +00003165 // Load the function of this activation.
Steve Block6ded16b2010-05-10 14:33:55 +01003166 __ ldr(r3, frame_->Function());
Leon Clarkee46be812010-01-19 14:06:41 +00003167 // Literal array.
Steve Block6ded16b2010-05-10 14:33:55 +01003168 __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00003169 // Literal index.
Steve Block6ded16b2010-05-10 14:33:55 +01003170 __ mov(r2, Operand(Smi::FromInt(node->literal_index())));
Leon Clarkee46be812010-01-19 14:06:41 +00003171 // Constant properties.
Steve Block6ded16b2010-05-10 14:33:55 +01003172 __ mov(r1, Operand(node->constant_properties()));
3173 // Should the object literal have fast elements?
3174 __ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
3175 frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
Leon Clarkee46be812010-01-19 14:06:41 +00003176 if (node->depth() > 1) {
Steve Block6ded16b2010-05-10 14:33:55 +01003177 frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
Leon Clarkee46be812010-01-19 14:06:41 +00003178 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01003179 frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
Steve Blocka7e24c12009-10-30 11:49:00 +00003180 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003181 frame_->EmitPush(r0); // save the result
Steve Blocka7e24c12009-10-30 11:49:00 +00003182 for (int i = 0; i < node->properties()->length(); i++) {
Andrei Popescu402d9372010-02-26 13:31:12 +00003183 // At the start of each iteration, the top of stack contains
3184 // the newly created object literal.
Steve Blocka7e24c12009-10-30 11:49:00 +00003185 ObjectLiteral::Property* property = node->properties()->at(i);
3186 Literal* key = property->key();
3187 Expression* value = property->value();
3188 switch (property->kind()) {
3189 case ObjectLiteral::Property::CONSTANT:
3190 break;
3191 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
3192 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
3193 // else fall through
Andrei Popescu402d9372010-02-26 13:31:12 +00003194 case ObjectLiteral::Property::COMPUTED:
3195 if (key->handle()->IsSymbol()) {
3196 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
3197 LoadAndSpill(value);
3198 frame_->EmitPop(r0);
3199 __ mov(r2, Operand(key->handle()));
3200 __ ldr(r1, frame_->Top()); // Load the receiver.
3201 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
3202 break;
3203 }
3204 // else fall through
Steve Blocka7e24c12009-10-30 11:49:00 +00003205 case ObjectLiteral::Property::PROTOTYPE: {
Andrei Popescu402d9372010-02-26 13:31:12 +00003206 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00003207 frame_->EmitPush(r0); // dup the result
3208 LoadAndSpill(key);
3209 LoadAndSpill(value);
3210 frame_->CallRuntime(Runtime::kSetProperty, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00003211 break;
3212 }
3213 case ObjectLiteral::Property::SETTER: {
Andrei Popescu402d9372010-02-26 13:31:12 +00003214 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00003215 frame_->EmitPush(r0);
3216 LoadAndSpill(key);
3217 __ mov(r0, Operand(Smi::FromInt(1)));
3218 frame_->EmitPush(r0);
3219 LoadAndSpill(value);
3220 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
Steve Blocka7e24c12009-10-30 11:49:00 +00003221 break;
3222 }
3223 case ObjectLiteral::Property::GETTER: {
Andrei Popescu402d9372010-02-26 13:31:12 +00003224 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00003225 frame_->EmitPush(r0);
3226 LoadAndSpill(key);
3227 __ mov(r0, Operand(Smi::FromInt(0)));
3228 frame_->EmitPush(r0);
3229 LoadAndSpill(value);
3230 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
Steve Blocka7e24c12009-10-30 11:49:00 +00003231 break;
3232 }
3233 }
3234 }
Steve Block6ded16b2010-05-10 14:33:55 +01003235 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003236}
3237
3238
Steve Blocka7e24c12009-10-30 11:49:00 +00003239void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
3240#ifdef DEBUG
3241 int original_height = frame_->height();
3242#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003243 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003244 Comment cmnt(masm_, "[ ArrayLiteral");
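 // Illustrative example (not part of the original source): for
 //   var a = [1, 2, x, 4];
 // the boilerplate supplies the constant elements, and only the
 // non-constant element 'x' is stored explicitly in the loop below.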
3245
Steve Blocka7e24c12009-10-30 11:49:00 +00003246 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00003247 __ ldr(r2, frame_->Function());
Andrei Popescu402d9372010-02-26 13:31:12 +00003248 // Load the literals array of the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003249 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00003250 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
Leon Clarkee46be812010-01-19 14:06:41 +00003251 __ mov(r0, Operand(node->constant_elements()));
3252 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
Andrei Popescu402d9372010-02-26 13:31:12 +00003253 int length = node->values()->length();
Leon Clarkee46be812010-01-19 14:06:41 +00003254 if (node->depth() > 1) {
3255 frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
Andrei Popescu402d9372010-02-26 13:31:12 +00003256 } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
Leon Clarkee46be812010-01-19 14:06:41 +00003257 frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
Andrei Popescu402d9372010-02-26 13:31:12 +00003258 } else {
3259 FastCloneShallowArrayStub stub(length);
3260 frame_->CallStub(&stub, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00003261 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003262 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00003263 // r0: created array literal
Steve Blocka7e24c12009-10-30 11:49:00 +00003264
3265 // Generate code to set the elements in the array that are not
3266 // literals.
3267 for (int i = 0; i < node->values()->length(); i++) {
3268 Expression* value = node->values()->at(i);
3269
3270 // If value is a literal the property value is already set in the
3271 // boilerplate object.
3272 if (value->AsLiteral() != NULL) continue;
3273 // If value is a materialized literal the property value is already set
3274 // in the boilerplate object if it is simple.
3275 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
3276
3277 // The property must be set by generated code.
3278 LoadAndSpill(value);
3279 frame_->EmitPop(r0);
3280
3281 // Fetch the object literal.
3282 __ ldr(r1, frame_->Top());
3283 // Get the elements array.
3284 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
3285
3286 // Write to the indexed properties array.
3287 int offset = i * kPointerSize + FixedArray::kHeaderSize;
3288 __ str(r0, FieldMemOperand(r1, offset));
3289
3290 // Update the write barrier for the array address.
3291 __ mov(r3, Operand(offset));
3292 __ RecordWrite(r1, r3, r2);
3293 }
Steve Block6ded16b2010-05-10 14:33:55 +01003294 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003295}
3296
3297
3298void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
3299#ifdef DEBUG
3300 int original_height = frame_->height();
3301#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003302 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003303 // Call runtime routine to allocate the catch extension object and
3304 // assign the exception value to the catch variable.
3305 Comment cmnt(masm_, "[ CatchExtensionObject");
3306 LoadAndSpill(node->key());
3307 LoadAndSpill(node->value());
3308 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
3309 frame_->EmitPush(r0);
Steve Block6ded16b2010-05-10 14:33:55 +01003310 ASSERT_EQ(original_height + 1, frame_->height());
3311}
3312
3313
3314void CodeGenerator::EmitSlotAssignment(Assignment* node) {
3315#ifdef DEBUG
3316 int original_height = frame_->height();
3317#endif
3318 Comment cmnt(masm(), "[ Variable Assignment");
3319 Variable* var = node->target()->AsVariableProxy()->AsVariable();
3320 ASSERT(var != NULL);
3321 Slot* slot = var->slot();
3322 ASSERT(slot != NULL);
3323
3324 // Evaluate the right-hand side.
3325 if (node->is_compound()) {
3326 // For a compound assignment the right-hand side is a binary operation
3327 // between the current property value and the actual right-hand side.
3328 LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
3329
3330 // Perform the binary operation.
3331 Literal* literal = node->value()->AsLiteral();
3332 bool overwrite_value =
3333 (node->value()->AsBinaryOperation() != NULL &&
3334 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3335 if (literal != NULL && literal->handle()->IsSmi()) {
3336 SmiOperation(node->binary_op(),
3337 literal->handle(),
3338 false,
3339 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3340 } else {
3341 Load(node->value());
3342 VirtualFrameBinaryOperation(
3343 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3344 }
3345 } else {
3346 Load(node->value());
3347 }
3348
3349 // Perform the assignment.
3350 if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
3351 CodeForSourcePosition(node->position());
3352 StoreToSlot(slot,
3353 node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
3354 }
3355 ASSERT_EQ(original_height + 1, frame_->height());
3356}
3357
3358
3359void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
3360#ifdef DEBUG
3361 int original_height = frame_->height();
3362#endif
3363 Comment cmnt(masm(), "[ Named Property Assignment");
3364 Variable* var = node->target()->AsVariableProxy()->AsVariable();
3365 Property* prop = node->target()->AsProperty();
3366 ASSERT(var == NULL || (prop == NULL && var->is_global()));
3367
3368 // Initialize name and evaluate the receiver sub-expression if necessary. If
3369 // the receiver is trivial it is not placed on the stack at this point, but
3370 // loaded whenever actually needed.
3371 Handle<String> name;
3372 bool is_trivial_receiver = false;
3373 if (var != NULL) {
3374 name = var->name();
3375 } else {
3376 Literal* lit = prop->key()->AsLiteral();
3377 ASSERT_NOT_NULL(lit);
3378 name = Handle<String>::cast(lit->handle());
3379 // Do not materialize the receiver on the frame if it is trivial.
3380 is_trivial_receiver = prop->obj()->IsTrivial();
3381 if (!is_trivial_receiver) Load(prop->obj());
3382 }
3383
3384 // Change to slow case in the beginning of an initialization block to
3385 // avoid the quadratic behavior of repeatedly adding fast properties.
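 // Illustrative example (not part of the original source): a constructor
 // body such as 'this.a = 1; this.b = 2; ...' forms an initialization
 // block; the receiver is switched to slow properties before the first
 // store and back to fast properties after the last one.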
3386 if (node->starts_initialization_block()) {
3387 // An initialization block consists of assignments of the form expr.x = ...,
3388 // so this will never be an assignment to a variable and there must be a
3389 // receiver object.
3390 ASSERT_EQ(NULL, var);
3391 if (is_trivial_receiver) {
3392 Load(prop->obj());
3393 } else {
3394 frame_->Dup();
3395 }
3396 frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3397 }
3398
3399 // Change to fast case at the end of an initialization block. To prepare for
3400 // that add an extra copy of the receiver to the frame, so that it can be
3401 // converted back to fast case after the assignment.
3402 if (node->ends_initialization_block() && !is_trivial_receiver) {
3403 frame_->Dup();
3404 }
3405
3406 // Stack layout:
3407 // [tos] : receiver (only materialized if non-trivial)
3408 // [tos+1] : receiver if at the end of an initialization block
3409
3410 // Evaluate the right-hand side.
3411 if (node->is_compound()) {
3412 // For a compound assignment the right-hand side is a binary operation
3413 // between the current property value and the actual right-hand side.
3414 if (is_trivial_receiver) {
3415 Load(prop->obj());
3416 } else if (var != NULL) {
3417 LoadGlobal();
3418 } else {
3419 frame_->Dup();
3420 }
3421 EmitNamedLoad(name, var != NULL);
Steve Block6ded16b2010-05-10 14:33:55 +01003422 frame_->EmitPush(r0);
3423
3424 // Perform the binary operation.
3425 Literal* literal = node->value()->AsLiteral();
3426 bool overwrite_value =
3427 (node->value()->AsBinaryOperation() != NULL &&
3428 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3429 if (literal != NULL && literal->handle()->IsSmi()) {
3430 SmiOperation(node->binary_op(),
3431 literal->handle(),
3432 false,
3433 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3434 } else {
3435 Load(node->value());
3436 VirtualFrameBinaryOperation(
3437 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3438 }
3439 } else {
3440 // For non-compound assignment just load the right-hand side.
3441 Load(node->value());
3442 }
3443
3444 // Stack layout:
3445 // [tos] : value
3446 // [tos+1] : receiver (only materialized if non-trivial)
3447 // [tos+2] : receiver if at the end of an initialization block
3448
3449 // Perform the assignment. It is safe to ignore constants here.
3450 ASSERT(var == NULL || var->mode() != Variable::CONST);
3451 ASSERT_NE(Token::INIT_CONST, node->op());
3452 if (is_trivial_receiver) {
3453 // Load the receiver and swap with the value.
3454 Load(prop->obj());
3455 Register t0 = frame_->PopToRegister();
3456 Register t1 = frame_->PopToRegister(t0);
3457 frame_->EmitPush(t0);
3458 frame_->EmitPush(t1);
3459 }
3460 CodeForSourcePosition(node->position());
3461 bool is_contextual = (var != NULL);
3462 EmitNamedStore(name, is_contextual);
3463 frame_->EmitPush(r0);
3464
3465 // Change to fast case at the end of an initialization block.
3466 if (node->ends_initialization_block()) {
3467 ASSERT_EQ(NULL, var);
3468 // The argument to the runtime call is the receiver.
3469 if (is_trivial_receiver) {
3470 Load(prop->obj());
3471 } else {
3472 // A copy of the receiver is below the value of the assignment. Swap
3473 // the receiver and the value of the assignment expression.
3474 Register t0 = frame_->PopToRegister();
3475 Register t1 = frame_->PopToRegister(t0);
3476 frame_->EmitPush(t0);
3477 frame_->EmitPush(t1);
3478 }
3479 frame_->CallRuntime(Runtime::kToFastProperties, 1);
3480 }
3481
3482 // Stack layout:
3483 // [tos] : result
3484
3485 ASSERT_EQ(original_height + 1, frame_->height());
3486}
3487
3488
3489void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
3490#ifdef DEBUG
3491 int original_height = frame_->height();
3492#endif
3493 Comment cmnt(masm_, "[ Keyed Property Assignment");
3494 Property* prop = node->target()->AsProperty();
3495 ASSERT_NOT_NULL(prop);
3496
3497 // Evaluate the receiver subexpression.
3498 Load(prop->obj());
3499
3500 // Change to slow case in the beginning of an initialization block to
3501 // avoid the quadratic behavior of repeatedly adding fast properties.
3502 if (node->starts_initialization_block()) {
3503 frame_->Dup();
3504 frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3505 }
3506
3507 // Change to fast case at the end of an initialization block. To prepare for
3508 // that add an extra copy of the receiver to the frame, so that it can be
3509 // converted back to fast case after the assignment.
3510 if (node->ends_initialization_block()) {
3511 frame_->Dup();
3512 }
3513
3514 // Evaluate the key subexpression.
3515 Load(prop->key());
3516
3517 // Stack layout:
3518 // [tos] : key
3519 // [tos+1] : receiver
3520 // [tos+2] : receiver if at the end of an initialization block
3521
3522 // Evaluate the right-hand side.
3523 if (node->is_compound()) {
3524 // For a compound assignment the right-hand side is a binary operation
3525 // between the current property value and the actual right-hand side.
Kristian Monsen25f61362010-05-21 11:50:48 +01003526 // Duplicate receiver and key for loading the current property value.
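 // Illustrative example (not part of the original source): for 'a[i] += 5'
 // the receiver 'a' and key 'i' are duplicated so the old value a[i] can
 // be loaded, combined with 5, and then stored back using the original
 // receiver/key pair still on the stack.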
3527 frame_->Dup2();
Steve Block6ded16b2010-05-10 14:33:55 +01003528 EmitKeyedLoad();
3529 frame_->EmitPush(r0);
3530
3531 // Perform the binary operation.
3532 Literal* literal = node->value()->AsLiteral();
3533 bool overwrite_value =
3534 (node->value()->AsBinaryOperation() != NULL &&
3535 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3536 if (literal != NULL && literal->handle()->IsSmi()) {
3537 SmiOperation(node->binary_op(),
3538 literal->handle(),
3539 false,
3540 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3541 } else {
3542 Load(node->value());
3543 VirtualFrameBinaryOperation(
3544 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3545 }
3546 } else {
3547 // For non-compound assignment just load the right-hand side.
3548 Load(node->value());
3549 }
3550
3551 // Stack layout:
3552 // [tos] : value
3553 // [tos+1] : key
3554 // [tos+2] : receiver
3555 // [tos+3] : receiver if at the end of an initialization block
3556
3557 // Perform the assignment. It is safe to ignore constants here.
3558 ASSERT(node->op() != Token::INIT_CONST);
3559 CodeForSourcePosition(node->position());
Steve Block6ded16b2010-05-10 14:33:55 +01003560 EmitKeyedStore(prop->key()->type());
Steve Block6ded16b2010-05-10 14:33:55 +01003561 frame_->EmitPush(r0);
3562
3563 // Stack layout:
3564 // [tos] : result
3565 // [tos+1] : receiver if at the end of an initialization block
3566
3567 // Change to fast case at the end of an initialization block.
3568 if (node->ends_initialization_block()) {
3569 // The argument to the runtime call is the extra copy of the receiver,
3570 // which is below the value of the assignment. Swap the receiver and
3571 // the value of the assignment expression.
3572 Register t0 = frame_->PopToRegister();
3573 Register t1 = frame_->PopToRegister(t0);
3574 frame_->EmitPush(t0); // Assignment value stays below the receiver.
3575 frame_->EmitPush(t1); // Receiver copy becomes the TOS argument to the runtime call.
3576 frame_->CallRuntime(Runtime::kToFastProperties, 1);
3577 }
3578
3579 // Stack layout:
3580 // [tos] : result
3581
3582 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003583}
3584
3585
3586void CodeGenerator::VisitAssignment(Assignment* node) {
Steve Block6ded16b2010-05-10 14:33:55 +01003587 VirtualFrame::RegisterAllocationScope scope(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00003588#ifdef DEBUG
3589 int original_height = frame_->height();
3590#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003591 Comment cmnt(masm_, "[ Assignment");
3592
Steve Block6ded16b2010-05-10 14:33:55 +01003593 Variable* var = node->target()->AsVariableProxy()->AsVariable();
3594 Property* prop = node->target()->AsProperty();
Steve Blocka7e24c12009-10-30 11:49:00 +00003595
Steve Block6ded16b2010-05-10 14:33:55 +01003596 if (var != NULL && !var->is_global()) {
3597 EmitSlotAssignment(node);
Steve Blocka7e24c12009-10-30 11:49:00 +00003598
Steve Block6ded16b2010-05-10 14:33:55 +01003599 } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
3600 (var != NULL && var->is_global())) {
3601 // Properties whose keys are property names and global variables are
3602 // treated as named property references. We do not need to consider
3603 // global 'this' because it is not a valid left-hand side.
3604 EmitNamedPropertyAssignment(node);
Steve Blocka7e24c12009-10-30 11:49:00 +00003605
Steve Block6ded16b2010-05-10 14:33:55 +01003606 } else if (prop != NULL) {
3607 // Other properties (including rewritten parameters for a function that
3608 // uses arguments) are keyed property assignments.
3609 EmitKeyedPropertyAssignment(node);
3610
3611 } else {
3612 // Invalid left-hand side.
3613 Load(node->target());
3614 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
3615 // The runtime call doesn't actually return but the code generator will
3616 // still generate code and expects a certain frame height.
3617 frame_->EmitPush(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003618 }
Steve Block6ded16b2010-05-10 14:33:55 +01003619 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003620}
3621
3622
3623void CodeGenerator::VisitThrow(Throw* node) {
3624#ifdef DEBUG
3625 int original_height = frame_->height();
3626#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003627 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003628 Comment cmnt(masm_, "[ Throw");
3629
3630 LoadAndSpill(node->exception());
3631 CodeForSourcePosition(node->position());
3632 frame_->CallRuntime(Runtime::kThrow, 1);
3633 frame_->EmitPush(r0);
Steve Block6ded16b2010-05-10 14:33:55 +01003634 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003635}
3636
3637
3638void CodeGenerator::VisitProperty(Property* node) {
3639#ifdef DEBUG
3640 int original_height = frame_->height();
3641#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003642 Comment cmnt(masm_, "[ Property");
3643
3644 { Reference property(this, node);
Steve Block6ded16b2010-05-10 14:33:55 +01003645 property.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00003646 }
Steve Block6ded16b2010-05-10 14:33:55 +01003647 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003648}
3649
3650
3651void CodeGenerator::VisitCall(Call* node) {
3652#ifdef DEBUG
3653 int original_height = frame_->height();
3654#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003655 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003656 Comment cmnt(masm_, "[ Call");
3657
3658 Expression* function = node->expression();
3659 ZoneList<Expression*>* args = node->arguments();
3660
3661 // Standard function call.
3662 // Check if the function is a variable or a property.
3663 Variable* var = function->AsVariableProxy()->AsVariable();
3664 Property* property = function->AsProperty();
3665
3666 // ------------------------------------------------------------------------
3667 // Fast-case: Use inline caching.
3668 // ---
3669 // According to ECMA-262, section 11.2.3, page 44, the function to call
3670 // must be resolved after the arguments have been evaluated. The IC code
3671 // automatically handles this by loading the arguments before the function
3672 // is resolved in cache misses (this also holds for megamorphic calls).
3673 // ------------------------------------------------------------------------
3674
3675 if (var != NULL && var->is_possibly_eval()) {
3676 // ----------------------------------
3677 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
3678 // ----------------------------------
3679
3680 // In a call to eval, we first call %ResolvePossiblyDirectEval to
3681 // resolve the function we need to call and the receiver of the
3682 // call. Then we call the resolved function using the given
3683 // arguments.
3684 // Prepare stack for call to resolved function.
3685 LoadAndSpill(function);
3686 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3687 frame_->EmitPush(r2); // Slot for receiver
3688 int arg_count = args->length();
3689 for (int i = 0; i < arg_count; i++) {
3690 LoadAndSpill(args->at(i));
3691 }
3692
3693 // Prepare stack for call to ResolvePossiblyDirectEval.
3694 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
3695 frame_->EmitPush(r1);
3696 if (arg_count > 0) {
3697 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
3698 frame_->EmitPush(r1);
3699 } else {
3700 frame_->EmitPush(r2);
3701 }
3702
Leon Clarkee46be812010-01-19 14:06:41 +00003703 // Push the receiver.
3704 __ ldr(r1, frame_->Receiver());
3705 frame_->EmitPush(r1);
3706
Steve Blocka7e24c12009-10-30 11:49:00 +00003707 // Resolve the call.
Leon Clarkee46be812010-01-19 14:06:41 +00003708 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00003709
3710 // Touch up stack with the right values for the function and the receiver.
Leon Clarkee46be812010-01-19 14:06:41 +00003711 __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00003712 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
3713
3714 // Call the function.
3715 CodeForSourcePosition(node->position());
3716
3717 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Leon Clarkee46be812010-01-19 14:06:41 +00003718 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003719 frame_->CallStub(&call_function, arg_count + 1);
3720
3721 __ ldr(cp, frame_->Context());
3722 // Remove the function from the stack.
3723 frame_->Drop();
3724 frame_->EmitPush(r0);
3725
3726 } else if (var != NULL && !var->is_this() && var->is_global()) {
3727 // ----------------------------------
3728 // JavaScript example: 'foo(1, 2, 3)' // foo is global
3729 // ----------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00003730 // Pass the global object as the receiver and let the IC stub
3731 // patch the stack to use the global proxy as 'this' in the
3732 // invoked function.
3733 LoadGlobal();
3734
3735 // Load the arguments.
3736 int arg_count = args->length();
3737 for (int i = 0; i < arg_count; i++) {
3738 LoadAndSpill(args->at(i));
3739 }
3740
Andrei Popescu402d9372010-02-26 13:31:12 +00003741 // Set up the name register and call the IC initialization code.
3742 __ mov(r2, Operand(var->name()));
Steve Blocka7e24c12009-10-30 11:49:00 +00003743 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3744 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3745 CodeForSourcePosition(node->position());
3746 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3747 arg_count + 1);
3748 __ ldr(cp, frame_->Context());
Steve Blocka7e24c12009-10-30 11:49:00 +00003749 frame_->EmitPush(r0);
3750
3751 } else if (var != NULL && var->slot() != NULL &&
3752 var->slot()->type() == Slot::LOOKUP) {
3753 // ----------------------------------
Kristian Monsen25f61362010-05-21 11:50:48 +01003754 // JavaScript examples:
3755 //
3756 // with (obj) foo(1, 2, 3) // foo may be in obj.
3757 //
3758 // function f() {};
3759 // function g() {
3760 // eval(...);
3761 // f(); // f could be in extension object.
3762 // }
Steve Blocka7e24c12009-10-30 11:49:00 +00003763 // ----------------------------------
3764
Kristian Monsen25f61362010-05-21 11:50:48 +01003765 // JumpTargets do not yet support merging frames so the frame must be
3766 // spilled when jumping to these targets.
3767 JumpTarget slow, done;
3768
3769 // Generate fast case for loading functions from slots that
3770 // correspond to local/global variables or arguments unless they
3771 // are shadowed by eval-introduced bindings.
3772 EmitDynamicLoadFromSlotFastCase(var->slot(),
3773 NOT_INSIDE_TYPEOF,
3774 &slow,
3775 &done);
3776
3777 slow.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00003778 // Load the function
3779 frame_->EmitPush(cp);
3780 __ mov(r0, Operand(var->name()));
3781 frame_->EmitPush(r0);
3782 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3783 // r0: slot value; r1: receiver
3784
3785 // Load the receiver.
3786 frame_->EmitPush(r0); // function
3787 frame_->EmitPush(r1); // receiver
3788
Kristian Monsen25f61362010-05-21 11:50:48 +01003789 // If fast case code has been generated, emit code to push the
3790 // function and receiver and have the slow path jump around this
3791 // code.
3792 if (done.is_linked()) {
3793 JumpTarget call;
3794 call.Jump();
3795 done.Bind();
3796 frame_->EmitPush(r0); // function
3797 LoadGlobalReceiver(r1); // receiver
3798 call.Bind();
3799 }
3800
3801 // Call the function. At this point, everything is spilled but the
3802 // function and receiver are in r0 and r1.
Leon Clarkee46be812010-01-19 14:06:41 +00003803 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003804 frame_->EmitPush(r0);
3805
3806 } else if (property != NULL) {
3807 // Check if the key is a literal string.
3808 Literal* literal = property->key()->AsLiteral();
3809
3810 if (literal != NULL && literal->handle()->IsSymbol()) {
3811 // ------------------------------------------------------------------
3812 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
3813 // ------------------------------------------------------------------
3814
Steve Block6ded16b2010-05-10 14:33:55 +01003815 Handle<String> name = Handle<String>::cast(literal->handle());
Steve Blocka7e24c12009-10-30 11:49:00 +00003816
Steve Block6ded16b2010-05-10 14:33:55 +01003817 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
3818 name->IsEqualTo(CStrVector("apply")) &&
3819 args->length() == 2 &&
3820 args->at(1)->AsVariableProxy() != NULL &&
3821 args->at(1)->AsVariableProxy()->IsArguments()) {
3822 // Use the optimized Function.prototype.apply that avoids
3823 // allocating lazily allocated arguments objects.
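 // Illustrative example (not part of the original source):
 //   function f() { return obj.method.apply(this, arguments); }
 // matches this pattern, so the arguments object never has to be
 // materialized before the call.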
3824 CallApplyLazy(property->obj(),
3825 args->at(0),
3826 args->at(1)->AsVariableProxy(),
3827 node->position());
3828
3829 } else {
3830 LoadAndSpill(property->obj()); // Receiver.
3831 // Load the arguments.
3832 int arg_count = args->length();
3833 for (int i = 0; i < arg_count; i++) {
3834 LoadAndSpill(args->at(i));
3835 }
3836
3837 // Set the name register and call the IC initialization code.
3838 __ mov(r2, Operand(name));
3839 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3840 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3841 CodeForSourcePosition(node->position());
3842 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3843 __ ldr(cp, frame_->Context());
3844 frame_->EmitPush(r0);
3845 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003846
3847 } else {
3848 // -------------------------------------------
3849 // JavaScript example: 'array[index](1, 2, 3)'
3850 // -------------------------------------------
3851
Leon Clarked91b9f72010-01-27 17:25:45 +00003852 LoadAndSpill(property->obj());
Kristian Monsen25f61362010-05-21 11:50:48 +01003853 if (!property->is_synthetic()) {
3854 // Duplicate receiver for later use.
3855 __ ldr(r0, MemOperand(sp, 0));
3856 frame_->EmitPush(r0);
3857 }
Leon Clarked91b9f72010-01-27 17:25:45 +00003858 LoadAndSpill(property->key());
Steve Block6ded16b2010-05-10 14:33:55 +01003859 EmitKeyedLoad();
Leon Clarked91b9f72010-01-27 17:25:45 +00003860 // Put the function below the receiver.
Steve Blocka7e24c12009-10-30 11:49:00 +00003861 if (property->is_synthetic()) {
Leon Clarked91b9f72010-01-27 17:25:45 +00003862 // Use the global receiver.
Kristian Monsen25f61362010-05-21 11:50:48 +01003863 frame_->EmitPush(r0); // Function.
Steve Blocka7e24c12009-10-30 11:49:00 +00003864 LoadGlobalReceiver(r0);
3865 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01003866 // Switch receiver and function.
3867 frame_->EmitPop(r1); // Receiver.
3868 frame_->EmitPush(r0); // Function.
3869 frame_->EmitPush(r1); // Receiver.
Steve Blocka7e24c12009-10-30 11:49:00 +00003870 }
3871
3872 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003873 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003874 frame_->EmitPush(r0);
3875 }
3876
3877 } else {
3878 // ----------------------------------
3879 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
3880 // ----------------------------------
3881
3882 // Load the function.
3883 LoadAndSpill(function);
3884
3885 // Pass the global proxy as the receiver.
3886 LoadGlobalReceiver(r0);
3887
3888 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003889 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003890 frame_->EmitPush(r0);
3891 }
Steve Block6ded16b2010-05-10 14:33:55 +01003892 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003893}
3894
3895
3896void CodeGenerator::VisitCallNew(CallNew* node) {
3897#ifdef DEBUG
3898 int original_height = frame_->height();
3899#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003900 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003901 Comment cmnt(masm_, "[ CallNew");
3902
3903 // According to ECMA-262, section 11.2.2, page 44, the function
3904 // expression in new calls must be evaluated before the
3905 // arguments. This is different from ordinary calls, where the
3906 // actual function to call is resolved after the arguments have been
3907 // evaluated.
3908
3909 // Compute function to call and use the global object as the
3910 // receiver. There is no need to use the global proxy here because
3911 // it will always be replaced with a newly allocated object.
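 // Illustrative example (not part of the original source): for 'new Foo(1, 2)'
 // the expression 'Foo' is evaluated first, then the arguments are pushed,
 // and the construct call builtin below allocates the new object and
 // invokes the constructor.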
3912 LoadAndSpill(node->expression());
3913 LoadGlobal();
3914
3915 // Push the arguments ("left-to-right") on the stack.
3916 ZoneList<Expression*>* args = node->arguments();
3917 int arg_count = args->length();
3918 for (int i = 0; i < arg_count; i++) {
3919 LoadAndSpill(args->at(i));
3920 }
3921
3922 // r0: the number of arguments.
Steve Blocka7e24c12009-10-30 11:49:00 +00003923 __ mov(r0, Operand(arg_count));
Steve Blocka7e24c12009-10-30 11:49:00 +00003924 // Load the function into r1 as per calling convention.
Steve Blocka7e24c12009-10-30 11:49:00 +00003925 __ ldr(r1, frame_->ElementAt(arg_count + 1));
3926
3927 // Call the construct call builtin that handles allocation and
3928 // constructor invocation.
3929 CodeForSourcePosition(node->position());
3930 Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
Leon Clarke4515c472010-02-03 11:58:03 +00003931 frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003932
3933 // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
3934 __ str(r0, frame_->Top());
Steve Block6ded16b2010-05-10 14:33:55 +01003935 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003936}
3937
3938
3939void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01003940 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003941 ASSERT(args->length() == 1);
3942 JumpTarget leave, null, function, non_function_constructor;
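 // Illustrative examples (not part of the original source):
 //   %_ClassOf(1)            -> null ('1' is a smi, not a JS object)
 //   %_ClassOf(function(){}) -> "Function"
 //   %_ClassOf([])           -> the constructor's instance class name,
 //                              i.e. "Array"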
3943
3944 // Load the object into r0.
3945 LoadAndSpill(args->at(0));
3946 frame_->EmitPop(r0);
3947
3948 // If the object is a smi, we return null.
3949 __ tst(r0, Operand(kSmiTagMask));
3950 null.Branch(eq);
3951
3952 // Check that the object is a JS object but take special care of JS
3953 // functions to make sure they have 'Function' as their class.
3954 __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
3955 null.Branch(lt);
3956
3957 // As long as JS_FUNCTION_TYPE is the last instance type and it is
3958 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
3959 // LAST_JS_OBJECT_TYPE.
3960 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
3961 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
3962 __ cmp(r1, Operand(JS_FUNCTION_TYPE));
3963 function.Branch(eq);
3964
3965 // Check if the constructor in the map is a function.
3966 __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
3967 __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
3968 non_function_constructor.Branch(ne);
3969
3970 // The r0 register now contains the constructor function. Grab the
3971 // instance class name from there.
3972 __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
3973 __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
3974 frame_->EmitPush(r0);
3975 leave.Jump();
3976
3977 // Functions have class 'Function'.
3978 function.Bind();
3979 __ mov(r0, Operand(Factory::function_class_symbol()));
3980 frame_->EmitPush(r0);
3981 leave.Jump();
3982
3983 // Objects with a non-function constructor have class 'Object'.
3984 non_function_constructor.Bind();
3985 __ mov(r0, Operand(Factory::Object_symbol()));
3986 frame_->EmitPush(r0);
3987 leave.Jump();
3988
3989 // Non-JS objects have class null.
3990 null.Bind();
3991 __ LoadRoot(r0, Heap::kNullValueRootIndex);
3992 frame_->EmitPush(r0);
3993
3994 // All done.
3995 leave.Bind();
3996}
3997
3998
3999void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004000 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004001 ASSERT(args->length() == 1);
4002 JumpTarget leave;
4003 LoadAndSpill(args->at(0));
4004 frame_->EmitPop(r0); // r0 contains object.
4005 // if (object->IsSmi()) return the object.
4006 __ tst(r0, Operand(kSmiTagMask));
4007 leave.Branch(eq);
4008 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4009 __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
4010 leave.Branch(ne);
4011 // Load the value.
4012 __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
4013 leave.Bind();
4014 frame_->EmitPush(r0);
4015}
4016
4017
4018void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004019 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004020 ASSERT(args->length() == 2);
4021 JumpTarget leave;
4022 LoadAndSpill(args->at(0)); // Load the object.
4023 LoadAndSpill(args->at(1)); // Load the value.
4024 frame_->EmitPop(r0); // r0 contains value
4025 frame_->EmitPop(r1); // r1 contains object
4026 // if (object->IsSmi()) return object.
4027 __ tst(r1, Operand(kSmiTagMask));
4028 leave.Branch(eq);
4029 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4030 __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
4031 leave.Branch(ne);
4032 // Store the value.
4033 __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
4034 // Update the write barrier.
4035 __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
4036 __ RecordWrite(r1, r2, r3);
4037 // Leave.
4038 leave.Bind();
4039 frame_->EmitPush(r0);
4040}
4041
4042
4043void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004044 ASSERT(args->length() == 1);
Leon Clarkef7060e22010-06-03 12:02:55 +01004045 Load(args->at(0));
4046 Register reg = frame_->PopToRegister();
4047 __ tst(reg, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004048 cc_reg_ = eq;
4049}
4050
4051
4052void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004053 // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
4054 ASSERT_EQ(args->length(), 3);
4055#ifdef ENABLE_LOGGING_AND_PROFILING
4056 if (ShouldGenerateLog(args->at(0))) {
Leon Clarkef7060e22010-06-03 12:02:55 +01004057 Load(args->at(1));
4058 Load(args->at(2));
4059 frame_->SpillAll();
4060 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004061 __ CallRuntime(Runtime::kLog, 2);
4062 }
4063#endif
Leon Clarkef7060e22010-06-03 12:02:55 +01004064 frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
Steve Blocka7e24c12009-10-30 11:49:00 +00004065}
4066
4067
4068void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004069 ASSERT(args->length() == 1);
Leon Clarkef7060e22010-06-03 12:02:55 +01004070 Load(args->at(0));
4071 Register reg = frame_->PopToRegister();
4072 __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
Steve Blocka7e24c12009-10-30 11:49:00 +00004073 cc_reg_ = eq;
4074}
4075
4076
Steve Block6ded16b2010-05-10 14:33:55 +01004077// Generates the Math.pow method - currently just calls runtime.
4078void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
4079 ASSERT(args->length() == 2);
4080 Load(args->at(0));
4081 Load(args->at(1));
4082 frame_->CallRuntime(Runtime::kMath_pow, 2);
4083 frame_->EmitPush(r0);
4084}
4085
4086
4087// Generates the Math.sqrt method - currently just calls runtime.
4088void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
4089 ASSERT(args->length() == 1);
4090 Load(args->at(0));
4091 frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4092 frame_->EmitPush(r0);
4093}
4094
4095
4096// This generates code that performs a charCodeAt() call or returns
Steve Blocka7e24c12009-10-30 11:49:00 +00004097// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
Steve Block6ded16b2010-05-10 14:33:55 +01004098// It can handle flat, 8 and 16 bit characters and cons strings where the
4099// answer is found in the left hand branch of the cons. The slow case will
4100// flatten the string, which will ensure that the answer is in the left hand
4101// side the next time around.
Steve Blocka7e24c12009-10-30 11:49:00 +00004102void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004103 ASSERT(args->length() == 2);
Steve Blockd0582a62009-12-15 09:54:21 +00004104 Comment(masm_, "[ GenerateFastCharCodeAt");
4105
Leon Clarkef7060e22010-06-03 12:02:55 +01004106 Load(args->at(0));
4107 Load(args->at(1));
4108 Register index = frame_->PopToRegister(); // Index.
4109 Register string = frame_->PopToRegister(index); // String.
4110 Register result = VirtualFrame::scratch0();
4111 Register scratch = VirtualFrame::scratch1();
Steve Blockd0582a62009-12-15 09:54:21 +00004112
Steve Block6ded16b2010-05-10 14:33:55 +01004113 Label slow_case;
4114 Label exit;
4115 StringHelper::GenerateFastCharCodeAt(masm_,
Leon Clarkef7060e22010-06-03 12:02:55 +01004116 string,
4117 index,
4118 scratch,
4119 result,
Steve Block6ded16b2010-05-10 14:33:55 +01004120 &slow_case,
4121 &slow_case,
4122 &slow_case,
4123 &slow_case);
4124 __ jmp(&exit);
Steve Blockd0582a62009-12-15 09:54:21 +00004125
Steve Block6ded16b2010-05-10 14:33:55 +01004126 __ bind(&slow_case);
4127 // Move the undefined value into the result register, which will
4128 // trigger the slow case.
Leon Clarkef7060e22010-06-03 12:02:55 +01004129 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
Steve Blockd0582a62009-12-15 09:54:21 +00004130
Steve Block6ded16b2010-05-10 14:33:55 +01004131 __ bind(&exit);
Leon Clarkef7060e22010-06-03 12:02:55 +01004132 frame_->EmitPush(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00004133}
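
// Illustrative sketch (not part of this file): the fast path described in the
// comment above GenerateFastCharCodeAt, written as plain C++. The string
// classes and accessors used here (String, ConsString, SeqAsciiString,
// SeqTwoByteString, Get, first, second) are hypothetical helpers standing in
// for the tagged-value checks the generated assembly performs.
//
//   int FastCharCodeAt(String* str, int index, bool* bailout) {
//     if (index < 0 || index >= str->length()) { *bailout = true; return 0; }
//     // Follow a cons string only when the answer is in its left branch,
//     // i.e. the right branch is the empty string.
//     while (str->IsConsString()) {
//       ConsString* cons = ConsString::cast(str);
//       if (cons->second()->length() != 0) { *bailout = true; return 0; }
//       str = cons->first();
//     }
//     if (str->IsSeqAsciiString()) {
//       return SeqAsciiString::cast(str)->Get(index);    // 8-bit characters.
//     }
//     if (str->IsSeqTwoByteString()) {
//       return SeqTwoByteString::cast(str)->Get(index);  // 16-bit characters.
//     }
//     *bailout = true;  // Everything else goes to Runtime_StringCharCodeAt,
//     return 0;         // which flattens the string for next time.
//   }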
4134
4135
Steve Block6ded16b2010-05-10 14:33:55 +01004136void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
4137 Comment(masm_, "[ GenerateCharFromCode");
4138 ASSERT(args->length() == 1);
4139
4140 Register code = r1;
4141 Register scratch = ip;
4142 Register result = r0;
4143
4144 LoadAndSpill(args->at(0));
4145 frame_->EmitPop(code);
4146
4147 StringHelper::GenerateCharFromCode(masm_,
4148 code,
4149 scratch,
4150 result,
4151 CALL_FUNCTION);
4152 frame_->EmitPush(result);
4153}
4154
4155
Steve Blocka7e24c12009-10-30 11:49:00 +00004156void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004157 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004158 ASSERT(args->length() == 1);
4159 LoadAndSpill(args->at(0));
4160 JumpTarget answer;
4161 // We need the CC bits to come out as not_equal in the case where the
4162 // object is a smi. This can't be done with the usual test opcode so
4163 // we use XOR to get the right CC bits.
4164 frame_->EmitPop(r0);
4165 __ and_(r1, r0, Operand(kSmiTagMask));
4166 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
4167 answer.Branch(ne);
4168 // It is a heap object - get the map. Check if the object is a JS array.
4169 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
4170 answer.Bind();
4171 cc_reg_ = eq;
4172}
4173
4174
Andrei Popescu402d9372010-02-26 13:31:12 +00004175void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004176 VirtualFrame::SpilledScope spilled_scope(frame_);
Andrei Popescu402d9372010-02-26 13:31:12 +00004177 ASSERT(args->length() == 1);
4178 LoadAndSpill(args->at(0));
4179 JumpTarget answer;
4180 // We need the CC bits to come out as not_equal in the case where the
4181 // object is a smi. This can't be done with the usual test opcode so
4182 // we use XOR to get the right CC bits.
4183 frame_->EmitPop(r0);
4184 __ and_(r1, r0, Operand(kSmiTagMask));
4185 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
4186 answer.Branch(ne);
4187 // It is a heap object - get the map. Check if the object is a regexp.
4188 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
4189 answer.Bind();
4190 cc_reg_ = eq;
4191}
4192
4193
Steve Blockd0582a62009-12-15 09:54:21 +00004194void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
4195 // This generates a fast version of:
4196 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
Steve Block6ded16b2010-05-10 14:33:55 +01004197 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blockd0582a62009-12-15 09:54:21 +00004198 ASSERT(args->length() == 1);
4199 LoadAndSpill(args->at(0));
4200 frame_->EmitPop(r1);
4201 __ tst(r1, Operand(kSmiTagMask));
4202 false_target()->Branch(eq);
4203
4204 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4205 __ cmp(r1, ip);
4206 true_target()->Branch(eq);
4207
4208 Register map_reg = r2;
4209 __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
4210 // Undetectable objects behave like undefined when tested with typeof.
4211 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
Leon Clarkef7060e22010-06-03 12:02:55 +01004212 __ tst(r1, Operand(1 << Map::kIsUndetectable));
4213 false_target()->Branch(ne);
Steve Blockd0582a62009-12-15 09:54:21 +00004214
4215 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
4216 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
4217 false_target()->Branch(lt);
4218 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
4219 cc_reg_ = le;
4220}
4221
4222
4223void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
4224 // This generates a fast version of:
4225 // (%_ClassOf(arg) === 'Function')
Steve Block6ded16b2010-05-10 14:33:55 +01004226 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blockd0582a62009-12-15 09:54:21 +00004227 ASSERT(args->length() == 1);
4228 LoadAndSpill(args->at(0));
4229 frame_->EmitPop(r0);
4230 __ tst(r0, Operand(kSmiTagMask));
4231 false_target()->Branch(eq);
4232 Register map_reg = r2;
4233 __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
4234 cc_reg_ = eq;
4235}
4236
4237
Leon Clarked91b9f72010-01-27 17:25:45 +00004238void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004239 VirtualFrame::SpilledScope spilled_scope(frame_);
Leon Clarked91b9f72010-01-27 17:25:45 +00004240 ASSERT(args->length() == 1);
4241 LoadAndSpill(args->at(0));
4242 frame_->EmitPop(r0);
4243 __ tst(r0, Operand(kSmiTagMask));
4244 false_target()->Branch(eq);
4245 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
4246 __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
4247 __ tst(r1, Operand(1 << Map::kIsUndetectable));
4248 cc_reg_ = ne;
4249}
4250
4251
Steve Blocka7e24c12009-10-30 11:49:00 +00004252void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004253 ASSERT(args->length() == 0);
4254
Leon Clarkef7060e22010-06-03 12:02:55 +01004255 Register scratch0 = VirtualFrame::scratch0();
4256 Register scratch1 = VirtualFrame::scratch1();
Steve Blocka7e24c12009-10-30 11:49:00 +00004257 // Get the frame pointer for the calling frame.
Leon Clarkef7060e22010-06-03 12:02:55 +01004258 __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00004259
4260 // Skip the arguments adaptor frame if it exists.
Leon Clarkef7060e22010-06-03 12:02:55 +01004261 __ ldr(scratch1,
4262 MemOperand(scratch0, StandardFrameConstants::kContextOffset));
4263 __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4264 __ ldr(scratch0,
4265 MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
Steve Blocka7e24c12009-10-30 11:49:00 +00004266
4267 // Check the marker in the calling frame.
Leon Clarkef7060e22010-06-03 12:02:55 +01004268 __ ldr(scratch1,
4269 MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
4270 __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004271 cc_reg_ = eq;
4272}
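
// Illustrative sketch (an aside, not part of this file) of the frame walk
// performed above, with Memory::Address_at / Memory::Object_at used as
// hypothetical helpers meaning "load the word at this address":
//
//   bool CalledAsConstructor(Address fp) {
//     Address caller_fp = Memory::Address_at(
//         fp + StandardFrameConstants::kCallerFPOffset);
//     // Skip the arguments adaptor frame if there is one.
//     if (Memory::Object_at(caller_fp + StandardFrameConstants::kContextOffset)
//         == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)) {
//       caller_fp = Memory::Address_at(
//           caller_fp + StandardFrameConstants::kCallerFPOffset);
//     }
//     return Memory::Object_at(caller_fp + StandardFrameConstants::kMarkerOffset)
//         == Smi::FromInt(StackFrame::CONSTRUCT);
//   }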
4273
4274
4275void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004276 ASSERT(args->length() == 0);
4277
Leon Clarkef7060e22010-06-03 12:02:55 +01004278 Register tos = frame_->GetTOSRegister();
4279 Register scratch0 = VirtualFrame::scratch0();
4280 Register scratch1 = VirtualFrame::scratch1();
Steve Blocka7e24c12009-10-30 11:49:00 +00004281
Steve Block6ded16b2010-05-10 14:33:55 +01004282 // Check if the calling frame is an arguments adaptor frame.
Leon Clarkef7060e22010-06-03 12:02:55 +01004283 __ ldr(scratch0,
4284 MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4285 __ ldr(scratch1,
4286 MemOperand(scratch0, StandardFrameConstants::kContextOffset));
4287 __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4288
4289 // Get the number of formal parameters.
4290 __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
Steve Block6ded16b2010-05-10 14:33:55 +01004291
4292 // Arguments adaptor case: Read the arguments length from the
4293 // adaptor frame.
Leon Clarkef7060e22010-06-03 12:02:55 +01004294 __ ldr(tos,
4295 MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
4296 eq);
Steve Block6ded16b2010-05-10 14:33:55 +01004297
Leon Clarkef7060e22010-06-03 12:02:55 +01004298 frame_->EmitPush(tos);
Steve Blocka7e24c12009-10-30 11:49:00 +00004299}
4300
4301
Steve Block6ded16b2010-05-10 14:33:55 +01004302void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
4303 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004304 ASSERT(args->length() == 1);
4305
4306 // Satisfy contract with ArgumentsAccessStub:
4307 // Load the key into r1 and the formal parameters count into r0.
4308 LoadAndSpill(args->at(0));
4309 frame_->EmitPop(r1);
Andrei Popescu31002712010-02-23 13:46:05 +00004310 __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
Steve Blocka7e24c12009-10-30 11:49:00 +00004311
4312 // Call the shared stub to get to arguments[key].
4313 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
4314 frame_->CallStub(&stub, 0);
4315 frame_->EmitPush(r0);
4316}
4317
4318
Steve Block6ded16b2010-05-10 14:33:55 +01004319void CodeGenerator::GenerateRandomHeapNumber(
4320 ZoneList<Expression*>* args) {
4321 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004322 ASSERT(args->length() == 0);
Steve Block6ded16b2010-05-10 14:33:55 +01004323
4324 Label slow_allocate_heapnumber;
4325 Label heapnumber_allocated;
4326
4327 __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
4328 __ jmp(&heapnumber_allocated);
4329
4330 __ bind(&slow_allocate_heapnumber);
4331 // To allocate a heap number, and ensure that it is not a smi, we call
4332 // the runtime function Runtime::kNumberUnaryMinus on 0, which returns
4333 // the double -0.0. A new, distinct heap number is returned each time.
4334 __ mov(r0, Operand(Smi::FromInt(0)));
4335 __ push(r0);
4336 __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
4337 __ mov(r4, Operand(r0));
4338
4339 __ bind(&heapnumber_allocated);
4340
4341 // Convert 32 random bits in r0 to 0.(32 random bits) in a double
4342 // by computing:
4343 // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
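  // Illustrative sketch (not generated code): the same bit trick in portable
  // C++, assuming IEEE-754 doubles and the usual matching endianness between
  // the integer and floating-point representations:
  //
  //   #include <cstdint>
  //   #include <cstring>
  //   double RandomBitsToDouble(uint32_t random_bits) {
  //     // 0x41300000'00000000 is 1.0 x 2^20; putting the random bits in the
  //     // low 32 mantissa bits gives 2^20 + random_bits * 2^-32.
  //     uint64_t bits = (uint64_t{0x41300000} << 32) | random_bits;
  //     double x;
  //     std::memcpy(&x, &bits, sizeof(x));
  //     return x - 1048576.0;  // Subtract 1.0 x 2^20, leaving 0.(random bits).
  //   }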
4344 if (CpuFeatures::IsSupported(VFP3)) {
4345 __ PrepareCallCFunction(0, r1);
4346 __ CallCFunction(ExternalReference::random_uint32_function(), 0);
4347
4348 CpuFeatures::Scope scope(VFP3);
4349 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
4350 // Create this constant using mov/orr to avoid PC relative load.
4351 __ mov(r1, Operand(0x41000000));
4352 __ orr(r1, r1, Operand(0x300000));
4353 // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
4354 __ vmov(d7, r0, r1);
4355 // Move 0x4130000000000000 to VFP.
4356 __ mov(r0, Operand(0));
4357 __ vmov(d8, r0, r1);
4358 // Subtract and store the result in the heap number.
4359 __ vsub(d7, d7, d8);
4360 __ sub(r0, r4, Operand(kHeapObjectTag));
4361 __ vstr(d7, r0, HeapNumber::kValueOffset);
4362 frame_->EmitPush(r4);
4363 } else {
4364 __ mov(r0, Operand(r4));
4365 __ PrepareCallCFunction(1, r1);
4366 __ CallCFunction(
4367 ExternalReference::fill_heap_number_with_random_function(), 1);
4368 frame_->EmitPush(r0);
4369 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004370}
4371
4372
Steve Blockd0582a62009-12-15 09:54:21 +00004373void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
4374 ASSERT_EQ(2, args->length());
4375
4376 Load(args->at(0));
4377 Load(args->at(1));
4378
Andrei Popescu31002712010-02-23 13:46:05 +00004379 StringAddStub stub(NO_STRING_ADD_FLAGS);
4380 frame_->CallStub(&stub, 2);
Steve Blockd0582a62009-12-15 09:54:21 +00004381 frame_->EmitPush(r0);
4382}
4383
4384
Leon Clarkee46be812010-01-19 14:06:41 +00004385void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
4386 ASSERT_EQ(3, args->length());
4387
4388 Load(args->at(0));
4389 Load(args->at(1));
4390 Load(args->at(2));
4391
Andrei Popescu31002712010-02-23 13:46:05 +00004392 SubStringStub stub;
4393 frame_->CallStub(&stub, 3);
Leon Clarkee46be812010-01-19 14:06:41 +00004394 frame_->EmitPush(r0);
4395}
4396
4397
4398void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
4399 ASSERT_EQ(2, args->length());
4400
4401 Load(args->at(0));
4402 Load(args->at(1));
4403
Leon Clarked91b9f72010-01-27 17:25:45 +00004404 StringCompareStub stub;
4405 frame_->CallStub(&stub, 2);
Leon Clarkee46be812010-01-19 14:06:41 +00004406 frame_->EmitPush(r0);
4407}
4408
4409
4410void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
4411 ASSERT_EQ(4, args->length());
4412
4413 Load(args->at(0));
4414 Load(args->at(1));
4415 Load(args->at(2));
4416 Load(args->at(3));
Steve Block6ded16b2010-05-10 14:33:55 +01004417 RegExpExecStub stub;
4418 frame_->CallStub(&stub, 4);
4419 frame_->EmitPush(r0);
4420}
Leon Clarkee46be812010-01-19 14:06:41 +00004421
Steve Block6ded16b2010-05-10 14:33:55 +01004422
4423void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
4424 // No stub. This code only occurs a few times in regexp.js.
4425 const int kMaxInlineLength = 100;
4426 ASSERT_EQ(3, args->length());
4427 Load(args->at(0)); // Size of array, smi.
4428 Load(args->at(1)); // "index" property value.
4429 Load(args->at(2)); // "input" property value.
4430 {
4431 VirtualFrame::SpilledScope spilled_scope(frame_);
4432 Label slowcase;
4433 Label done;
4434 __ ldr(r1, MemOperand(sp, kPointerSize * 2));
4435 STATIC_ASSERT(kSmiTag == 0);
4436 STATIC_ASSERT(kSmiTagSize == 1);
4437 __ tst(r1, Operand(kSmiTagMask));
4438 __ b(ne, &slowcase);
4439 __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
4440 __ b(hi, &slowcase);
4441 // Smi-tagging is equivalent to multiplying by 2.
4442 // Allocate JSRegExpResult followed by FixedArray with total size in r2.
4443 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4444 // Elements: [Map][Length][..elements..]
4445 // Size of JSArray with two in-object properties and the header of a
4446 // FixedArray.
4447 int objects_size =
4448 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
4449 __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
4450 __ add(r2, r5, Operand(objects_size));
Kristian Monsen25f61362010-05-21 11:50:48 +01004451 __ AllocateInNewSpace(
4452 r2, // In: Size, in words.
4453 r0, // Out: Start of allocation (tagged).
4454 r3, // Scratch register.
4455 r4, // Scratch register.
4456 &slowcase,
4457 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
Steve Block6ded16b2010-05-10 14:33:55 +01004458 // r0: Start of allocated area, object-tagged.
4459 // r1: Number of elements in array, as smi.
4460 // r5: Number of elements, untagged.
4461
4462 // Set JSArray map to global.regexp_result_map().
4463 // Set empty properties FixedArray.
4464 // Set elements to point to FixedArray allocated right after the JSArray.
4465 // Interleave operations for better latency.
4466 __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
4467 __ add(r3, r0, Operand(JSRegExpResult::kSize));
4468 __ mov(r4, Operand(Factory::empty_fixed_array()));
4469 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
4470 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
4471 __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
4472 __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
4473 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4474
4475 // Set input, index and length fields from arguments.
4476 __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
4477 __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
4478 __ add(sp, sp, Operand(kPointerSize));
4479 __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
4480 __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
4481
4482 // Fill out the elements FixedArray.
4483 // r0: JSArray, tagged.
4484 // r3: FixedArray, tagged.
4485 // r5: Number of elements in array, untagged.
4486
4487 // Set map.
4488 __ mov(r2, Operand(Factory::fixed_array_map()));
4489 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
4490 // Set FixedArray length.
4491 __ str(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
4492 // Fill contents of fixed-array with the-hole.
4493 __ mov(r2, Operand(Factory::the_hole_value()));
4494 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4495 // Fill fixed array elements with hole.
4496 // r0: JSArray, tagged.
4497 // r2: the hole.
4498 // r3: Start of elements in FixedArray.
4499 // r5: Number of elements to fill.
4500 Label loop;
4501 __ tst(r5, Operand(r5));
4502 __ bind(&loop);
4503 __ b(le, &done); // Jump if r5 is negative or zero.
4504 __ sub(r5, r5, Operand(1), SetCC);
4505 __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
4506 __ jmp(&loop);
4507
4508 __ bind(&slowcase);
4509 __ CallRuntime(Runtime::kRegExpConstructResult, 3);
4510
4511 __ bind(&done);
4512 }
4513 frame_->Forget(3);
4514 frame_->EmitPush(r0);
4515}
4516
4517
4518class DeferredSearchCache: public DeferredCode {
4519 public:
4520 DeferredSearchCache(Register dst, Register cache, Register key)
4521 : dst_(dst), cache_(cache), key_(key) {
4522 set_comment("[ DeferredSearchCache");
4523 }
4524
4525 virtual void Generate();
4526
4527 private:
4528 Register dst_, cache_, key_;
4529};
4530
4531
4532void DeferredSearchCache::Generate() {
4533 __ Push(cache_, key_);
4534 __ CallRuntime(Runtime::kGetFromCache, 2);
4535 if (!dst_.is(r0)) {
4536 __ mov(dst_, r0);
4537 }
4538}
4539
4540
4541void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
4542 ASSERT_EQ(2, args->length());
4543
4544 ASSERT_NE(NULL, args->at(0)->AsLiteral());
4545 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
4546
4547 Handle<FixedArray> jsfunction_result_caches(
4548 Top::global_context()->jsfunction_result_caches());
4549 if (jsfunction_result_caches->length() <= cache_id) {
4550 __ Abort("Attempt to use undefined cache.");
4551 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
4552 frame_->EmitPush(r0);
4553 return;
4554 }
4555
4556 Load(args->at(1));
4557 frame_->EmitPop(r2);
4558
4559 __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
4560 __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
4561 __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
4562 __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
4563
4564 DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
4565
4566 const int kFingerOffset =
4567 FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
4568 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
4569 __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
4570 // r0 now holds finger offset as a smi.
4571 __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4572 // r3 now points to the start of fixed array elements.
4573 __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
4574 // Note side effect of PreIndex: r3 now points to the key of the pair.
4575 __ cmp(r2, r0);
4576 deferred->Branch(ne);
4577
4578 __ ldr(r0, MemOperand(r3, kPointerSize));
4579
4580 deferred->BindExit();
Leon Clarkee46be812010-01-19 14:06:41 +00004581 frame_->EmitPush(r0);
4582}
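
// Illustrative sketch (an assumption about the cache layout, not code from
// this file): the inline code above only probes the cache's current finger;
// every other case is handled by the deferred runtime call.
//
//   Object* ProbeCacheAtFinger(FixedArray* cache, Object* key, bool* miss) {
//     // The finger is a smi holding the element index of the last-hit key;
//     // the corresponding value lives in the following element.
//     int finger =
//         Smi::cast(cache->get(JSFunctionResultCache::kFingerIndex))->value();
//     if (cache->get(finger) == key) return cache->get(finger + 1);
//     *miss = true;  // The deferred code calls Runtime::kGetFromCache.
//     return NULL;
//   }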
4583
4584
Andrei Popescu402d9372010-02-26 13:31:12 +00004585void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
4586 ASSERT_EQ(args->length(), 1);
4587
4588 // Load the argument on the stack and jump to the runtime.
4589 Load(args->at(0));
4590
Steve Block6ded16b2010-05-10 14:33:55 +01004591 NumberToStringStub stub;
4592 frame_->CallStub(&stub, 1);
4593 frame_->EmitPush(r0);
4594}
4595
4596
4597class DeferredSwapElements: public DeferredCode {
4598 public:
4599 DeferredSwapElements(Register object, Register index1, Register index2)
4600 : object_(object), index1_(index1), index2_(index2) {
4601 set_comment("[ DeferredSwapElements");
4602 }
4603
4604 virtual void Generate();
4605
4606 private:
4607 Register object_, index1_, index2_;
4608};
4609
4610
4611void DeferredSwapElements::Generate() {
4612 __ push(object_);
4613 __ push(index1_);
4614 __ push(index2_);
4615 __ CallRuntime(Runtime::kSwapElements, 3);
4616}
4617
4618
4619void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
4620 Comment cmnt(masm_, "[ GenerateSwapElements");
4621
4622 ASSERT_EQ(3, args->length());
4623
4624 Load(args->at(0));
4625 Load(args->at(1));
4626 Load(args->at(2));
4627
4628 Register index2 = r2;
4629 Register index1 = r1;
4630 Register object = r0;
4631 Register tmp1 = r3;
4632 Register tmp2 = r4;
4633
4634 frame_->EmitPop(index2);
4635 frame_->EmitPop(index1);
4636 frame_->EmitPop(object);
4637
4638 DeferredSwapElements* deferred =
4639 new DeferredSwapElements(object, index1, index2);
4640
4641 // Fetch the map and check if array is in fast case.
4642 // Check that object doesn't require security checks and
4643 // has no indexed interceptor.
4644 __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
4645 deferred->Branch(lt);
4646 __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
4647 __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
4648 deferred->Branch(nz);
4649
4650 // Check the object's elements are in fast case.
4651 __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
4652 __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
4653 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
4654 __ cmp(tmp2, ip);
4655 deferred->Branch(ne);
4656
4657 // Smi-tagging is equivalent to multiplying by 2.
4658 STATIC_ASSERT(kSmiTag == 0);
4659 STATIC_ASSERT(kSmiTagSize == 1);
4660
4661 // Check that both indices are smis.
4662 __ mov(tmp2, index1);
4663 __ orr(tmp2, tmp2, index2);
4664 __ tst(tmp2, Operand(kSmiTagMask));
4665 deferred->Branch(nz);
4666
4667 // Bring the offsets into the fixed array in tmp1 into index1 and
4668 // index2.
4669 __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4670 __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
4671 __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
4672
4673 // Swap elements.
4674 Register tmp3 = object;
4675 object = no_reg;
4676 __ ldr(tmp3, MemOperand(tmp1, index1));
4677 __ ldr(tmp2, MemOperand(tmp1, index2));
4678 __ str(tmp3, MemOperand(tmp1, index2));
4679 __ str(tmp2, MemOperand(tmp1, index1));
4680
4681 Label done;
4682 __ InNewSpace(tmp1, tmp2, eq, &done);
4683 // Possible optimization: do a check that both values are Smis
4684 // (or them and test against Smi mask.)
4685
4686 __ mov(tmp2, tmp1);
4687 RecordWriteStub recordWrite1(tmp1, index1, tmp3);
4688 __ CallStub(&recordWrite1);
4689
4690 RecordWriteStub recordWrite2(tmp2, index2, tmp3);
4691 __ CallStub(&recordWrite2);
4692
4693 __ bind(&done);
4694
4695 deferred->BindExit();
4696 __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
4697 frame_->EmitPush(tmp1);
4698}
4699
4700
4701void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
4702 Comment cmnt(masm_, "[ GenerateCallFunction");
4703
4704 ASSERT(args->length() >= 2);
4705
4706 int n_args = args->length() - 2; // for receiver and function.
4707 Load(args->at(0)); // receiver
4708 for (int i = 0; i < n_args; i++) {
4709 Load(args->at(i + 1));
4710 }
4711 Load(args->at(n_args + 1)); // function
4712 frame_->CallJSFunction(n_args);
Andrei Popescu402d9372010-02-26 13:31:12 +00004713 frame_->EmitPush(r0);
4714}
4715
4716
4717void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
4718 ASSERT_EQ(args->length(), 1);
4719 // Load the argument on the stack and jump to the runtime.
4720 Load(args->at(0));
4721 frame_->CallRuntime(Runtime::kMath_sin, 1);
4722 frame_->EmitPush(r0);
4723}
4724
4725
4726void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
4727 ASSERT_EQ(args->length(), 1);
4728 // Load the argument on the stack and jump to the runtime.
4729 Load(args->at(0));
4730 frame_->CallRuntime(Runtime::kMath_cos, 1);
4731 frame_->EmitPush(r0);
4732}
4733
4734
Steve Blocka7e24c12009-10-30 11:49:00 +00004735void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004736 ASSERT(args->length() == 2);
4737
4738 // Load the two objects into registers and perform the comparison.
Leon Clarkef7060e22010-06-03 12:02:55 +01004739 Load(args->at(0));
4740 Load(args->at(1));
4741 Register lhs = frame_->PopToRegister();
4742 Register rhs = frame_->PopToRegister(lhs);
4743 __ cmp(lhs, rhs);
Steve Blocka7e24c12009-10-30 11:49:00 +00004744 cc_reg_ = eq;
4745}
4746
4747
4748void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
4749#ifdef DEBUG
4750 int original_height = frame_->height();
4751#endif
Steve Block6ded16b2010-05-10 14:33:55 +01004752 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004753 if (CheckForInlineRuntimeCall(node)) {
4754 ASSERT((has_cc() && frame_->height() == original_height) ||
4755 (!has_cc() && frame_->height() == original_height + 1));
4756 return;
4757 }
4758
4759 ZoneList<Expression*>* args = node->arguments();
4760 Comment cmnt(masm_, "[ CallRuntime");
4761 Runtime::Function* function = node->function();
4762
4763 if (function == NULL) {
4764 // Prepare stack for calling JS runtime function.
Steve Blocka7e24c12009-10-30 11:49:00 +00004765 // Push the builtins object found in the current global object.
4766 __ ldr(r1, GlobalObject());
4767 __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
4768 frame_->EmitPush(r0);
4769 }
4770
4771 // Push the arguments ("left-to-right").
4772 int arg_count = args->length();
4773 for (int i = 0; i < arg_count; i++) {
4774 LoadAndSpill(args->at(i));
4775 }
4776
4777 if (function == NULL) {
4778 // Call the JS runtime function.
Andrei Popescu402d9372010-02-26 13:31:12 +00004779 __ mov(r2, Operand(node->name()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004780 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4781 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
4782 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4783 __ ldr(cp, frame_->Context());
Steve Blocka7e24c12009-10-30 11:49:00 +00004784 frame_->EmitPush(r0);
4785 } else {
4786 // Call the C runtime function.
4787 frame_->CallRuntime(function, arg_count);
4788 frame_->EmitPush(r0);
4789 }
Steve Block6ded16b2010-05-10 14:33:55 +01004790 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00004791}
4792
4793
4794void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
4795#ifdef DEBUG
4796 int original_height = frame_->height();
4797#endif
Steve Block6ded16b2010-05-10 14:33:55 +01004798 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004799 Comment cmnt(masm_, "[ UnaryOperation");
4800
4801 Token::Value op = node->op();
4802
4803 if (op == Token::NOT) {
4804 LoadConditionAndSpill(node->expression(),
Steve Blocka7e24c12009-10-30 11:49:00 +00004805 false_target(),
4806 true_target(),
4807 true);
4808 // LoadCondition may (and usually does) leave a test and branch to
4809 // be emitted by the caller. In that case, negate the condition.
4810 if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
4811
4812 } else if (op == Token::DELETE) {
4813 Property* property = node->expression()->AsProperty();
4814 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
4815 if (property != NULL) {
4816 LoadAndSpill(property->obj());
4817 LoadAndSpill(property->key());
Steve Blockd0582a62009-12-15 09:54:21 +00004818 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004819
4820 } else if (variable != NULL) {
4821 Slot* slot = variable->slot();
4822 if (variable->is_global()) {
4823 LoadGlobal();
4824 __ mov(r0, Operand(variable->name()));
4825 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00004826 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004827
4828 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
4829 // lookup the context holding the named variable
4830 frame_->EmitPush(cp);
4831 __ mov(r0, Operand(variable->name()));
4832 frame_->EmitPush(r0);
4833 frame_->CallRuntime(Runtime::kLookupContext, 2);
4834 // r0: context
4835 frame_->EmitPush(r0);
4836 __ mov(r0, Operand(variable->name()));
4837 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00004838 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004839
4840 } else {
4841 // Default: Result of deleting non-global, not dynamically
4842 // introduced variables is false.
4843 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
4844 }
4845
4846 } else {
4847 // Default: Result of deleting expressions is true.
4848 LoadAndSpill(node->expression()); // may have side-effects
4849 frame_->Drop();
4850 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
4851 }
4852 frame_->EmitPush(r0);
4853
4854 } else if (op == Token::TYPEOF) {
4855 // Special case for loading the typeof expression; see comment on
4856 // LoadTypeofExpression().
4857 LoadTypeofExpression(node->expression());
4858 frame_->CallRuntime(Runtime::kTypeof, 1);
4859 frame_->EmitPush(r0); // r0 has result
4860
4861 } else {
Leon Clarke4515c472010-02-03 11:58:03 +00004862 bool overwrite =
4863 (node->expression()->AsBinaryOperation() != NULL &&
4864 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Steve Blocka7e24c12009-10-30 11:49:00 +00004865 LoadAndSpill(node->expression());
4866 frame_->EmitPop(r0);
4867 switch (op) {
4868 case Token::NOT:
4869 case Token::DELETE:
4870 case Token::TYPEOF:
4871 UNREACHABLE(); // handled above
4872 break;
4873
4874 case Token::SUB: {
Leon Clarkee46be812010-01-19 14:06:41 +00004875 GenericUnaryOpStub stub(Token::SUB, overwrite);
Steve Blocka7e24c12009-10-30 11:49:00 +00004876 frame_->CallStub(&stub, 0);
4877 break;
4878 }
4879
4880 case Token::BIT_NOT: {
4881 // smi check
4882 JumpTarget smi_label;
4883 JumpTarget continue_label;
4884 __ tst(r0, Operand(kSmiTagMask));
4885 smi_label.Branch(eq);
4886
Leon Clarke4515c472010-02-03 11:58:03 +00004887 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
4888 frame_->CallStub(&stub, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00004889 continue_label.Jump();
Leon Clarke4515c472010-02-03 11:58:03 +00004890
Steve Blocka7e24c12009-10-30 11:49:00 +00004891 smi_label.Bind();
4892 __ mvn(r0, Operand(r0));
4893 __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
4894 continue_label.Bind();
4895 break;
4896 }
4897
4898 case Token::VOID:
4899 // since the stack top is cached in r0, popping and then
4900 // pushing a value can be done by just writing to r0.
4901 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
4902 break;
4903
4904 case Token::ADD: {
4905 // Smi check.
4906 JumpTarget continue_label;
4907 __ tst(r0, Operand(kSmiTagMask));
4908 continue_label.Branch(eq);
4909 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00004910 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00004911 continue_label.Bind();
4912 break;
4913 }
4914 default:
4915 UNREACHABLE();
4916 }
4917 frame_->EmitPush(r0); // r0 has result
4918 }
4919 ASSERT(!has_valid_frame() ||
4920 (has_cc() && frame_->height() == original_height) ||
4921 (!has_cc() && frame_->height() == original_height + 1));
4922}
4923
4924
4925void CodeGenerator::VisitCountOperation(CountOperation* node) {
4926#ifdef DEBUG
4927 int original_height = frame_->height();
4928#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00004929 Comment cmnt(masm_, "[ CountOperation");
4930
4931 bool is_postfix = node->is_postfix();
4932 bool is_increment = node->op() == Token::INC;
4933
4934 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
4935 bool is_const = (var != NULL && var->mode() == Variable::CONST);
4936
Steve Blocka7e24c12009-10-30 11:49:00 +00004937 if (is_postfix) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004938 frame_->EmitPush(Operand(Smi::FromInt(0)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004939 }
4940
Leon Clarked91b9f72010-01-27 17:25:45 +00004941 // A constant reference is not saved to, so a constant reference is not a
4942 // compound assignment reference.
4943 { Reference target(this, node->expression(), !is_const);
Steve Blocka7e24c12009-10-30 11:49:00 +00004944 if (target.is_illegal()) {
4945 // Spoof the virtual frame to have the expected height (one higher
4946 // than on entry).
4947 if (!is_postfix) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004948 frame_->EmitPush(Operand(Smi::FromInt(0)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004949 }
Steve Block6ded16b2010-05-10 14:33:55 +01004950 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00004951 return;
4952 }
Kristian Monsen25f61362010-05-21 11:50:48 +01004953 // This pushes 0, 1 or 2 words on the stack to be used later when updating
4954 // the target. It also pushes the current value of the target.
Steve Block6ded16b2010-05-10 14:33:55 +01004955 target.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00004956
4957 JumpTarget slow;
4958 JumpTarget exit;
4959
Steve Blocka7e24c12009-10-30 11:49:00 +00004960 // Check for smi operand.
Kristian Monsen25f61362010-05-21 11:50:48 +01004961 Register value = frame_->PopToRegister();
4962 __ tst(value, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004963 slow.Branch(ne);
4964
4965 // Postfix: Store the old value as the result.
4966 if (is_postfix) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004967 frame_->SetElementAt(value, target.size());
Steve Blocka7e24c12009-10-30 11:49:00 +00004968 }
4969
4970 // Perform optimistic increment/decrement.
4971 if (is_increment) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004972 __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00004973 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01004974 __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00004975 }
4976
4977 // If the increment/decrement didn't overflow, we're done.
4978 exit.Branch(vc);
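
    // Aside (illustrative, not generated code): smis are stored as value << 1,
    // so Smi::FromInt(1) is the machine word 2 and the tagged add/sub above
    // computes the same bits as (value +/- 1) << 1:
    //
    //   tagged(v) = v << 1;   tagged(v) + tagged(1) == tagged(v + 1)
    //
    // The smi range is 31 bits, so a smi overflow is exactly a 32-bit signed
    // overflow, which SetCC reports in the V flag tested by the vc branch.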
4979
4980 // Revert optimistic increment/decrement.
4981 if (is_increment) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004982 __ sub(value, value, Operand(Smi::FromInt(1)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004983 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01004984 __ add(value, value, Operand(Smi::FromInt(1)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004985 }
4986
Kristian Monsen25f61362010-05-21 11:50:48 +01004987 // Slow case: Convert to number. At this point the
4988 // value to be incremented is in the value register.
Steve Blocka7e24c12009-10-30 11:49:00 +00004989 slow.Bind();
Kristian Monsen25f61362010-05-21 11:50:48 +01004990
4991 // Convert the operand to a number.
4992 frame_->EmitPush(value);
4993
Steve Blocka7e24c12009-10-30 11:49:00 +00004994 {
Kristian Monsen25f61362010-05-21 11:50:48 +01004995 VirtualFrame::SpilledScope spilled(frame_);
Steve Blockd0582a62009-12-15 09:54:21 +00004996 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Kristian Monsen25f61362010-05-21 11:50:48 +01004997
4998 if (is_postfix) {
4999 // Postfix: store to result (on the stack).
5000 __ str(r0, frame_->ElementAt(target.size()));
5001 }
5002
5003 // Compute the new value.
5004 frame_->EmitPush(r0);
5005 frame_->EmitPush(Operand(Smi::FromInt(1)));
5006 if (is_increment) {
5007 frame_->CallRuntime(Runtime::kNumberAdd, 2);
5008 } else {
5009 frame_->CallRuntime(Runtime::kNumberSub, 2);
5010 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005011 }
5012
Kristian Monsen25f61362010-05-21 11:50:48 +01005013 __ Move(value, r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00005014 // Store the new value in the target if not const.
Kristian Monsen25f61362010-05-21 11:50:48 +01005015 // At this point the answer is in the value register.
Steve Blocka7e24c12009-10-30 11:49:00 +00005016 exit.Bind();
Kristian Monsen25f61362010-05-21 11:50:48 +01005017 frame_->EmitPush(value);
5018 // Set the target with the result, leaving the result on
5019 // top of the stack. Removes the target from the stack if
5020 // it has a non-zero size.
Steve Blocka7e24c12009-10-30 11:49:00 +00005021 if (!is_const) target.SetValue(NOT_CONST_INIT);
5022 }
5023
5024 // Postfix: Discard the new value and use the old.
Kristian Monsen25f61362010-05-21 11:50:48 +01005025 if (is_postfix) frame_->Pop();
Steve Block6ded16b2010-05-10 14:33:55 +01005026 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00005027}
5028
5029
Steve Block6ded16b2010-05-10 14:33:55 +01005030void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005031 // According to ECMA-262 section 11.11, page 58, the binary logical
5032 // operators must yield the result of one of the two expressions
5033 // before any ToBoolean() conversions. This means that the value
5034 // produced by a && or || operator is not necessarily a boolean.
5035
5036 // NOTE: If the left hand side produces a materialized value (not in
5037 // the CC register), we force the right hand side to do the
5038 // same. This is necessary because we may have to branch to the exit
5039 // after evaluating the left hand side (due to the shortcut
5040 // semantics), but the compiler must (statically) know if the result
5041 // of compiling the binary operation is materialized or not.
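  // Illustrative sketch (not generated code) of the required semantics, with
  // Value, Evaluate and ToBoolean as hypothetical helpers:
  //
  //   Value EvaluateAnd(Expression* left, Expression* right) {
  //     Value a = Evaluate(left);
  //     if (!ToBoolean(a)) return a;  // Short-circuit: the value, not 'false'.
  //     return Evaluate(right);
  //   }
  //   // '||' mirrors this: return a when ToBoolean(a) is true.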
Leon Clarkef7060e22010-06-03 12:02:55 +01005042 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block6ded16b2010-05-10 14:33:55 +01005043 if (node->op() == Token::AND) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005044 JumpTarget is_true;
5045 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00005046 &is_true,
5047 false_target(),
5048 false);
5049 if (has_valid_frame() && !has_cc()) {
5050 // The left-hand side result is on top of the virtual frame.
5051 JumpTarget pop_and_continue;
5052 JumpTarget exit;
5053
Leon Clarkef7060e22010-06-03 12:02:55 +01005054 frame_->Dup();
Steve Blocka7e24c12009-10-30 11:49:00 +00005055 // Avoid popping the result if it converts to 'false' using the
5056 // standard ToBoolean() conversion as described in ECMA-262,
5057 // section 9.2, page 30.
5058 ToBoolean(&pop_and_continue, &exit);
5059 Branch(false, &exit);
5060
5061 // Pop the result of evaluating the first part.
5062 pop_and_continue.Bind();
Leon Clarkef7060e22010-06-03 12:02:55 +01005063 frame_->Pop();
Steve Blocka7e24c12009-10-30 11:49:00 +00005064
5065 // Evaluate right side expression.
5066 is_true.Bind();
5067 LoadAndSpill(node->right());
5068
5069 // Exit (always with a materialized value).
5070 exit.Bind();
5071 } else if (has_cc() || is_true.is_linked()) {
5072 // The left-hand side is either (a) partially compiled to
5073 // control flow with a final branch left to emit or (b) fully
5074 // compiled to control flow and possibly true.
5075 if (has_cc()) {
5076 Branch(false, false_target());
5077 }
5078 is_true.Bind();
5079 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00005080 true_target(),
5081 false_target(),
5082 false);
5083 } else {
5084 // Nothing to do.
5085 ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
5086 }
5087
Steve Block6ded16b2010-05-10 14:33:55 +01005088 } else {
5089 ASSERT(node->op() == Token::OR);
Steve Blocka7e24c12009-10-30 11:49:00 +00005090 JumpTarget is_false;
5091 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00005092 true_target(),
5093 &is_false,
5094 false);
5095 if (has_valid_frame() && !has_cc()) {
5096 // The left-hand side result is on top of the virtual frame.
5097 JumpTarget pop_and_continue;
5098 JumpTarget exit;
5099
Leon Clarkef7060e22010-06-03 12:02:55 +01005100 frame_->Dup();
Steve Blocka7e24c12009-10-30 11:49:00 +00005101 // Avoid popping the result if it converts to 'true' using the
5102 // standard ToBoolean() conversion as described in ECMA-262,
5103 // section 9.2, page 30.
5104 ToBoolean(&exit, &pop_and_continue);
5105 Branch(true, &exit);
5106
5107 // Pop the result of evaluating the first part.
5108 pop_and_continue.Bind();
Leon Clarkef7060e22010-06-03 12:02:55 +01005109 frame_->Pop();
Steve Blocka7e24c12009-10-30 11:49:00 +00005110
5111 // Evaluate right side expression.
5112 is_false.Bind();
5113 LoadAndSpill(node->right());
5114
5115 // Exit (always with a materialized value).
5116 exit.Bind();
5117 } else if (has_cc() || is_false.is_linked()) {
5118 // The left-hand side is either (a) partially compiled to
5119 // control flow with a final branch left to emit or (b) fully
5120 // compiled to control flow and possibly false.
5121 if (has_cc()) {
5122 Branch(true, true_target());
5123 }
5124 is_false.Bind();
5125 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00005126 true_target(),
5127 false_target(),
5128 false);
5129 } else {
5130 // Nothing to do.
5131 ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
5132 }
Steve Block6ded16b2010-05-10 14:33:55 +01005133 }
5134}
Steve Blocka7e24c12009-10-30 11:49:00 +00005135
Steve Block6ded16b2010-05-10 14:33:55 +01005136
5137void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
5138#ifdef DEBUG
5139 int original_height = frame_->height();
5140#endif
5141 Comment cmnt(masm_, "[ BinaryOperation");
5142
5143 if (node->op() == Token::AND || node->op() == Token::OR) {
Steve Block6ded16b2010-05-10 14:33:55 +01005144 GenerateLogicalBooleanOperation(node);
Steve Blocka7e24c12009-10-30 11:49:00 +00005145 } else {
5146 // Optimize for the case where (at least) one of the expressions
5147 // is a literal small integer.
5148 Literal* lliteral = node->left()->AsLiteral();
5149 Literal* rliteral = node->right()->AsLiteral();
5150 // NOTE: The code below assumes that the slow cases (calls to runtime)
5151 // never return a constant/immutable object.
5152 bool overwrite_left =
5153 (node->left()->AsBinaryOperation() != NULL &&
5154 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
5155 bool overwrite_right =
5156 (node->right()->AsBinaryOperation() != NULL &&
5157 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
5158
5159 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
Steve Block6ded16b2010-05-10 14:33:55 +01005160 VirtualFrame::RegisterAllocationScope scope(this);
5161 Load(node->left());
Steve Blocka7e24c12009-10-30 11:49:00 +00005162 SmiOperation(node->op(),
5163 rliteral->handle(),
5164 false,
5165 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005166 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
Steve Block6ded16b2010-05-10 14:33:55 +01005167 VirtualFrame::RegisterAllocationScope scope(this);
5168 Load(node->right());
Steve Blocka7e24c12009-10-30 11:49:00 +00005169 SmiOperation(node->op(),
5170 lliteral->handle(),
5171 true,
5172 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005173 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01005174 VirtualFrame::RegisterAllocationScope scope(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00005175 OverwriteMode overwrite_mode = NO_OVERWRITE;
5176 if (overwrite_left) {
5177 overwrite_mode = OVERWRITE_LEFT;
5178 } else if (overwrite_right) {
5179 overwrite_mode = OVERWRITE_RIGHT;
5180 }
Steve Block6ded16b2010-05-10 14:33:55 +01005181 Load(node->left());
5182 Load(node->right());
5183 VirtualFrameBinaryOperation(node->op(), overwrite_mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00005184 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005185 }
5186 ASSERT(!has_valid_frame() ||
5187 (has_cc() && frame_->height() == original_height) ||
5188 (!has_cc() && frame_->height() == original_height + 1));
5189}
5190
5191
5192void CodeGenerator::VisitThisFunction(ThisFunction* node) {
5193#ifdef DEBUG
5194 int original_height = frame_->height();
5195#endif
Leon Clarkef7060e22010-06-03 12:02:55 +01005196 frame_->EmitPush(MemOperand(frame_->Function()));
Steve Block6ded16b2010-05-10 14:33:55 +01005197 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00005198}
5199
5200
5201void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
5202#ifdef DEBUG
5203 int original_height = frame_->height();
5204#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005205 Comment cmnt(masm_, "[ CompareOperation");
5206
Steve Block6ded16b2010-05-10 14:33:55 +01005207 VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
5208
Steve Blocka7e24c12009-10-30 11:49:00 +00005209 // Get the expressions from the node.
5210 Expression* left = node->left();
5211 Expression* right = node->right();
5212 Token::Value op = node->op();
5213
5214 // To make null checks efficient, we check if either left or right is the
5215 // literal 'null'. If so, we optimize the code by inlining a null check
5216 // instead of calling the (very) general runtime routine for checking
5217 // equality.
5218 if (op == Token::EQ || op == Token::EQ_STRICT) {
5219 bool left_is_null =
5220 left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
5221 bool right_is_null =
5222 right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
5223 // The 'null' value can only be equal to 'null' or 'undefined'.
5224 if (left_is_null || right_is_null) {
Steve Block6ded16b2010-05-10 14:33:55 +01005225 Load(left_is_null ? right : left);
5226 Register tos = frame_->PopToRegister();
5227 // JumpTargets can't cope with register allocation yet.
5228 frame_->SpillAll();
Steve Blocka7e24c12009-10-30 11:49:00 +00005229 __ LoadRoot(ip, Heap::kNullValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005230 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005231
5232 // The 'null' value is only equal to 'undefined' if using non-strict
5233 // comparisons.
5234 if (op != Token::EQ_STRICT) {
5235 true_target()->Branch(eq);
5236
5237 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005238 __ cmp(tos, Operand(ip));
Steve Blocka7e24c12009-10-30 11:49:00 +00005239 true_target()->Branch(eq);
5240
Steve Block6ded16b2010-05-10 14:33:55 +01005241 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005242 false_target()->Branch(eq);
5243
5244 // It can be an undetectable object.
Steve Block6ded16b2010-05-10 14:33:55 +01005245 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
5246 __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
5247 __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
5248 __ cmp(tos, Operand(1 << Map::kIsUndetectable));
Steve Blocka7e24c12009-10-30 11:49:00 +00005249 }
5250
5251 cc_reg_ = eq;
5252 ASSERT(has_cc() && frame_->height() == original_height);
5253 return;
5254 }
5255 }
5256
5257 // To make typeof testing for natives implemented in JavaScript really
5258 // efficient, we generate special code for expressions of the form:
5259 // 'typeof <expression> == <string>'.
5260 UnaryOperation* operation = left->AsUnaryOperation();
5261 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
5262 (operation != NULL && operation->op() == Token::TYPEOF) &&
5263 (right->AsLiteral() != NULL &&
5264 right->AsLiteral()->handle()->IsString())) {
5265 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
5266
Steve Block6ded16b2010-05-10 14:33:55 +01005267 // Load the operand, move it to a register.
Steve Blocka7e24c12009-10-30 11:49:00 +00005268 LoadTypeofExpression(operation->expression());
Steve Block6ded16b2010-05-10 14:33:55 +01005269 Register tos = frame_->PopToRegister();
5270
5271 // JumpTargets can't cope with register allocation yet.
5272 frame_->SpillAll();
5273
5274 Register scratch = VirtualFrame::scratch0();
Steve Blocka7e24c12009-10-30 11:49:00 +00005275
5276 if (check->Equals(Heap::number_symbol())) {
Steve Block6ded16b2010-05-10 14:33:55 +01005277 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005278 true_target()->Branch(eq);
Steve Block6ded16b2010-05-10 14:33:55 +01005279 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00005280 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005281 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005282 cc_reg_ = eq;
5283
5284 } else if (check->Equals(Heap::string_symbol())) {
Steve Block6ded16b2010-05-10 14:33:55 +01005285 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005286 false_target()->Branch(eq);
5287
Steve Block6ded16b2010-05-10 14:33:55 +01005288 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00005289
5290 // It can be an undetectable string object.
Steve Block6ded16b2010-05-10 14:33:55 +01005291 __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
5292 __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
5293 __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
Steve Blocka7e24c12009-10-30 11:49:00 +00005294 false_target()->Branch(eq);
5295
Steve Block6ded16b2010-05-10 14:33:55 +01005296 __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
5297 __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00005298 cc_reg_ = lt;
5299
5300 } else if (check->Equals(Heap::boolean_symbol())) {
5301 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005302 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005303 true_target()->Branch(eq);
5304 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005305 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005306 cc_reg_ = eq;
5307
5308 } else if (check->Equals(Heap::undefined_symbol())) {
5309 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005310 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005311 true_target()->Branch(eq);
5312
Steve Block6ded16b2010-05-10 14:33:55 +01005313 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005314 false_target()->Branch(eq);
5315
5316 // It can be an undetectable object.
Steve Block6ded16b2010-05-10 14:33:55 +01005317 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
5318 __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
5319 __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
5320 __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
Steve Blocka7e24c12009-10-30 11:49:00 +00005321
5322 cc_reg_ = eq;
5323
5324 } else if (check->Equals(Heap::function_symbol())) {
Steve Block6ded16b2010-05-10 14:33:55 +01005325 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005326 false_target()->Branch(eq);
Steve Block6ded16b2010-05-10 14:33:55 +01005327 Register map_reg = scratch;
5328 __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
Steve Blockd0582a62009-12-15 09:54:21 +00005329 true_target()->Branch(eq);
5330 // Regular expressions are callable so typeof == 'function'.
Steve Block6ded16b2010-05-10 14:33:55 +01005331 __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005332 cc_reg_ = eq;
5333
5334 } else if (check->Equals(Heap::object_symbol())) {
Steve Block6ded16b2010-05-10 14:33:55 +01005335 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005336 false_target()->Branch(eq);
5337
Steve Blocka7e24c12009-10-30 11:49:00 +00005338 __ LoadRoot(ip, Heap::kNullValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005339 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005340 true_target()->Branch(eq);
5341
Steve Block6ded16b2010-05-10 14:33:55 +01005342 Register map_reg = scratch;
5343 __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
Steve Blockd0582a62009-12-15 09:54:21 +00005344 false_target()->Branch(eq);
5345
Steve Blocka7e24c12009-10-30 11:49:00 +00005346 // It can be an undetectable object.
Steve Block6ded16b2010-05-10 14:33:55 +01005347 __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
5348 __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
5349 __ cmp(tos, Operand(1 << Map::kIsUndetectable));
Steve Blocka7e24c12009-10-30 11:49:00 +00005350 false_target()->Branch(eq);
5351
Steve Block6ded16b2010-05-10 14:33:55 +01005352 __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
5353 __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00005354 false_target()->Branch(lt);
Steve Block6ded16b2010-05-10 14:33:55 +01005355 __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00005356 cc_reg_ = le;
5357
5358 } else {
5359 // Uncommon case: typeof testing against a string literal that is
5360 // never returned from the typeof operator.
5361 false_target()->Jump();
5362 }
5363 ASSERT(!has_valid_frame() ||
5364 (has_cc() && frame_->height() == original_height));
5365 return;
5366 }
5367
5368 switch (op) {
5369 case Token::EQ:
5370 Comparison(eq, left, right, false);
5371 break;
5372
5373 case Token::LT:
5374 Comparison(lt, left, right);
5375 break;
5376
5377 case Token::GT:
5378 Comparison(gt, left, right);
5379 break;
5380
5381 case Token::LTE:
5382 Comparison(le, left, right);
5383 break;
5384
5385 case Token::GTE:
5386 Comparison(ge, left, right);
5387 break;
5388
5389 case Token::EQ_STRICT:
5390 Comparison(eq, left, right, true);
5391 break;
5392
5393 case Token::IN: {
Steve Block6ded16b2010-05-10 14:33:55 +01005394 VirtualFrame::SpilledScope scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005395 LoadAndSpill(left);
5396 LoadAndSpill(right);
Steve Blockd0582a62009-12-15 09:54:21 +00005397 frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00005398 frame_->EmitPush(r0);
5399 break;
5400 }
5401
5402 case Token::INSTANCEOF: {
Steve Block6ded16b2010-05-10 14:33:55 +01005403 VirtualFrame::SpilledScope scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005404 LoadAndSpill(left);
5405 LoadAndSpill(right);
5406 InstanceofStub stub;
5407 frame_->CallStub(&stub, 2);
5408 // At this point if instanceof succeeded then r0 == 0.
5409 __ tst(r0, Operand(r0));
5410 cc_reg_ = eq;
5411 break;
5412 }
5413
5414 default:
5415 UNREACHABLE();
5416 }
5417 ASSERT((has_cc() && frame_->height() == original_height) ||
5418 (!has_cc() && frame_->height() == original_height + 1));
5419}
5420
5421
Steve Block6ded16b2010-05-10 14:33:55 +01005422class DeferredReferenceGetNamedValue: public DeferredCode {
5423 public:
Leon Clarkef7060e22010-06-03 12:02:55 +01005424 explicit DeferredReferenceGetNamedValue(Register receiver,
5425 Handle<String> name)
5426 : receiver_(receiver), name_(name) {
Steve Block6ded16b2010-05-10 14:33:55 +01005427 set_comment("[ DeferredReferenceGetNamedValue");
5428 }
5429
5430 virtual void Generate();
5431
5432 private:
Leon Clarkef7060e22010-06-03 12:02:55 +01005433 Register receiver_;
Steve Block6ded16b2010-05-10 14:33:55 +01005434 Handle<String> name_;
5435};
5436
5437
5438void DeferredReferenceGetNamedValue::Generate() {
Leon Clarkef7060e22010-06-03 12:02:55 +01005439 ASSERT(receiver_.is(r0) || receiver_.is(r1));
5440
Steve Block6ded16b2010-05-10 14:33:55 +01005441 Register scratch1 = VirtualFrame::scratch0();
5442 Register scratch2 = VirtualFrame::scratch1();
5443 __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
5444 __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
5445
Leon Clarkef7060e22010-06-03 12:02:55 +01005446 // Ensure receiver in r0 and name in r2 to match load ic calling convention.
5447 __ Move(r0, receiver_);
Steve Block6ded16b2010-05-10 14:33:55 +01005448 __ mov(r2, Operand(name_));
5449
5450 // The rest of the instructions in the deferred code must be together.
5451 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5452 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5453 __ Call(ic, RelocInfo::CODE_TARGET);
5454    // The call must be followed by a nop(1) instruction to indicate that the
5455    // in-object property load has been inlined.
5456 __ nop(PROPERTY_ACCESS_INLINED);
5457
5458 // Block the constant pool for one more instruction after leaving this
5459 // constant pool block scope to include the branch instruction ending the
5460 // deferred code.
5461 __ BlockConstPoolFor(1);
5462 }
5463}
5464
5465
5466class DeferredReferenceGetKeyedValue: public DeferredCode {
5467 public:
Kristian Monsen25f61362010-05-21 11:50:48 +01005468 DeferredReferenceGetKeyedValue(Register key, Register receiver)
5469 : key_(key), receiver_(receiver) {
Steve Block6ded16b2010-05-10 14:33:55 +01005470 set_comment("[ DeferredReferenceGetKeyedValue");
5471 }
5472
5473 virtual void Generate();
Kristian Monsen25f61362010-05-21 11:50:48 +01005474
5475 private:
5476 Register key_;
5477 Register receiver_;
Steve Block6ded16b2010-05-10 14:33:55 +01005478};
5479
5480
5481void DeferredReferenceGetKeyedValue::Generate() {
Kristian Monsen25f61362010-05-21 11:50:48 +01005482 ASSERT((key_.is(r0) && receiver_.is(r1)) ||
5483 (key_.is(r1) && receiver_.is(r0)));
5484
Steve Block6ded16b2010-05-10 14:33:55 +01005485 Register scratch1 = VirtualFrame::scratch0();
5486 Register scratch2 = VirtualFrame::scratch1();
5487 __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
5488 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
5489
Kristian Monsen25f61362010-05-21 11:50:48 +01005490 // Ensure key in r0 and receiver in r1 to match keyed load ic calling
5491 // convention.
5492 if (key_.is(r1)) {
5493 __ Swap(r0, r1, ip);
5494 }
5495
Steve Block6ded16b2010-05-10 14:33:55 +01005496 // The rest of the instructions in the deferred code must be together.
5497 { Assembler::BlockConstPoolScope block_const_pool(masm_);
Kristian Monsen25f61362010-05-21 11:50:48 +01005498 // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
Steve Block6ded16b2010-05-10 14:33:55 +01005499 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
5500 __ Call(ic, RelocInfo::CODE_TARGET);
5501 // The call must be followed by a nop instruction to indicate that the
5502 // keyed load has been inlined.
5503 __ nop(PROPERTY_ACCESS_INLINED);
5504
5505 // Block the constant pool for one more instruction after leaving this
5506 // constant pool block scope to include the branch instruction ending the
5507 // deferred code.
5508 __ BlockConstPoolFor(1);
5509 }
5510}
5511
5512
5513class DeferredReferenceSetKeyedValue: public DeferredCode {
5514 public:
Leon Clarkef7060e22010-06-03 12:02:55 +01005515 DeferredReferenceSetKeyedValue(Register value,
5516 Register key,
5517 Register receiver)
5518 : value_(value), key_(key), receiver_(receiver) {
Steve Block6ded16b2010-05-10 14:33:55 +01005519 set_comment("[ DeferredReferenceSetKeyedValue");
5520 }
5521
5522 virtual void Generate();
Leon Clarkef7060e22010-06-03 12:02:55 +01005523
5524 private:
5525 Register value_;
5526 Register key_;
5527 Register receiver_;
Steve Block6ded16b2010-05-10 14:33:55 +01005528};
5529
5530
5531void DeferredReferenceSetKeyedValue::Generate() {
5532 Register scratch1 = VirtualFrame::scratch0();
5533 Register scratch2 = VirtualFrame::scratch1();
5534 __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
5535 __ IncrementCounter(
5536 &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
5537
Leon Clarkef7060e22010-06-03 12:02:55 +01005538 // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
5539 // calling convention.
5540 if (value_.is(r1)) {
5541 __ Swap(r0, r1, ip);
5542 }
5543 ASSERT(receiver_.is(r2));
5544
Steve Block6ded16b2010-05-10 14:33:55 +01005545 // The rest of the instructions in the deferred code must be together.
5546 { Assembler::BlockConstPoolScope block_const_pool(masm_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005547 // Call keyed store IC. It has the arguments value, key and receiver in r0,
5548 // r1 and r2.
Steve Block6ded16b2010-05-10 14:33:55 +01005549 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
5550 __ Call(ic, RelocInfo::CODE_TARGET);
5551 // The call must be followed by a nop instruction to indicate that the
5552 // keyed store has been inlined.
5553 __ nop(PROPERTY_ACCESS_INLINED);
5554
5555 // Block the constant pool for one more instruction after leaving this
5556 // constant pool block scope to include the branch instruction ending the
5557 // deferred code.
5558 __ BlockConstPoolFor(1);
5559 }
5560}
5561
5562
5563void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
5564 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
5565 Comment cmnt(masm(), "[ Load from named Property");
5566 // Setup the name register and call load IC.
5567 frame_->CallLoadIC(name,
5568 is_contextual
5569 ? RelocInfo::CODE_TARGET_CONTEXT
5570 : RelocInfo::CODE_TARGET);
5571 } else {
5572 // Inline the in-object property case.
5573 Comment cmnt(masm(), "[ Inlined named property load");
5574
5575 // Counter will be decremented in the deferred code. Placed here to avoid
5576 // having it in the instruction stream below where patching will occur.
5577 __ IncrementCounter(&Counters::named_load_inline, 1,
5578 frame_->scratch0(), frame_->scratch1());
5579
5580    // The following instructions are the inlined load of an in-object property.
5581    // Parts of this code are patched, so the exact instructions generated need
5582    // to be fixed. Therefore the constant pool is blocked while generating
5583    // this code.
5584
5585 // Load the receiver from the stack.
Leon Clarkef7060e22010-06-03 12:02:55 +01005586 Register receiver = frame_->PopToRegister();
5587 VirtualFrame::SpilledScope spilled(frame_);
Steve Block6ded16b2010-05-10 14:33:55 +01005588
5589 DeferredReferenceGetNamedValue* deferred =
Leon Clarkef7060e22010-06-03 12:02:55 +01005590 new DeferredReferenceGetNamedValue(receiver, name);
Steve Block6ded16b2010-05-10 14:33:55 +01005591
5592#ifdef DEBUG
5593 int kInlinedNamedLoadInstructions = 7;
5594 Label check_inlined_codesize;
5595 masm_->bind(&check_inlined_codesize);
5596#endif
5597
5598 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5599 // Check that the receiver is a heap object.
Leon Clarkef7060e22010-06-03 12:02:55 +01005600 __ tst(receiver, Operand(kSmiTagMask));
Steve Block6ded16b2010-05-10 14:33:55 +01005601 deferred->Branch(eq);
5602
5603 // Check the map. The null map used below is patched by the inline cache
5604 // code.
Leon Clarkef7060e22010-06-03 12:02:55 +01005605 __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01005606 __ mov(r3, Operand(Factory::null_value()));
5607 __ cmp(r2, r3);
5608 deferred->Branch(ne);
5609
5610 // Initially use an invalid index. The index will be patched by the
5611 // inline cache code.
Leon Clarkef7060e22010-06-03 12:02:55 +01005612 __ ldr(r0, MemOperand(receiver, 0));
Steve Block6ded16b2010-05-10 14:33:55 +01005613
5614 // Make sure that the expected number of instructions are generated.
5615 ASSERT_EQ(kInlinedNamedLoadInstructions,
5616 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
5617 }
5618
5619 deferred->BindExit();
5620 }
5621}
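
// A minimal standalone sketch, not V8 code: it models why the inlined named
// load above must keep a fixed instruction count.  The site is emitted with a
// placeholder map (null) and an invalid offset; when the IC later learns the
// real map and in-object offset, it patches those two "immediates" in place,
// so they have to sit at fixed distances from the call site.  All names below
// are illustrative.  Kept out of the build with #if 0; it compiles and runs
// on its own.
#if 0
#include <cassert>
#include <cstdint>
#include <cstdio>

// The two patchable "immediates": the expected map and the in-object offset.
struct InlineLoadSite {
  uintptr_t expected_map;  // Emitted as 0 (the null map), patched later.
  int32_t field_offset;    // Emitted as an invalid index, patched later.
};

struct DemoObject {
  uintptr_t map;
  int32_t fields[4];
};

// Fast path: map check, then a load from a fixed offset.  A map mismatch
// plays the role of deferred->Branch(ne) above: fall back to the LoadIC.
static bool TryInlineLoad(const InlineLoadSite& site, const DemoObject& obj,
                          int32_t* result) {
  if (obj.map != site.expected_map) return false;
  *result = obj.fields[site.field_offset];
  return true;
}

int main() {
  DemoObject o = {0x1234, {7, 8, 9, 10}};
  InlineLoadSite site = {0, -1};            // Freshly emitted: always misses.
  int32_t value = 0;
  assert(!TryInlineLoad(site, o, &value));  // First run misses...
  site.expected_map = o.map;                // ...then the "IC" patches the map
  site.field_offset = 2;                    // and the real in-object offset.
  assert(TryInlineLoad(site, o, &value) && value == 9);
  printf("patched inline load -> %d\n", value);
  return 0;
}
#endif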
5622
5623
5624void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
5625#ifdef DEBUG
5626 int expected_height = frame_->height() - (is_contextual ? 1 : 2);
5627#endif
5628 frame_->CallStoreIC(name, is_contextual);
5629
5630 ASSERT_EQ(expected_height, frame_->height());
5631}
5632
5633
5634void CodeGenerator::EmitKeyedLoad() {
5635 if (loop_nesting() == 0) {
5636 Comment cmnt(masm_, "[ Load from keyed property");
5637 frame_->CallKeyedLoadIC();
5638 } else {
5639 // Inline the keyed load.
5640 Comment cmnt(masm_, "[ Inlined load from keyed property");
5641
5642 // Counter will be decremented in the deferred code. Placed here to avoid
5643 // having it in the instruction stream below where patching will occur.
5644 __ IncrementCounter(&Counters::keyed_load_inline, 1,
5645 frame_->scratch0(), frame_->scratch1());
5646
Kristian Monsen25f61362010-05-21 11:50:48 +01005647 // Load the key and receiver from the stack.
5648 Register key = frame_->PopToRegister();
5649 Register receiver = frame_->PopToRegister(key);
Steve Block6ded16b2010-05-10 14:33:55 +01005650 VirtualFrame::SpilledScope spilled(frame_);
5651
Kristian Monsen25f61362010-05-21 11:50:48 +01005652 // The deferred code expects key and receiver in registers.
Steve Block6ded16b2010-05-10 14:33:55 +01005653 DeferredReferenceGetKeyedValue* deferred =
Kristian Monsen25f61362010-05-21 11:50:48 +01005654 new DeferredReferenceGetKeyedValue(key, receiver);
Steve Block6ded16b2010-05-10 14:33:55 +01005655
5656 // Check that the receiver is a heap object.
5657 __ tst(receiver, Operand(kSmiTagMask));
5658 deferred->Branch(eq);
5659
5660    // The following instructions are part of the inlined keyed property load
5661    // code, which can be patched. Therefore the exact number of instructions
5662    // generated needs to be fixed, so the constant pool is blocked while
5663    // generating this code.
Steve Block6ded16b2010-05-10 14:33:55 +01005664 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5665 Register scratch1 = VirtualFrame::scratch0();
5666 Register scratch2 = VirtualFrame::scratch1();
5667 // Check the map. The null map used below is patched by the inline cache
5668 // code.
5669 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
Kristian Monsen25f61362010-05-21 11:50:48 +01005670#ifdef DEBUG
5671 Label check_inlined_codesize;
5672 masm_->bind(&check_inlined_codesize);
5673#endif
Steve Block6ded16b2010-05-10 14:33:55 +01005674 __ mov(scratch2, Operand(Factory::null_value()));
5675 __ cmp(scratch1, scratch2);
5676 deferred->Branch(ne);
5677
5678 // Check that the key is a smi.
5679 __ tst(key, Operand(kSmiTagMask));
5680 deferred->Branch(ne);
5681
5682 // Get the elements array from the receiver and check that it
5683 // is not a dictionary.
5684 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
5685 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
5686 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
5687 __ cmp(scratch2, ip);
5688 deferred->Branch(ne);
5689
5690 // Check that key is within bounds. Use unsigned comparison to handle
5691 // negative keys.
5692 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
5693 __ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
5694 deferred->Branch(ls); // Unsigned less equal.
5695
5696 // Load and check that the result is not the hole (key is a smi).
5697 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
5698 __ add(scratch1,
5699 scratch1,
5700 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
Kristian Monsen25f61362010-05-21 11:50:48 +01005701 __ ldr(scratch1,
Steve Block6ded16b2010-05-10 14:33:55 +01005702 MemOperand(scratch1, key, LSL,
5703 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
Kristian Monsen25f61362010-05-21 11:50:48 +01005704 __ cmp(scratch1, scratch2);
Steve Block6ded16b2010-05-10 14:33:55 +01005705 deferred->Branch(eq);
5706
Kristian Monsen25f61362010-05-21 11:50:48 +01005707 __ mov(r0, scratch1);
Steve Block6ded16b2010-05-10 14:33:55 +01005708 // Make sure that the expected number of instructions are generated.
Leon Clarkef7060e22010-06-03 12:02:55 +01005709 ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
Steve Block6ded16b2010-05-10 14:33:55 +01005710 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
5711 }
5712
5713 deferred->BindExit();
5714 }
5715}
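
// A minimal standalone sketch, not V8 code: the "unsigned comparison handles
// negative keys" trick used for the bounds check above.  Reinterpreting a
// negative index as unsigned turns it into a huge value, so a single unsigned
// compare against the length rejects both negative and too-large keys.
// Kept out of the build with #if 0; it compiles and runs on its own.
#if 0
#include <cassert>
#include <cstdint>

static bool KeyInBounds(int32_t key, int32_t length) {
  // Same effect as "cmp length, key; branch if unsigned lower-or-same" above,
  // just written from the key's point of view.
  return static_cast<uint32_t>(key) < static_cast<uint32_t>(length);
}

int main() {
  assert(KeyInBounds(0, 4));
  assert(KeyInBounds(3, 4));
  assert(!KeyInBounds(4, 4));   // One past the end.
  assert(!KeyInBounds(-1, 4));  // -1 becomes 0xFFFFFFFF, so it fails too.
  return 0;
}
#endif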
5716
5717
5718void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
Steve Block6ded16b2010-05-10 14:33:55 +01005719 // Generate inlined version of the keyed store if the code is in a loop
5720 // and the key is likely to be a smi.
5721 if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
5722 // Inline the keyed store.
5723 Comment cmnt(masm_, "[ Inlined store to keyed property");
5724
Leon Clarkef7060e22010-06-03 12:02:55 +01005725 Register scratch1 = VirtualFrame::scratch0();
5726 Register scratch2 = VirtualFrame::scratch1();
5727 Register scratch3 = r3;
Steve Block6ded16b2010-05-10 14:33:55 +01005728
5729 // Counter will be decremented in the deferred code. Placed here to avoid
5730 // having it in the instruction stream below where patching will occur.
5731 __ IncrementCounter(&Counters::keyed_store_inline, 1,
Leon Clarkef7060e22010-06-03 12:02:55 +01005732 scratch1, scratch2);
5733
5734 // Load the value, key and receiver from the stack.
5735 Register value = frame_->PopToRegister();
5736 Register key = frame_->PopToRegister(value);
5737 Register receiver = r2;
5738 frame_->EmitPop(receiver);
5739 VirtualFrame::SpilledScope spilled(frame_);
5740
5741 // The deferred code expects value, key and receiver in registers.
5742 DeferredReferenceSetKeyedValue* deferred =
5743 new DeferredReferenceSetKeyedValue(value, key, receiver);
Steve Block6ded16b2010-05-10 14:33:55 +01005744
5745 // Check that the value is a smi. As this inlined code does not set the
5746 // write barrier it is only possible to store smi values.
Leon Clarkef7060e22010-06-03 12:02:55 +01005747 __ tst(value, Operand(kSmiTagMask));
Steve Block6ded16b2010-05-10 14:33:55 +01005748 deferred->Branch(ne);
5749
Steve Block6ded16b2010-05-10 14:33:55 +01005750 // Check that the key is a smi.
Leon Clarkef7060e22010-06-03 12:02:55 +01005751 __ tst(key, Operand(kSmiTagMask));
Steve Block6ded16b2010-05-10 14:33:55 +01005752 deferred->Branch(ne);
5753
5754 // Check that the receiver is a heap object.
Leon Clarkef7060e22010-06-03 12:02:55 +01005755 __ tst(receiver, Operand(kSmiTagMask));
Steve Block6ded16b2010-05-10 14:33:55 +01005756 deferred->Branch(eq);
5757
5758 // Check that the receiver is a JSArray.
Leon Clarkef7060e22010-06-03 12:02:55 +01005759 __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
Steve Block6ded16b2010-05-10 14:33:55 +01005760 deferred->Branch(ne);
5761
5762 // Check that the key is within bounds. Both the key and the length of
5763 // the JSArray are smis. Use unsigned comparison to handle negative keys.
Leon Clarkef7060e22010-06-03 12:02:55 +01005764 __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
5765 __ cmp(scratch1, key);
Steve Block6ded16b2010-05-10 14:33:55 +01005766 deferred->Branch(ls); // Unsigned less equal.
5767
5768    // The following instructions are part of the inlined keyed property store
5769    // code, which can be patched. Therefore the exact number of instructions
5770    // generated needs to be fixed, so the constant pool is blocked while
5771    // generating this code.
Steve Block6ded16b2010-05-10 14:33:55 +01005772 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5773 // Get the elements array from the receiver and check that it
5774 // is not a dictionary.
Leon Clarkef7060e22010-06-03 12:02:55 +01005775 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
5776 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01005777 // Read the fixed array map from the constant pool (not from the root
5778 // array) so that the value can be patched. When debugging, we patch this
5779 // comparison to always fail so that we will hit the IC call in the
5780 // deferred code which will allow the debugger to break for fast case
5781 // stores.
Leon Clarkef7060e22010-06-03 12:02:55 +01005782#ifdef DEBUG
5783 Label check_inlined_codesize;
5784 masm_->bind(&check_inlined_codesize);
5785#endif
5786 __ mov(scratch3, Operand(Factory::fixed_array_map()));
5787 __ cmp(scratch2, scratch3);
Steve Block6ded16b2010-05-10 14:33:55 +01005788 deferred->Branch(ne);
5789
5790 // Store the value.
Leon Clarkef7060e22010-06-03 12:02:55 +01005791 __ add(scratch1, scratch1,
5792 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5793 __ str(value,
5794 MemOperand(scratch1, key, LSL,
5795 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
Steve Block6ded16b2010-05-10 14:33:55 +01005796
5797 // Make sure that the expected number of instructions are generated.
Leon Clarkef7060e22010-06-03 12:02:55 +01005798 ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
Steve Block6ded16b2010-05-10 14:33:55 +01005799 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
5800 }
5801
5802 deferred->BindExit();
5803 } else {
5804 frame()->CallKeyedStoreIC();
5805 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005806}
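
// A minimal standalone sketch, not V8 code: why the inlined keyed store above
// only accepts smi values.  On 32-bit V8 of this vintage a smi is the integer
// shifted left by one with a zero tag bit, while heap object pointers carry a
// one tag bit.  A stored smi can therefore never be a pointer into the heap,
// which is what lets the fast path skip the write barrier.  Kept out of the
// build with #if 0; it compiles and runs on its own.
#if 0
#include <cassert>
#include <cstdint>

static const intptr_t kDemoSmiTagMask = 1;

// value << kSmiTagSize, written as a multiplication so it stays well defined
// for negative values.
static intptr_t TagSmi(int32_t value) {
  return static_cast<intptr_t>(value) * 2;
}

static bool IsSmi(intptr_t tagged) {
  return (tagged & kDemoSmiTagMask) == 0;  // The tst(..., kSmiTagMask) above.
}

int main() {
  assert(IsSmi(TagSmi(42)));
  assert(IsSmi(TagSmi(-7)));
  intptr_t heap_object_like = 0x1000 | 1;   // kHeapObjectTag == 1.
  assert(!IsSmi(heap_object_like));         // Would need the write barrier.
  return 0;
}
#endif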
5807
5808
Steve Blocka7e24c12009-10-30 11:49:00 +00005809#ifdef DEBUG
5810bool CodeGenerator::HasValidEntryRegisters() { return true; }
5811#endif
5812
5813
5814#undef __
5815#define __ ACCESS_MASM(masm)
5816
5817
5818Handle<String> Reference::GetName() {
5819 ASSERT(type_ == NAMED);
5820 Property* property = expression_->AsProperty();
5821 if (property == NULL) {
5822 // Global variable reference treated as a named property reference.
5823 VariableProxy* proxy = expression_->AsVariableProxy();
5824 ASSERT(proxy->AsVariable() != NULL);
5825 ASSERT(proxy->AsVariable()->is_global());
5826 return proxy->name();
5827 } else {
5828 Literal* raw_name = property->key()->AsLiteral();
5829 ASSERT(raw_name != NULL);
5830 return Handle<String>(String::cast(*raw_name->handle()));
5831 }
5832}
5833
5834
Steve Blockd0582a62009-12-15 09:54:21 +00005835void Reference::GetValue() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005836 ASSERT(cgen_->HasValidEntryRegisters());
5837 ASSERT(!is_illegal());
5838 ASSERT(!cgen_->has_cc());
5839 MacroAssembler* masm = cgen_->masm();
5840 Property* property = expression_->AsProperty();
5841 if (property != NULL) {
5842 cgen_->CodeForSourcePosition(property->position());
5843 }
5844
5845 switch (type_) {
5846 case SLOT: {
5847 Comment cmnt(masm, "[ Load from Slot");
5848 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
5849 ASSERT(slot != NULL);
Steve Block6ded16b2010-05-10 14:33:55 +01005850 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
Kristian Monsen25f61362010-05-21 11:50:48 +01005851 if (!persist_after_get_) {
5852 cgen_->UnloadReference(this);
5853 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005854 break;
5855 }
5856
5857 case NAMED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00005858 Variable* var = expression_->AsVariableProxy()->AsVariable();
Steve Block6ded16b2010-05-10 14:33:55 +01005859 bool is_global = var != NULL;
5860 ASSERT(!is_global || var->is_global());
Leon Clarkef7060e22010-06-03 12:02:55 +01005861 if (persist_after_get_) {
5862 cgen_->frame()->Dup();
5863 }
Steve Block6ded16b2010-05-10 14:33:55 +01005864 cgen_->EmitNamedLoad(GetName(), is_global);
5865 cgen_->frame()->EmitPush(r0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005866 if (!persist_after_get_) set_unloaded();
Steve Blocka7e24c12009-10-30 11:49:00 +00005867 break;
5868 }
5869
5870 case KEYED: {
Leon Clarkef7060e22010-06-03 12:02:55 +01005871 ASSERT(property != NULL);
Kristian Monsen25f61362010-05-21 11:50:48 +01005872 if (persist_after_get_) {
5873 cgen_->frame()->Dup2();
5874 }
Steve Block6ded16b2010-05-10 14:33:55 +01005875 cgen_->EmitKeyedLoad();
Leon Clarked91b9f72010-01-27 17:25:45 +00005876 cgen_->frame()->EmitPush(r0);
Kristian Monsen25f61362010-05-21 11:50:48 +01005877 if (!persist_after_get_) set_unloaded();
Steve Blocka7e24c12009-10-30 11:49:00 +00005878 break;
5879 }
5880
5881 default:
5882 UNREACHABLE();
5883 }
5884}
5885
5886
5887void Reference::SetValue(InitState init_state) {
5888 ASSERT(!is_illegal());
5889 ASSERT(!cgen_->has_cc());
5890 MacroAssembler* masm = cgen_->masm();
5891 VirtualFrame* frame = cgen_->frame();
5892 Property* property = expression_->AsProperty();
5893 if (property != NULL) {
5894 cgen_->CodeForSourcePosition(property->position());
5895 }
5896
5897 switch (type_) {
5898 case SLOT: {
5899 Comment cmnt(masm, "[ Store to Slot");
5900 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
Leon Clarkee46be812010-01-19 14:06:41 +00005901 cgen_->StoreToSlot(slot, init_state);
Steve Block6ded16b2010-05-10 14:33:55 +01005902 set_unloaded();
Steve Blocka7e24c12009-10-30 11:49:00 +00005903 break;
5904 }
5905
5906 case NAMED: {
5907 Comment cmnt(masm, "[ Store to named Property");
Steve Block6ded16b2010-05-10 14:33:55 +01005908 cgen_->EmitNamedStore(GetName(), false);
Steve Blocka7e24c12009-10-30 11:49:00 +00005909 frame->EmitPush(r0);
Andrei Popescu402d9372010-02-26 13:31:12 +00005910 set_unloaded();
Steve Blocka7e24c12009-10-30 11:49:00 +00005911 break;
5912 }
5913
5914 case KEYED: {
5915 Comment cmnt(masm, "[ Store to keyed Property");
5916 Property* property = expression_->AsProperty();
5917 ASSERT(property != NULL);
5918 cgen_->CodeForSourcePosition(property->position());
Steve Block6ded16b2010-05-10 14:33:55 +01005919 cgen_->EmitKeyedStore(property->key()->type());
Steve Blocka7e24c12009-10-30 11:49:00 +00005920 frame->EmitPush(r0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005921 set_unloaded();
Steve Blocka7e24c12009-10-30 11:49:00 +00005922 break;
5923 }
5924
5925 default:
5926 UNREACHABLE();
5927 }
5928}
5929
5930
Leon Clarkee46be812010-01-19 14:06:41 +00005931void FastNewClosureStub::Generate(MacroAssembler* masm) {
Steve Block6ded16b2010-05-10 14:33:55 +01005932 // Create a new closure from the given function info in new
5933 // space. Set the context to the current context in cp.
Leon Clarkee46be812010-01-19 14:06:41 +00005934 Label gc;
5935
Steve Block6ded16b2010-05-10 14:33:55 +01005936 // Pop the function info from the stack.
Leon Clarkee46be812010-01-19 14:06:41 +00005937 __ pop(r3);
5938
5939 // Attempt to allocate new JSFunction in new space.
Kristian Monsen25f61362010-05-21 11:50:48 +01005940 __ AllocateInNewSpace(JSFunction::kSize,
Leon Clarkee46be812010-01-19 14:06:41 +00005941 r0,
5942 r1,
5943 r2,
5944 &gc,
5945 TAG_OBJECT);
5946
5947 // Compute the function map in the current global context and set that
5948 // as the map of the allocated object.
5949 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5950 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
5951 __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
5952 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
5953
Steve Block6ded16b2010-05-10 14:33:55 +01005954 // Initialize the rest of the function. We don't have to update the
5955 // write barrier because the allocated object is in new space.
5956 __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
5957 __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
5958 __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
5959 __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
5960 __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
5961 __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
5962 __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
5963 __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00005964
Steve Block6ded16b2010-05-10 14:33:55 +01005965 // Return result. The argument function info has been popped already.
Leon Clarkee46be812010-01-19 14:06:41 +00005966 __ Ret();
5967
5968 // Create a new closure through the slower runtime call.
5969 __ bind(&gc);
Steve Block6ded16b2010-05-10 14:33:55 +01005970 __ Push(cp, r3);
5971 __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
Leon Clarkee46be812010-01-19 14:06:41 +00005972}
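
// A minimal standalone sketch, not V8 code: the shape of the
// AllocateInNewSpace / "bind(&gc)" pattern above, as plain C++.  Allocation
// bumps a top pointer against a limit; if the object does not fit, the fast
// path gives up and the caller falls back to the runtime (here: returns
// false).  Pointer values and sizes are illustrative.  Kept out of the build
// with #if 0; it compiles and runs on its own.
#if 0
#include <cassert>
#include <cstddef>
#include <cstdint>

struct DemoNewSpace {
  uintptr_t top;
  uintptr_t limit;
};

static bool AllocateRaw(DemoNewSpace* space, size_t size_in_bytes,
                        uintptr_t* result) {
  if (space->limit - space->top < size_in_bytes) return false;  // Take &gc.
  *result = space->top;
  space->top += size_in_bytes;
  return true;
}

int main() {
  DemoNewSpace space = {0x1000, 0x1040};       // 64 bytes of "new space".
  uintptr_t object = 0;
  assert(AllocateRaw(&space, 32, &object) && object == 0x1000);
  assert(AllocateRaw(&space, 32, &object) && object == 0x1020);
  assert(!AllocateRaw(&space, 8, &object));    // Full: tail-call the runtime.
  return 0;
}
#endif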
5973
5974
5975void FastNewContextStub::Generate(MacroAssembler* masm) {
5976 // Try to allocate the context in new space.
5977 Label gc;
5978 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
5979
5980 // Attempt to allocate the context in new space.
Kristian Monsen25f61362010-05-21 11:50:48 +01005981 __ AllocateInNewSpace(FixedArray::SizeFor(length),
Leon Clarkee46be812010-01-19 14:06:41 +00005982 r0,
5983 r1,
5984 r2,
5985 &gc,
5986 TAG_OBJECT);
5987
5988 // Load the function from the stack.
Andrei Popescu402d9372010-02-26 13:31:12 +00005989 __ ldr(r3, MemOperand(sp, 0));
Leon Clarkee46be812010-01-19 14:06:41 +00005990
5991 // Setup the object header.
5992 __ LoadRoot(r2, Heap::kContextMapRootIndex);
5993 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
5994 __ mov(r2, Operand(length));
5995 __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
5996
5997 // Setup the fixed slots.
5998 __ mov(r1, Operand(Smi::FromInt(0)));
5999 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
6000 __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
6001 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
6002 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
6003
6004 // Copy the global object from the surrounding context.
6005 __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
6006 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
6007
6008 // Initialize the rest of the slots to undefined.
6009 __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
6010 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
6011 __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
6012 }
6013
6014 // Remove the on-stack argument and return.
6015 __ mov(cp, r0);
6016 __ pop();
6017 __ Ret();
6018
6019 // Need to collect. Call into runtime system.
6020 __ bind(&gc);
Steve Block6ded16b2010-05-10 14:33:55 +01006021 __ TailCallRuntime(Runtime::kNewContext, 1, 1);
Leon Clarkee46be812010-01-19 14:06:41 +00006022}
6023
6024
Andrei Popescu402d9372010-02-26 13:31:12 +00006025void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
6026 // Stack layout on entry:
6027 //
6028 // [sp]: constant elements.
6029 // [sp + kPointerSize]: literal index.
6030 // [sp + (2 * kPointerSize)]: literals array.
6031
6032 // All sizes here are multiples of kPointerSize.
6033 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
6034 int size = JSArray::kSize + elements_size;
6035
6036 // Load boilerplate object into r3 and check if we need to create a
6037 // boilerplate.
6038 Label slow_case;
6039 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
6040 __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6041 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6042 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
6043 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6044 __ cmp(r3, ip);
6045 __ b(eq, &slow_case);
6046
6047 // Allocate both the JS array and the elements array in one big
6048 // allocation. This avoids multiple limit checks.
Kristian Monsen25f61362010-05-21 11:50:48 +01006049 __ AllocateInNewSpace(size,
Andrei Popescu402d9372010-02-26 13:31:12 +00006050 r0,
6051 r1,
6052 r2,
6053 &slow_case,
6054 TAG_OBJECT);
6055
6056 // Copy the JS array part.
6057 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
6058 if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
6059 __ ldr(r1, FieldMemOperand(r3, i));
6060 __ str(r1, FieldMemOperand(r0, i));
6061 }
6062 }
6063
6064 if (length_ > 0) {
6065 // Get hold of the elements array of the boilerplate and setup the
6066 // elements pointer in the resulting object.
6067 __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
6068 __ add(r2, r0, Operand(JSArray::kSize));
6069 __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
6070
6071 // Copy the elements array.
6072 for (int i = 0; i < elements_size; i += kPointerSize) {
6073 __ ldr(r1, FieldMemOperand(r3, i));
6074 __ str(r1, FieldMemOperand(r2, i));
6075 }
6076 }
6077
6078 // Return and remove the on-stack parameters.
6079 __ add(sp, sp, Operand(3 * kPointerSize));
6080 __ Ret();
6081
6082 __ bind(&slow_case);
Steve Block6ded16b2010-05-10 14:33:55 +01006083 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00006084}
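
// A minimal standalone sketch, not V8 code: the copy loops above clone the
// boilerplate word by word, i.e. a shallow copy.  Any pointer-sized field in
// the clone ends up referring to the same target as in the original, which is
// exactly the semantics of a shallow array literal.  Kept out of the build
// with #if 0; it compiles and runs on its own.
#if 0
#include <cassert>
#include <cstdint>
#include <cstring>

struct DemoBoilerplate {
  int32_t length;
  const char* shared_payload;  // Stands in for a pointer-valued field.
};

int main() {
  const char payload[] = "elements";
  DemoBoilerplate original = {3, payload};
  DemoBoilerplate clone;
  memcpy(&clone, &original, sizeof clone);     // The word-by-word copy loop.
  assert(clone.length == original.length);
  assert(clone.shared_payload == original.shared_payload);  // Shared, not deep.
  return 0;
}
#endif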
6085
6086
6087// Takes a Smi and converts it to an IEEE 64-bit floating point value in two
6088// registers. The format is 1 sign bit, 11 exponent bits (biased by 1023) and
6089// 52 fraction bits (20 in the first word, 32 in the second). The zeros_
6090// register is a scratch register. Destroys the source register. No GC occurs
6091// during this stub, so you don't have to set up the frame.
6092class ConvertToDoubleStub : public CodeStub {
6093 public:
6094 ConvertToDoubleStub(Register result_reg_1,
6095 Register result_reg_2,
6096 Register source_reg,
6097 Register scratch_reg)
6098 : result1_(result_reg_1),
6099 result2_(result_reg_2),
6100 source_(source_reg),
6101 zeros_(scratch_reg) { }
6102
6103 private:
6104 Register result1_;
6105 Register result2_;
6106 Register source_;
6107 Register zeros_;
6108
6109 // Minor key encoding in 16 bits.
6110 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
6111 class OpBits: public BitField<Token::Value, 2, 14> {};
6112
6113 Major MajorKey() { return ConvertToDouble; }
6114 int MinorKey() {
6115 // Encode the parameters in a unique 16 bit value.
6116 return result1_.code() +
6117 (result2_.code() << 4) +
6118 (source_.code() << 8) +
6119 (zeros_.code() << 12);
6120 }
6121
6122 void Generate(MacroAssembler* masm);
6123
6124 const char* GetName() { return "ConvertToDoubleStub"; }
6125
6126#ifdef DEBUG
6127 void Print() { PrintF("ConvertToDoubleStub\n"); }
6128#endif
6129};
6130
6131
6132void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
6133#ifndef BIG_ENDIAN_FLOATING_POINT
6134 Register exponent = result1_;
6135 Register mantissa = result2_;
6136#else
6137 Register exponent = result2_;
6138 Register mantissa = result1_;
6139#endif
6140 Label not_special;
6141 // Convert from Smi to integer.
6142 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
6143 // Move sign bit from source to destination. This works because the sign bit
6144 // in the exponent word of the double has the same position and polarity as
6145 // the 2's complement sign bit in a Smi.
6146 ASSERT(HeapNumber::kSignMask == 0x80000000u);
6147 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
6148 // Subtract from 0 if source was negative.
6149 __ rsb(source_, source_, Operand(0), LeaveCC, ne);
Steve Block6ded16b2010-05-10 14:33:55 +01006150
6151 // We have -1, 0 or 1, which we treat specially. Register source_ contains
6152 // absolute value: it is either equal to 1 (special case of -1 and 1),
6153 // greater than 1 (not a special case) or less than 1 (special case of 0).
Steve Blocka7e24c12009-10-30 11:49:00 +00006154 __ cmp(source_, Operand(1));
6155 __ b(gt, &not_special);
6156
Steve Blocka7e24c12009-10-30 11:49:00 +00006157 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
6158 static const uint32_t exponent_word_for_1 =
6159 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
Steve Block6ded16b2010-05-10 14:33:55 +01006160 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
Steve Blocka7e24c12009-10-30 11:49:00 +00006161 // 1, 0 and -1 all have 0 for the second word.
6162 __ mov(mantissa, Operand(0));
6163 __ Ret();
6164
6165 __ bind(&not_special);
Steve Block6ded16b2010-05-10 14:33:55 +01006166 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
Steve Blocka7e24c12009-10-30 11:49:00 +00006167 // Gets the wrong answer for 0, but we already checked for that case above.
Steve Block6ded16b2010-05-10 14:33:55 +01006168 __ CountLeadingZeros(source_, mantissa, zeros_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006169 // Compute exponent and or it into the exponent register.
Steve Block6ded16b2010-05-10 14:33:55 +01006170 // We use mantissa as a scratch register here.
Steve Blocka7e24c12009-10-30 11:49:00 +00006171 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
6172 __ orr(exponent,
6173 exponent,
6174 Operand(mantissa, LSL, HeapNumber::kExponentShift));
6175 // Shift up the source chopping the top bit off.
6176 __ add(zeros_, zeros_, Operand(1));
6177 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
6178 __ mov(source_, Operand(source_, LSL, zeros_));
6179 // Compute lower part of fraction (last 12 bits).
6180 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
6181 // And the top (top 20 bits).
6182 __ orr(exponent,
6183 exponent,
6184 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
6185 __ Ret();
6186}
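
// A minimal standalone sketch, not V8 code: the integer-to-double conversion
// the stub performs on the untagged value, done with plain C++ bit twiddling
// and checked against the compiler's own int-to-double conversion.  It
// normalises the magnitude, drops the implicit leading 1, biases the exponent
// by 1023 and splits the mantissa 20/32 across the two words, as described in
// the class comment above.  Kept out of the build with #if 0; it compiles and
// runs on its own.
#if 0
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

static void IntToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
  uint32_t sign = 0;
  uint32_t magnitude = static_cast<uint32_t>(value);
  if (value < 0) {
    sign = 0x80000000u;
    magnitude = 0u - magnitude;  // Unsigned negate; also correct for INT_MIN.
  }
  if (magnitude == 0) { *hi = sign; *lo = 0; return; }  // +0.0 / -0.0.
  int leading_zeros = 0;
  while (!(magnitude & 0x80000000u)) {
    magnitude <<= 1;
    ++leading_zeros;
  }
  // The value is now 1.fraction * 2^(31 - leading_zeros).
  uint32_t exponent = 1023u + 31u - static_cast<uint32_t>(leading_zeros);
  magnitude <<= 1;  // Drop the implicit leading 1 of the mantissa.
  *hi = sign | (exponent << 20) | (magnitude >> 12);  // Top 20 mantissa bits.
  *lo = magnitude << 20;                              // Remaining bits.
}

int main() {
  const int32_t samples[] = {0, 1, -1, 5, -5, 12345678, -2147483647 - 1};
  for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
    uint32_t hi = 0, lo = 0;
    IntToDoubleWords(samples[i], &hi, &lo);
    double d = static_cast<double>(samples[i]);
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);
    assert(hi == static_cast<uint32_t>(bits >> 32));
    assert(lo == static_cast<uint32_t>(bits));
    printf("%11d -> %08x %08x\n", static_cast<int>(samples[i]), hi, lo);
  }
  return 0;
}
#endif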
6187
6188
Steve Blocka7e24c12009-10-30 11:49:00 +00006189// See comment for class.
Steve Blockd0582a62009-12-15 09:54:21 +00006190void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006191 Label max_negative_int;
6192 // the_int_ has the answer which is a signed int32 but not a Smi.
6193 // We test for the special value that has a different exponent. This test
6194 // has the neat side effect of setting the flags according to the sign.
6195 ASSERT(HeapNumber::kSignMask == 0x80000000u);
6196 __ cmp(the_int_, Operand(0x80000000u));
6197 __ b(eq, &max_negative_int);
6198 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
6199 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
6200 uint32_t non_smi_exponent =
6201 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
6202 __ mov(scratch_, Operand(non_smi_exponent));
6203 // Set the sign bit in scratch_ if the value was negative.
6204 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
6205 // Subtract from 0 if the value was negative.
6206 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
6207  // We should be masking the implicit first digit of the mantissa away here,
6208 // but it just ends up combining harmlessly with the last digit of the
6209 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
6210 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
6211 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
6212 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
6213 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
6214 __ str(scratch_, FieldMemOperand(the_heap_number_,
6215 HeapNumber::kExponentOffset));
6216 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
6217 __ str(scratch_, FieldMemOperand(the_heap_number_,
6218 HeapNumber::kMantissaOffset));
6219 __ Ret();
6220
6221 __ bind(&max_negative_int);
6222 // The max negative int32 is stored as a positive number in the mantissa of
6223 // a double because it uses a sign bit instead of using two's complement.
6224 // The actual mantissa bits stored are all 0 because the implicit most
6225 // significant 1 bit is not stored.
6226 non_smi_exponent += 1 << HeapNumber::kExponentShift;
6227 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
6228 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
6229 __ mov(ip, Operand(0));
6230 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
6231 __ Ret();
6232}
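
// A minimal standalone sketch, not V8 code: why 0x80000000 gets its own path
// above.  Negating INT_MIN in 32-bit two's complement yields INT_MIN again, so
// the generic "take the absolute value, then build the mantissa" sequence
// cannot represent it; the stub instead emits the fixed bit pattern for
// -1.0 * 2^31 (sign bit, biased exponent 1023 + 31, all-zero mantissa).
// Kept out of the build with #if 0; it compiles and runs on its own.
#if 0
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t int_min = 0x80000000u;
  assert(0u - int_min == int_min);  // |INT_MIN| overflows back to itself.

  uint32_t expected_high_word = 0x80000000u | ((1023u + 31u) << 20);
  assert(expected_high_word == 0xC1E00000u);

  double d = -2147483648.0;
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);
  assert(static_cast<uint32_t>(bits >> 32) == expected_high_word);
  assert(static_cast<uint32_t>(bits) == 0u);  // Mantissa low word is zero.
  return 0;
}
#endif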
6233
6234
6235// Handle the case where the lhs and rhs are the same object.
6236// Equality is almost reflexive (everything but NaN), so this is a test
6237// for "identity and not NaN".
6238static void EmitIdenticalObjectComparison(MacroAssembler* masm,
6239 Label* slow,
Leon Clarkee46be812010-01-19 14:06:41 +00006240 Condition cc,
6241 bool never_nan_nan) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006242 Label not_identical;
Leon Clarkee46be812010-01-19 14:06:41 +00006243 Label heap_number, return_equal;
6244 Register exp_mask_reg = r5;
Steve Block6ded16b2010-05-10 14:33:55 +01006245 __ cmp(r0, r1);
Steve Blocka7e24c12009-10-30 11:49:00 +00006246 __ b(ne, &not_identical);
6247
Leon Clarkee46be812010-01-19 14:06:41 +00006248 // The two objects are identical. If we know that one of them isn't NaN then
6249 // we now know they test equal.
6250 if (cc != eq || !never_nan_nan) {
6251 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00006252
Leon Clarkee46be812010-01-19 14:06:41 +00006253 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
6254 // so we do the second best thing - test it ourselves.
6255    // The operands are identical and, per the caller, not both Smis, so neither
6256    // of them is a Smi. If the operand is not a heap number, then return equal.
6257 if (cc == lt || cc == gt) {
6258 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00006259 __ b(ge, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00006260 } else {
6261 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6262 __ b(eq, &heap_number);
6263 // Comparing JS objects with <=, >= is complicated.
6264 if (cc != eq) {
6265 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
6266 __ b(ge, slow);
6267 // Normally here we fall through to return_equal, but undefined is
6268 // special: (undefined == undefined) == true, but
6269 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
6270 if (cc == le || cc == ge) {
6271 __ cmp(r4, Operand(ODDBALL_TYPE));
6272 __ b(ne, &return_equal);
6273 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01006274 __ cmp(r0, r2);
Leon Clarkee46be812010-01-19 14:06:41 +00006275 __ b(ne, &return_equal);
6276 if (cc == le) {
6277 // undefined <= undefined should fail.
6278 __ mov(r0, Operand(GREATER));
6279 } else {
6280 // undefined >= undefined should fail.
6281 __ mov(r0, Operand(LESS));
6282 }
6283 __ mov(pc, Operand(lr)); // Return.
Steve Blockd0582a62009-12-15 09:54:21 +00006284 }
Steve Blockd0582a62009-12-15 09:54:21 +00006285 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006286 }
6287 }
Leon Clarkee46be812010-01-19 14:06:41 +00006288
Steve Blocka7e24c12009-10-30 11:49:00 +00006289 __ bind(&return_equal);
6290 if (cc == lt) {
6291 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
6292 } else if (cc == gt) {
6293 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
6294 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00006295 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
Steve Blocka7e24c12009-10-30 11:49:00 +00006296 }
6297 __ mov(pc, Operand(lr)); // Return.
6298
Leon Clarkee46be812010-01-19 14:06:41 +00006299 if (cc != eq || !never_nan_nan) {
6300 // For less and greater we don't have to check for NaN since the result of
6301 // x < x is false regardless. For the others here is some code to check
6302 // for NaN.
6303 if (cc != lt && cc != gt) {
6304 __ bind(&heap_number);
6305 // It is a heap number, so return non-equal if it's NaN and equal if it's
6306 // not NaN.
Steve Blocka7e24c12009-10-30 11:49:00 +00006307
Leon Clarkee46be812010-01-19 14:06:41 +00006308 // The representation of NaN values has all exponent bits (52..62) set,
6309 // and not all mantissa bits (0..51) clear.
6310 // Read top bits of double representation (second word of value).
6311 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6312 // Test that exponent bits are all set.
6313 __ and_(r3, r2, Operand(exp_mask_reg));
6314 __ cmp(r3, Operand(exp_mask_reg));
6315 __ b(ne, &return_equal);
6316
6317 // Shift out flag and all exponent bits, retaining only mantissa.
6318 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
6319 // Or with all low-bits of mantissa.
6320 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
6321 __ orr(r0, r3, Operand(r2), SetCC);
6322 // For equal we already have the right value in r0: Return zero (equal)
6323 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
6324 // not (it's a NaN). For <= and >= we need to load r0 with the failing
6325 // value if it's a NaN.
6326 if (cc != eq) {
6327 // All-zero means Infinity means equal.
6328 __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
6329 if (cc == le) {
6330 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
6331 } else {
6332 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
6333 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006334 }
Leon Clarkee46be812010-01-19 14:06:41 +00006335 __ mov(pc, Operand(lr)); // Return.
Steve Blocka7e24c12009-10-30 11:49:00 +00006336 }
Leon Clarkee46be812010-01-19 14:06:41 +00006337 // No fall through here.
Steve Blocka7e24c12009-10-30 11:49:00 +00006338 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006339
6340 __ bind(&not_identical);
6341}
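
// A minimal standalone sketch, not V8 code: the NaN test the code above
// performs on the heap number's two words, written against the raw 64-bit
// pattern.  NaN has all eleven exponent bits set and a non-zero mantissa;
// Infinity has the same exponent but a zero mantissa, which is why the code
// ORs the mantissa bits in before deciding.  Kept out of the build with #if 0;
// it compiles and runs on its own.
#if 0
#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

static uint64_t BitsOf(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);
  return bits;
}

static bool IsNaNBits(uint64_t bits) {
  const uint64_t kExponentMask = 0x7FF0000000000000ull;
  const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFull;
  return (bits & kExponentMask) == kExponentMask &&
         (bits & kMantissaMask) != 0;
}

int main() {
  assert(IsNaNBits(BitsOf(std::numeric_limits<double>::quiet_NaN())));
  assert(!IsNaNBits(BitsOf(std::numeric_limits<double>::infinity())));
  assert(!IsNaNBits(BitsOf(1.5)));
  assert(!IsNaNBits(BitsOf(0.0)));
  return 0;
}
#endif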
6342
6343
6344// See comment at call site.
6345static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Leon Clarkee46be812010-01-19 14:06:41 +00006346 Label* lhs_not_nan,
Steve Blocka7e24c12009-10-30 11:49:00 +00006347 Label* slow,
6348 bool strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006349 Label rhs_is_smi;
Steve Blocka7e24c12009-10-30 11:49:00 +00006350 __ tst(r0, Operand(kSmiTagMask));
Leon Clarked91b9f72010-01-27 17:25:45 +00006351 __ b(eq, &rhs_is_smi);
Steve Blocka7e24c12009-10-30 11:49:00 +00006352
Leon Clarked91b9f72010-01-27 17:25:45 +00006353 // Lhs is a Smi. Check whether the rhs is a heap number.
Steve Blocka7e24c12009-10-30 11:49:00 +00006354 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6355 if (strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006356 // If rhs is not a number and lhs is a Smi then strict equality cannot
Steve Blocka7e24c12009-10-30 11:49:00 +00006357 // succeed. Return non-equal (r0 is already not zero)
6358 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
6359 } else {
6360 // Smi compared non-strictly with a non-Smi non-heap-number. Call
6361 // the runtime.
6362 __ b(ne, slow);
6363 }
6364
Leon Clarked91b9f72010-01-27 17:25:45 +00006365 // Lhs (r1) is a smi, rhs (r0) is a number.
Steve Blockd0582a62009-12-15 09:54:21 +00006366 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006367    // Convert lhs to a double in d7.
Steve Blockd0582a62009-12-15 09:54:21 +00006368 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00006369 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
6370 __ vmov(s15, r7);
Steve Block6ded16b2010-05-10 14:33:55 +01006371 __ vcvt_f64_s32(d7, s15);
Leon Clarked91b9f72010-01-27 17:25:45 +00006372 // Load the double from rhs, tagged HeapNumber r0, to d6.
6373 __ sub(r7, r0, Operand(kHeapObjectTag));
6374 __ vldr(d6, r7, HeapNumber::kValueOffset);
Steve Blockd0582a62009-12-15 09:54:21 +00006375 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00006376 __ push(lr);
6377 // Convert lhs to a double in r2, r3.
Steve Blockd0582a62009-12-15 09:54:21 +00006378 __ mov(r7, Operand(r1));
6379 ConvertToDoubleStub stub1(r3, r2, r7, r6);
6380 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
Leon Clarked91b9f72010-01-27 17:25:45 +00006381 // Load rhs to a double in r0, r1.
Leon Clarkef7060e22010-06-03 12:02:55 +01006382 __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
Leon Clarked91b9f72010-01-27 17:25:45 +00006383 __ pop(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00006384 }
6385
Steve Blocka7e24c12009-10-30 11:49:00 +00006386 // We now have both loaded as doubles but we can skip the lhs nan check
Leon Clarked91b9f72010-01-27 17:25:45 +00006387 // since it's a smi.
Leon Clarkee46be812010-01-19 14:06:41 +00006388 __ jmp(lhs_not_nan);
Steve Blocka7e24c12009-10-30 11:49:00 +00006389
Leon Clarked91b9f72010-01-27 17:25:45 +00006390 __ bind(&rhs_is_smi);
6391 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
Steve Blocka7e24c12009-10-30 11:49:00 +00006392 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
6393 if (strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006394 // If lhs is not a number and rhs is a smi then strict equality cannot
Steve Blocka7e24c12009-10-30 11:49:00 +00006395 // succeed. Return non-equal.
6396 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
6397 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
6398 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00006399 // Smi compared non-strictly with a non-smi non-heap-number. Call
Steve Blocka7e24c12009-10-30 11:49:00 +00006400 // the runtime.
6401 __ b(ne, slow);
6402 }
6403
Leon Clarked91b9f72010-01-27 17:25:45 +00006404 // Rhs (r0) is a smi, lhs (r1) is a heap number.
Steve Blockd0582a62009-12-15 09:54:21 +00006405 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006406    // Convert rhs to a double in d6.
Steve Blockd0582a62009-12-15 09:54:21 +00006407 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00006408 // Load the double from lhs, tagged HeapNumber r1, to d7.
6409 __ sub(r7, r1, Operand(kHeapObjectTag));
6410 __ vldr(d7, r7, HeapNumber::kValueOffset);
6411 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
6412 __ vmov(s13, r7);
Steve Block6ded16b2010-05-10 14:33:55 +01006413 __ vcvt_f64_s32(d6, s13);
Steve Blockd0582a62009-12-15 09:54:21 +00006414 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00006415 __ push(lr);
6416 // Load lhs to a double in r2, r3.
Leon Clarkef7060e22010-06-03 12:02:55 +01006417 __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
Leon Clarked91b9f72010-01-27 17:25:45 +00006418 // Convert rhs to a double in r0, r1.
Steve Blockd0582a62009-12-15 09:54:21 +00006419 __ mov(r7, Operand(r0));
6420 ConvertToDoubleStub stub2(r1, r0, r7, r6);
6421 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
Leon Clarked91b9f72010-01-27 17:25:45 +00006422 __ pop(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00006423 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006424 // Fall through to both_loaded_as_doubles.
6425}
6426
6427
Leon Clarkee46be812010-01-19 14:06:41 +00006428void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006429 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00006430 Register rhs_exponent = exp_first ? r0 : r1;
6431 Register lhs_exponent = exp_first ? r2 : r3;
6432 Register rhs_mantissa = exp_first ? r1 : r0;
6433 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00006434 Label one_is_nan, neither_is_nan;
Leon Clarkee46be812010-01-19 14:06:41 +00006435 Label lhs_not_nan_exp_mask_is_loaded;
Steve Blocka7e24c12009-10-30 11:49:00 +00006436
6437 Register exp_mask_reg = r5;
6438
6439 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00006440 __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
6441 __ cmp(r4, Operand(exp_mask_reg));
Leon Clarkee46be812010-01-19 14:06:41 +00006442 __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
Steve Blocka7e24c12009-10-30 11:49:00 +00006443 __ mov(r4,
6444 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
6445 SetCC);
6446 __ b(ne, &one_is_nan);
6447 __ cmp(lhs_mantissa, Operand(0));
Leon Clarkee46be812010-01-19 14:06:41 +00006448 __ b(ne, &one_is_nan);
6449
6450 __ bind(lhs_not_nan);
6451 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
6452 __ bind(&lhs_not_nan_exp_mask_is_loaded);
6453 __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
6454 __ cmp(r4, Operand(exp_mask_reg));
6455 __ b(ne, &neither_is_nan);
6456 __ mov(r4,
6457 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
6458 SetCC);
6459 __ b(ne, &one_is_nan);
6460 __ cmp(rhs_mantissa, Operand(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00006461 __ b(eq, &neither_is_nan);
6462
6463 __ bind(&one_is_nan);
6464 // NaN comparisons always fail.
6465 // Load whatever we need in r0 to make the comparison fail.
6466 if (cc == lt || cc == le) {
6467 __ mov(r0, Operand(GREATER));
6468 } else {
6469 __ mov(r0, Operand(LESS));
6470 }
6471 __ mov(pc, Operand(lr)); // Return.
6472
6473 __ bind(&neither_is_nan);
6474}
6475
6476
6477// See comment at call site.
6478static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
6479 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00006480 Register rhs_exponent = exp_first ? r0 : r1;
6481 Register lhs_exponent = exp_first ? r2 : r3;
6482 Register rhs_mantissa = exp_first ? r1 : r0;
6483 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00006484
6485 // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
6486 if (cc == eq) {
6487 // Doubles are not equal unless they have the same bit pattern.
6488 // Exception: 0 and -0.
Leon Clarkee46be812010-01-19 14:06:41 +00006489 __ cmp(rhs_mantissa, Operand(lhs_mantissa));
6490 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
Steve Blocka7e24c12009-10-30 11:49:00 +00006491 // Return non-zero if the numbers are unequal.
6492 __ mov(pc, Operand(lr), LeaveCC, ne);
6493
Leon Clarkee46be812010-01-19 14:06:41 +00006494 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00006495 // If exponents are equal then return 0.
6496 __ mov(pc, Operand(lr), LeaveCC, eq);
6497
6498 // Exponents are unequal. The only way we can return that the numbers
6499 // are equal is if one is -0 and the other is 0. We already dealt
6500 // with the case where both are -0 or both are 0.
6501 // We start by seeing if the mantissas (that are equal) or the bottom
6502 // 31 bits of the rhs exponent are non-zero. If so we return not
6503 // equal.
Leon Clarkee46be812010-01-19 14:06:41 +00006504 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00006505 __ mov(r0, Operand(r4), LeaveCC, ne);
6506 __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
6507 // Now they are equal if and only if the lhs exponent is zero in its
6508 // low 31 bits.
Leon Clarkee46be812010-01-19 14:06:41 +00006509 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00006510 __ mov(pc, Operand(lr));
6511 } else {
6512 // Call a native function to do a comparison between two non-NaNs.
6513 // Call C routine that may not cause GC or other trouble.
Steve Block6ded16b2010-05-10 14:33:55 +01006514 __ push(lr);
6515 __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
6516 __ CallCFunction(ExternalReference::compare_doubles(), 4);
6517 __ pop(pc); // Return.
Steve Blocka7e24c12009-10-30 11:49:00 +00006518 }
6519}
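
// A minimal standalone sketch, not V8 code: the one exception handled above
// when comparing doubles by their bit patterns.  +0.0 and -0.0 differ only in
// the sign bit, yet they must compare equal, which is why the code cannot
// simply report "unequal" whenever any of the four words differ.  Kept out of
// the build with #if 0; it compiles and runs on its own.
#if 0
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double plus_zero = 0.0;
  double minus_zero = -0.0;
  uint64_t a, b;
  memcpy(&a, &plus_zero, sizeof a);
  memcpy(&b, &minus_zero, sizeof b);
  assert(a != b);                    // Bit patterns differ (sign bit only).
  assert(plus_zero == minus_zero);   // Yet the values compare equal.
  return 0;
}
#endif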
6520
6521
6522// See comment at call site.
6523static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
6524 // If either operand is a JSObject or an oddball value, then they are
6525 // not equal since their pointers are different.
6526 // There is no test for undetectability in strict equality.
6527 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
6528 Label first_non_object;
6529 // Get the type of the first operand into r2 and compare it with
6530 // FIRST_JS_OBJECT_TYPE.
6531 __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
6532 __ b(lt, &first_non_object);
6533
6534 // Return non-zero (r0 is not zero)
6535 Label return_not_equal;
6536 __ bind(&return_not_equal);
6537 __ mov(pc, Operand(lr)); // Return.
6538
6539 __ bind(&first_non_object);
6540 // Check for oddballs: true, false, null, undefined.
6541 __ cmp(r2, Operand(ODDBALL_TYPE));
6542 __ b(eq, &return_not_equal);
6543
6544 __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
6545 __ b(ge, &return_not_equal);
6546
6547 // Check for oddballs: true, false, null, undefined.
6548 __ cmp(r3, Operand(ODDBALL_TYPE));
6549 __ b(eq, &return_not_equal);
Leon Clarkee46be812010-01-19 14:06:41 +00006550
6551 // Now that we have the types we might as well check for symbol-symbol.
6552 // Ensure that no non-strings have the symbol bit set.
6553 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
6554 ASSERT(kSymbolTag != 0);
6555 __ and_(r2, r2, Operand(r3));
6556 __ tst(r2, Operand(kIsSymbolMask));
6557 __ b(ne, &return_not_equal);
Steve Blocka7e24c12009-10-30 11:49:00 +00006558}
6559
6560
6561// See comment at call site.
6562static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
6563 Label* both_loaded_as_doubles,
6564 Label* not_heap_numbers,
6565 Label* slow) {
Leon Clarkee46be812010-01-19 14:06:41 +00006566 __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00006567 __ b(ne, not_heap_numbers);
Leon Clarkee46be812010-01-19 14:06:41 +00006568 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
6569 __ cmp(r2, r3);
Steve Blocka7e24c12009-10-30 11:49:00 +00006570 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
6571
6572 // Both are heap numbers. Load them up then jump to the code we have
6573 // for that.
Leon Clarked91b9f72010-01-27 17:25:45 +00006574 if (CpuFeatures::IsSupported(VFP3)) {
6575 CpuFeatures::Scope scope(VFP3);
6576 __ sub(r7, r0, Operand(kHeapObjectTag));
6577 __ vldr(d6, r7, HeapNumber::kValueOffset);
6578 __ sub(r7, r1, Operand(kHeapObjectTag));
6579 __ vldr(d7, r7, HeapNumber::kValueOffset);
6580 } else {
Leon Clarkef7060e22010-06-03 12:02:55 +01006581 __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
6582 __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
Leon Clarked91b9f72010-01-27 17:25:45 +00006583 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006584 __ jmp(both_loaded_as_doubles);
6585}
6586
6587
6588// Fast negative check for symbol-to-symbol equality.
6589static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
6590 // r2 is object type of r0.
Leon Clarkee46be812010-01-19 14:06:41 +00006591 // Ensure that no non-strings have the symbol bit set.
6592 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
6593 ASSERT(kSymbolTag != 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00006594 __ tst(r2, Operand(kIsSymbolMask));
6595 __ b(eq, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00006596 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
6597 __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00006598 __ tst(r3, Operand(kIsSymbolMask));
6599 __ b(eq, slow);
6600
6601 // Both are symbols. We already checked they weren't the same pointer
6602 // so they are not equal.
6603 __ mov(r0, Operand(1)); // Non-zero indicates not equal.
6604 __ mov(pc, Operand(lr)); // Return.
6605}
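
// A minimal standalone sketch, not V8 code: why "different pointers" is enough
// to conclude "not equal" once both operands are known to be symbols.  Symbols
// are interned, so two symbols with the same characters are the same object.
// The intern table below is only an illustration; V8's symbol table is a heap
// object keyed by string hash.  Kept out of the build with #if 0; it compiles
// and runs on its own.
#if 0
#include <cassert>
#include <string>
#include <unordered_set>

static const std::string* Intern(const std::string& s) {
  static std::unordered_set<std::string> table;
  return &*table.insert(s).first;  // Pointers to elements survive rehashing.
}

int main() {
  const std::string* a = Intern("foo");
  const std::string* b = Intern("foo");
  const std::string* c = Intern("bar");
  assert(a == b);  // Same characters, same interned object.
  assert(a != c);  // Different pointers, therefore definitely not equal.
  return 0;
}
#endif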
6606
6607
Steve Block6ded16b2010-05-10 14:33:55 +01006608void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
6609 Register object,
6610 Register result,
6611 Register scratch1,
6612 Register scratch2,
6613 Register scratch3,
6614 bool object_is_smi,
6615 Label* not_found) {
6616 // Use of registers. Register result is used as a temporary.
6617 Register number_string_cache = result;
6618 Register mask = scratch3;
6619
6620 // Load the number string cache.
6621 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
6622
6623 // Make the hash mask from the length of the number string cache. It
6624 // contains two elements (number and string) for each cache entry.
6625 __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
6626 // Divide length by two (length is not a smi).
6627 __ mov(mask, Operand(mask, ASR, 1));
6628 __ sub(mask, mask, Operand(1)); // Make mask.
6629
6630 // Calculate the entry in the number string cache. The hash value in the
6631 // number string cache for smis is just the smi value, and the hash for
6632 // doubles is the xor of the upper and lower words. See
6633 // Heap::GetNumberStringCache.
6634 Label is_smi;
6635 Label load_result_from_cache;
6636 if (!object_is_smi) {
6637 __ BranchOnSmi(object, &is_smi);
6638 if (CpuFeatures::IsSupported(VFP3)) {
6639 CpuFeatures::Scope scope(VFP3);
6640 __ CheckMap(object,
6641 scratch1,
6642 Factory::heap_number_map(),
6643 not_found,
6644 true);
6645
6646 ASSERT_EQ(8, kDoubleSize);
6647 __ add(scratch1,
6648 object,
6649 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
6650 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
6651 __ eor(scratch1, scratch1, Operand(scratch2));
6652 __ and_(scratch1, scratch1, Operand(mask));
6653
6654 // Calculate address of entry in string cache: each entry consists
6655 // of two pointer sized fields.
6656 __ add(scratch1,
6657 number_string_cache,
6658 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
6659
6660 Register probe = mask;
6661 __ ldr(probe,
6662 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
6663 __ BranchOnSmi(probe, not_found);
6664 __ sub(scratch2, object, Operand(kHeapObjectTag));
6665 __ vldr(d0, scratch2, HeapNumber::kValueOffset);
6666 __ sub(probe, probe, Operand(kHeapObjectTag));
6667 __ vldr(d1, probe, HeapNumber::kValueOffset);
6668 __ vcmp(d0, d1);
6669 __ vmrs(pc);
6670 __ b(ne, not_found); // The cache did not contain this value.
6671 __ b(&load_result_from_cache);
6672 } else {
6673 __ b(not_found);
6674 }
6675 }
6676
6677 __ bind(&is_smi);
6678 Register scratch = scratch1;
6679 __ and_(scratch, mask, Operand(object, ASR, 1));
6680 // Calculate address of entry in string cache: each entry consists
6681 // of two pointer sized fields.
6682 __ add(scratch,
6683 number_string_cache,
6684 Operand(scratch, LSL, kPointerSizeLog2 + 1));
6685
6686 // Check if the entry is the smi we are looking for.
6687 Register probe = mask;
6688 __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
6689 __ cmp(object, probe);
6690 __ b(ne, not_found);
6691
6692 // Get the result from the cache.
6693 __ bind(&load_result_from_cache);
6694 __ ldr(result,
6695 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
6696 __ IncrementCounter(&Counters::number_to_string_native,
6697 1,
6698 scratch1,
6699 scratch2);
6700}
6701
6702
6703void NumberToStringStub::Generate(MacroAssembler* masm) {
6704 Label runtime;
6705
6706 __ ldr(r1, MemOperand(sp, 0));
6707
6708 // Generate code to lookup number in the number string cache.
6709 GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
6710 __ add(sp, sp, Operand(1 * kPointerSize));
6711 __ Ret();
6712
6713 __ bind(&runtime);
6714 // Handle number to string in the runtime system if not found in the cache.
6715 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
6716}
6717
6718
6719void RecordWriteStub::Generate(MacroAssembler* masm) {
6720 __ RecordWriteHelper(object_, offset_, scratch_);
6721 __ Ret();
6722}
6723
6724
Leon Clarked91b9f72010-01-27 17:25:45 +00006725// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
6726// On exit r0 is 0, positive or negative to indicate the result of
6727// the comparison.
Steve Blocka7e24c12009-10-30 11:49:00 +00006728void CompareStub::Generate(MacroAssembler* masm) {
6729 Label slow; // Call builtin.
Leon Clarkee46be812010-01-19 14:06:41 +00006730 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
Steve Blocka7e24c12009-10-30 11:49:00 +00006731
6732 // NOTICE! This code is only reached after a smi-fast-case check, so
6733 // it is certain that at least one operand isn't a smi.
6734
6735 // Handle the case where the objects are identical. Either returns the answer
6736 // or goes to slow. Only falls through if the objects were not identical.
Leon Clarkee46be812010-01-19 14:06:41 +00006737 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006738
6739 // If either is a Smi (we know that not both are), then they can only
6740 // be strictly equal if the other is a HeapNumber.
6741 ASSERT_EQ(0, kSmiTag);
6742 ASSERT_EQ(0, Smi::FromInt(0));
6743 __ and_(r2, r0, Operand(r1));
6744 __ tst(r2, Operand(kSmiTagMask));
6745 __ b(ne, &not_smis);
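  // The AND of the two tagged values has a clear low bit only if at least one
  // of them is a smi (kSmiTag is 0), and we already know they are not both
  // smis, so falling through here means exactly one operand is a smi.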
6746 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
6747 // 1) Return the answer.
6748 // 2) Go to slow.
6749 // 3) Fall through to both_loaded_as_doubles.
Leon Clarkee46be812010-01-19 14:06:41 +00006750 // 4) Jump to lhs_not_nan.
Steve Blocka7e24c12009-10-30 11:49:00 +00006751 // In cases 3 and 4 we have found out we were dealing with a number-number
Leon Clarked91b9f72010-01-27 17:25:45 +00006752 // comparison. If VFP3 is supported the double values of the numbers have
6753 // been loaded into d7 and d6. Otherwise, the double values have been loaded
6754 // into r0, r1, r2, and r3.
Leon Clarkee46be812010-01-19 14:06:41 +00006755 EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006756
6757 __ bind(&both_loaded_as_doubles);
Leon Clarked91b9f72010-01-27 17:25:45 +00006758 // The arguments have been converted to doubles and stored in d6 and d7, if
6759 // VFP3 is supported, or in r0, r1, r2, and r3.
Steve Blockd0582a62009-12-15 09:54:21 +00006760 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarkee46be812010-01-19 14:06:41 +00006761 __ bind(&lhs_not_nan);
Steve Blockd0582a62009-12-15 09:54:21 +00006762 CpuFeatures::Scope scope(VFP3);
Leon Clarkee46be812010-01-19 14:06:41 +00006763 Label no_nan;
Steve Blockd0582a62009-12-15 09:54:21 +00006764 // ARMv7 VFP3 instructions to implement double precision comparison.
Leon Clarkee46be812010-01-19 14:06:41 +00006765 __ vcmp(d7, d6);
6766 __ vmrs(pc); // Move vector status bits to normal status bits.
6767 Label nan;
6768 __ b(vs, &nan);
6769 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
6770 __ mov(r0, Operand(LESS), LeaveCC, lt);
6771 __ mov(r0, Operand(GREATER), LeaveCC, gt);
6772 __ mov(pc, Operand(lr));
6773
6774 __ bind(&nan);
6775 // If one of the sides was a NaN then the v flag is set. Load r0 with
6776 // whatever it takes to make the comparison fail, since comparisons with NaN
6777 // always fail.
6778 if (cc_ == lt || cc_ == le) {
6779 __ mov(r0, Operand(GREATER));
6780 } else {
6781 __ mov(r0, Operand(LESS));
6782 }
Steve Blockd0582a62009-12-15 09:54:21 +00006783 __ mov(pc, Operand(lr));
6784 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00006785 // Checks for NaN in the doubles we have loaded. Can return the answer or
6786 // fall through if neither is a NaN. Also binds lhs_not_nan.
6787 EmitNanCheck(masm, &lhs_not_nan, cc_);
Steve Blockd0582a62009-12-15 09:54:21 +00006788 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
6789 // answer. Never falls through.
6790 EmitTwoNonNanDoubleComparison(masm, cc_);
6791 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006792
6793 __ bind(&not_smis);
6794 // At this point we know we are dealing with two different objects,
6795 // and neither of them is a Smi. The objects are in r0 and r1.
6796 if (strict_) {
6797 // This returns non-equal for some object types, or falls through if it
6798 // was not lucky.
6799 EmitStrictTwoHeapObjectCompare(masm);
6800 }
6801
6802 Label check_for_symbols;
Leon Clarked91b9f72010-01-27 17:25:45 +00006803 Label flat_string_check;
Steve Blocka7e24c12009-10-30 11:49:00 +00006804 // Check for heap-number-heap-number comparison. Can jump to slow case,
6805 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
6806 // that case. If the inputs are not doubles it jumps to check_for_symbols.
Leon Clarkee46be812010-01-19 14:06:41 +00006807 // In this case r2 will contain the type of r0. Never falls through.
Steve Blocka7e24c12009-10-30 11:49:00 +00006808 EmitCheckForTwoHeapNumbers(masm,
6809 &both_loaded_as_doubles,
6810 &check_for_symbols,
Leon Clarked91b9f72010-01-27 17:25:45 +00006811 &flat_string_check);
Steve Blocka7e24c12009-10-30 11:49:00 +00006812
6813 __ bind(&check_for_symbols);
Leon Clarkee46be812010-01-19 14:06:41 +00006814 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
6815 // symbols.
6816 if (cc_ == eq && !strict_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006817 // Either jumps to slow or returns the answer. Assumes that r2 is the type
6818 // of r0 on entry.
Leon Clarked91b9f72010-01-27 17:25:45 +00006819 EmitCheckForSymbols(masm, &flat_string_check);
Steve Blocka7e24c12009-10-30 11:49:00 +00006820 }
6821
Leon Clarked91b9f72010-01-27 17:25:45 +00006822 // Check for both being sequential ASCII strings, and inline if that is the
6823 // case.
6824 __ bind(&flat_string_check);
6825
6826 __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
6827
6828 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
6829 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
6830 r1,
6831 r0,
6832 r2,
6833 r3,
6834 r4,
6835 r5);
6836 // Never falls through to here.
6837
Steve Blocka7e24c12009-10-30 11:49:00 +00006838 __ bind(&slow);
Leon Clarked91b9f72010-01-27 17:25:45 +00006839
Steve Block6ded16b2010-05-10 14:33:55 +01006840 __ Push(r1, r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00006841 // Figure out which native to call and setup the arguments.
6842 Builtins::JavaScript native;
Steve Blocka7e24c12009-10-30 11:49:00 +00006843 if (cc_ == eq) {
6844 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
6845 } else {
6846 native = Builtins::COMPARE;
6847 int ncr; // NaN compare result
6848 if (cc_ == lt || cc_ == le) {
6849 ncr = GREATER;
6850 } else {
6851 ASSERT(cc_ == gt || cc_ == ge); // remaining cases
6852 ncr = LESS;
6853 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006854 __ mov(r0, Operand(Smi::FromInt(ncr)));
6855 __ push(r0);
6856 }
6857
6858 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
6859 // tagged as a small integer.
Leon Clarkee46be812010-01-19 14:06:41 +00006860 __ InvokeBuiltin(native, JUMP_JS);
Steve Blocka7e24c12009-10-30 11:49:00 +00006861}
6862
6863
Steve Blocka7e24c12009-10-30 11:49:00 +00006864// We fall into this code if the operands were Smis, but the result was
6865// not (e.g. overflow). We branch into this code (to the not_smi label) if
6866// the operands were not both Smi. The operands are in r0 and r1. In order
6867// to call the C-implemented binary fp operation routines we need to end up
6868// with the double precision floating point operands in r0 and r1 (for the
6869// value in r1) and r2 and r3 (for the value in r0).
Steve Block6ded16b2010-05-10 14:33:55 +01006870void GenericBinaryOpStub::HandleBinaryOpSlowCases(
6871 MacroAssembler* masm,
6872 Label* not_smi,
6873 Register lhs,
6874 Register rhs,
6875 const Builtins::JavaScript& builtin) {
6876 Label slow, slow_reverse, do_the_call;
6877 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
Steve Blockd0582a62009-12-15 09:54:21 +00006878
Steve Block6ded16b2010-05-10 14:33:55 +01006879 ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
6880
6881 if (ShouldGenerateSmiCode()) {
6882 // Smi-smi case (overflow).
6883 // Since both are Smis there is no heap number to overwrite, so allocate.
6884 // The new heap number is in r5. r6 and r7 are scratch.
6885 __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
6886
6887 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
6888 // using registers d7 and d6 for the double values.
6889 if (use_fp_registers) {
6890 CpuFeatures::Scope scope(VFP3);
6891 __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
6892 __ vmov(s15, r7);
6893 __ vcvt_f64_s32(d7, s15);
6894 __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
6895 __ vmov(s13, r7);
6896 __ vcvt_f64_s32(d6, s13);
6897 } else {
6898 // Write Smi from rhs to r3 and r2 in double format. r6 is scratch.
6899 __ mov(r7, Operand(rhs));
6900 ConvertToDoubleStub stub1(r3, r2, r7, r6);
6901 __ push(lr);
6902 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
6903 // Write Smi from lhs to r1 and r0 in double format. r6 is scratch.
6904 __ mov(r7, Operand(lhs));
6905 ConvertToDoubleStub stub2(r1, r0, r7, r6);
6906 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
6907 __ pop(lr);
6908 }
6909 __ jmp(&do_the_call); // Tail call. No return.
Steve Blockd0582a62009-12-15 09:54:21 +00006910 }
6911
Steve Block6ded16b2010-05-10 14:33:55 +01006912 // We branch here if at least one of r0 and r1 is not a Smi.
6913 __ bind(not_smi);
6914
6915 // After this point we have the left hand side in r1 and the right hand side
6916 // in r0.
6917 if (lhs.is(r0)) {
6918 __ Swap(r0, r1, ip);
6919 }
6920
6921 if (ShouldGenerateFPCode()) {
6922 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
6923
6924 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
6925 switch (op_) {
6926 case Token::ADD:
6927 case Token::SUB:
6928 case Token::MUL:
6929 case Token::DIV:
6930 GenerateTypeTransition(masm);
6931 break;
6932
6933 default:
6934 break;
6935 }
6936 }
6937
6938 if (mode_ == NO_OVERWRITE) {
6939 // In the case where there is no chance of an overwritable float we may as
6940 // well do the allocation immediately while r0 and r1 are untouched.
6941 __ AllocateHeapNumber(r5, r6, r7, &slow);
6942 }
6943
6944 // Move r0 to a double in r2-r3.
6945 __ tst(r0, Operand(kSmiTagMask));
6946 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
6947 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6948 __ b(ne, &slow);
6949 if (mode_ == OVERWRITE_RIGHT) {
6950 __ mov(r5, Operand(r0)); // Overwrite this heap number.
6951 }
6952 if (use_fp_registers) {
6953 CpuFeatures::Scope scope(VFP3);
6954 // Load the double from tagged HeapNumber r0 to d7.
6955 __ sub(r7, r0, Operand(kHeapObjectTag));
6956 __ vldr(d7, r7, HeapNumber::kValueOffset);
6957 } else {
6958 // Calling convention says that second double is in r2 and r3.
Leon Clarkef7060e22010-06-03 12:02:55 +01006959 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01006960 }
6961 __ jmp(&finished_loading_r0);
6962 __ bind(&r0_is_smi);
6963 if (mode_ == OVERWRITE_RIGHT) {
6964 // We can't overwrite a Smi so get address of new heap number into r5.
6965 __ AllocateHeapNumber(r5, r6, r7, &slow);
6966 }
6967
6968 if (use_fp_registers) {
6969 CpuFeatures::Scope scope(VFP3);
6970 // Convert smi in r0 to double in d7.
6971 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
6972 __ vmov(s15, r7);
6973 __ vcvt_f64_s32(d7, s15);
6974 } else {
6975 // Write Smi from r0 to r3 and r2 in double format.
6976 __ mov(r7, Operand(r0));
6977 ConvertToDoubleStub stub3(r3, r2, r7, r6);
6978 __ push(lr);
6979 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
6980 __ pop(lr);
6981 }
6982
6983 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
6984 // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
6985 Label r1_is_not_smi;
6986 if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
6987 __ tst(r1, Operand(kSmiTagMask));
6988 __ b(ne, &r1_is_not_smi);
6989 GenerateTypeTransition(masm);
6990 __ jmp(&r1_is_smi);
6991 }
6992
6993 __ bind(&finished_loading_r0);
6994
6995 // Move r1 to a double in r0-r1.
6996 __ tst(r1, Operand(kSmiTagMask));
6997 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
6998 __ bind(&r1_is_not_smi);
6999 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
7000 __ b(ne, &slow);
7001 if (mode_ == OVERWRITE_LEFT) {
7002 __ mov(r5, Operand(r1)); // Overwrite this heap number.
7003 }
7004 if (use_fp_registers) {
7005 CpuFeatures::Scope scope(VFP3);
7006 // Load the double from tagged HeapNumber r1 to d6.
7007 __ sub(r7, r1, Operand(kHeapObjectTag));
7008 __ vldr(d6, r7, HeapNumber::kValueOffset);
7009 } else {
7010 // Calling convention says that first double is in r0 and r1.
Leon Clarkef7060e22010-06-03 12:02:55 +01007011 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01007012 }
7013 __ jmp(&finished_loading_r1);
7014 __ bind(&r1_is_smi);
7015 if (mode_ == OVERWRITE_LEFT) {
7016 // We can't overwrite a Smi so get address of new heap number into r5.
7017 __ AllocateHeapNumber(r5, r6, r7, &slow);
7018 }
7019
7020 if (use_fp_registers) {
7021 CpuFeatures::Scope scope(VFP3);
7022 // Convert smi in r1 to double in d6.
7023 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
7024 __ vmov(s13, r7);
7025 __ vcvt_f64_s32(d6, s13);
7026 } else {
7027 // Write Smi from r1 to r1 and r0 in double format.
7028 __ mov(r7, Operand(r1));
7029 ConvertToDoubleStub stub4(r1, r0, r7, r6);
7030 __ push(lr);
7031 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
7032 __ pop(lr);
7033 }
7034
7035 __ bind(&finished_loading_r1);
7036
7037 __ bind(&do_the_call);
7038 // If we are inlining the operation using VFP3 instructions for
7039 // add, subtract, multiply, or divide, the arguments are in d6 and d7.
7040 if (use_fp_registers) {
7041 CpuFeatures::Scope scope(VFP3);
7042 // ARMv7 VFP3 instructions to implement
7043 // double precision, add, subtract, multiply, divide.
7044
7045 if (Token::MUL == op_) {
7046 __ vmul(d5, d6, d7);
7047 } else if (Token::DIV == op_) {
7048 __ vdiv(d5, d6, d7);
7049 } else if (Token::ADD == op_) {
7050 __ vadd(d5, d6, d7);
7051 } else if (Token::SUB == op_) {
7052 __ vsub(d5, d6, d7);
7053 } else {
7054 UNREACHABLE();
7055 }
7056 __ sub(r0, r5, Operand(kHeapObjectTag));
7057 __ vstr(d5, r0, HeapNumber::kValueOffset);
7058 __ add(r0, r0, Operand(kHeapObjectTag));
7059 __ mov(pc, lr);
7060 } else {
7061 // If we did not inline the operation, then the arguments are in:
7062 // r0: Left value (least significant part of mantissa).
7063 // r1: Left value (sign, exponent, top of mantissa).
7064 // r2: Right value (least significant part of mantissa).
7065 // r3: Right value (sign, exponent, top of mantissa).
7066 // r5: Address of heap number for result.
7067
7068 __ push(lr); // For later.
7069 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
7070 // Call C routine that may not cause GC or other trouble. r5 is callee
7071 // save.
7072 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
7073 // Store answer in the overwritable heap number.
7074 #if !defined(USE_ARM_EABI)
7075 // Double returned in fp coprocessor register 0 and 1, encoded as register
7076 // cr8. Offsets must be divisible by 4 for coprocessor so we need to
7077 // substract the tag from r5.
7078 __ sub(r4, r5, Operand(kHeapObjectTag));
7079 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
7080 #else
7081 // Double returned in registers 0 and 1.
Leon Clarkef7060e22010-06-03 12:02:55 +01007082 __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01007083 #endif
7084 __ mov(r0, Operand(r5));
7085 // And we are done.
7086 __ pop(pc);
7087 }
7088 }
7089
7090
7091 if (lhs.is(r0)) {
7092 __ b(&slow);
7093 __ bind(&slow_reverse);
7094 __ Swap(r0, r1, ip);
7095 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007096
7097 // We jump to here if something goes wrong (one param is not a number of any
7098 // sort or new-space allocation fails).
7099 __ bind(&slow);
Steve Blockd0582a62009-12-15 09:54:21 +00007100
7101 // Push arguments to the stack
Steve Block6ded16b2010-05-10 14:33:55 +01007102 __ Push(r1, r0);
Steve Blockd0582a62009-12-15 09:54:21 +00007103
Steve Block6ded16b2010-05-10 14:33:55 +01007104 if (Token::ADD == op_) {
Steve Blockd0582a62009-12-15 09:54:21 +00007105 // Test for string arguments before calling runtime.
7106 // r1 : first argument
7107 // r0 : second argument
7108 // sp[0] : second argument
Andrei Popescu31002712010-02-23 13:46:05 +00007109 // sp[4] : first argument
Steve Blockd0582a62009-12-15 09:54:21 +00007110
Steve Block6ded16b2010-05-10 14:33:55 +01007111 Label not_strings, not_string1, string1, string1_smi2;
Steve Blockd0582a62009-12-15 09:54:21 +00007112 __ tst(r1, Operand(kSmiTagMask));
7113 __ b(eq, &not_string1);
7114 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
7115 __ b(ge, &not_string1);
7116
7117 // First argument is a string, test second.
7118 __ tst(r0, Operand(kSmiTagMask));
Steve Block6ded16b2010-05-10 14:33:55 +01007119 __ b(eq, &string1_smi2);
Steve Blockd0582a62009-12-15 09:54:21 +00007120 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
7121 __ b(ge, &string1);
7122
7123 // First and second argument are strings.
Steve Block6ded16b2010-05-10 14:33:55 +01007124 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
7125 __ TailCallStub(&string_add_stub);
7126
7127 __ bind(&string1_smi2);
7128 // First argument is a string, second is a smi. Try to lookup the number
7129 // string for the smi in the number string cache.
7130 NumberToStringStub::GenerateLookupNumberStringCache(
7131 masm, r0, r2, r4, r5, r6, true, &string1);
7132
7133 // Replace second argument on stack and tailcall string add stub to make
7134 // the result.
7135 __ str(r2, MemOperand(sp, 0));
7136 __ TailCallStub(&string_add_stub);
Steve Blockd0582a62009-12-15 09:54:21 +00007137
7138 // Only first argument is a string.
7139 __ bind(&string1);
7140 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
7141
7142 // First argument was not a string, test second.
7143 __ bind(&not_string1);
7144 __ tst(r0, Operand(kSmiTagMask));
7145 __ b(eq, &not_strings);
7146 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
7147 __ b(ge, &not_strings);
7148
7149 // Only second argument is a string.
Steve Blockd0582a62009-12-15 09:54:21 +00007150 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
7151
7152 __ bind(&not_strings);
7153 }
7154
Steve Blocka7e24c12009-10-30 11:49:00 +00007155 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
Steve Blocka7e24c12009-10-30 11:49:00 +00007156}
7157
7158
7159// Tries to get a signed int32 out of a double precision floating point heap
7160// number. Rounds towards 0. Fastest for doubles that are in the ranges
7161// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
7162// almost to the range of signed int32 values that are not Smis. Jumps to the
7163// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
7164// (excluding the endpoints).
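// For example, 1073741824.0 (2^30) has biased exponent 1023 + 30 = 1053,
// which matches non_smi_exponent below and takes the fast path, while
// 2147483648.0 (2^31) has biased exponent 1054 and is sent to the slow case.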
7165static void GetInt32(MacroAssembler* masm,
7166 Register source,
7167 Register dest,
7168 Register scratch,
7169 Register scratch2,
7170 Label* slow) {
7171 Label right_exponent, done;
7172 // Get exponent word.
7173 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
7174 // Get exponent alone in scratch2.
7175 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
7176 // Load dest with zero. We use this either for the final shift or
7177 // for the answer.
7178 __ mov(dest, Operand(0));
7179 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
7180 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
7181 // the exponent that we are fastest at and also the highest exponent we can
7182 // handle here.
7183 const uint32_t non_smi_exponent =
7184 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
7185 __ cmp(scratch2, Operand(non_smi_exponent));
7186 // If we have a match of the int32-but-not-Smi exponent then skip some logic.
7187 __ b(eq, &right_exponent);
7188 // If the exponent is higher than that then go to slow case. This catches
7189 // numbers that don't fit in a signed int32, infinities and NaNs.
7190 __ b(gt, slow);
7191
7192 // We know the exponent is smaller than 30 (biased). If it is less than
7193 // 0 (biased), then the number is smaller in magnitude than 1.0 * 2^0, i.e.
7194 // it rounds to zero.
7195 const uint32_t zero_exponent =
7196 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
7197 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
7198 // Dest already has a Smi zero.
7199 __ b(lt, &done);
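  // For example, 0.5 has biased exponent 1022, which is below zero_exponent,
  // so the subtraction above is negative and the smi zero already in dest is
  // the result.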
Steve Blockd0582a62009-12-15 09:54:21 +00007200 if (!CpuFeatures::IsSupported(VFP3)) {
7201 // We have a shifted exponent between 0 and 30 in scratch2.
7202 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
7203 // We now have the exponent in dest. Subtract from 30 to get
7204 // how much to shift down.
7205 __ rsb(dest, dest, Operand(30));
7206 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007207 __ bind(&right_exponent);
Steve Blockd0582a62009-12-15 09:54:21 +00007208 if (CpuFeatures::IsSupported(VFP3)) {
7209 CpuFeatures::Scope scope(VFP3);
7210 // ARMv7 VFP3 instructions implementing double precision to integer
7211 // conversion using round to zero.
7212 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00007213 __ vmov(d7, scratch2, scratch);
Steve Block6ded16b2010-05-10 14:33:55 +01007214 __ vcvt_s32_f64(s15, d7);
Leon Clarkee46be812010-01-19 14:06:41 +00007215 __ vmov(dest, s15);
Steve Blockd0582a62009-12-15 09:54:21 +00007216 } else {
7217 // Get the top bits of the mantissa.
7218 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
7219 // Put back the implicit 1.
7220 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
7221 // Shift up the mantissa bits to take up the space the exponent used to
7222 // take. We just ORed in the implicit bit, which took care of one, and
7223 // we want to leave the sign bit 0, so we subtract 2 bits from the shift
7224 // distance.
7225 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
7226 __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
7227 // Put sign in zero flag.
7228 __ tst(scratch, Operand(HeapNumber::kSignMask));
7229 // Get the second half of the double. For some exponents we don't
7230 // actually need this because the bits get shifted out again, but
7231 // it's probably slower to test than just to do it.
7232 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
7233 // Shift down 22 bits to get the most significant 10 bits of this word.
7234 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
7235 // Move down according to the exponent.
7236 __ mov(dest, Operand(scratch, LSR, dest));
7237 // Fix sign if sign bit was set.
7238 __ rsb(dest, dest, Operand(0), LeaveCC, ne);
7239 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007240 __ bind(&done);
7241}
7242
Steve Blocka7e24c12009-10-30 11:49:00 +00007243// For bitwise ops where the inputs are not both Smis we here try to determine
7244// whether both inputs are either Smis or at least heap numbers that can be
7245// represented by a 32 bit signed value. We truncate towards zero as required
7246// by the ES spec. If this is the case we do the bitwise op and see if the
7247// result is a Smi. If so, great, otherwise we try to find a heap number to
7248// write the answer into (either by allocating or by overwriting).
Steve Block6ded16b2010-05-10 14:33:55 +01007249// On entry the operands are in lhs and rhs. On exit the answer is in r0.
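// For example, or-ing a heap number holding 2^30 with the smi 0 gives 2^30,
// which is outside the smi range [-2^30, 2^30 - 1], so the answer is written
// into a heap number instead of being tagged.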
7250void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
7251 Register lhs,
7252 Register rhs) {
Steve Blocka7e24c12009-10-30 11:49:00 +00007253 Label slow, result_not_a_smi;
Steve Block6ded16b2010-05-10 14:33:55 +01007254 Label rhs_is_smi, lhs_is_smi;
7255 Label done_checking_rhs, done_checking_lhs;
Steve Blocka7e24c12009-10-30 11:49:00 +00007256
Steve Block6ded16b2010-05-10 14:33:55 +01007257 __ tst(lhs, Operand(kSmiTagMask));
7258 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
7259 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00007260 __ b(ne, &slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007261 GetInt32(masm, lhs, r3, r5, r4, &slow);
7262 __ jmp(&done_checking_lhs);
7263 __ bind(&lhs_is_smi);
7264 __ mov(r3, Operand(lhs, ASR, 1));
7265 __ bind(&done_checking_lhs);
Steve Blocka7e24c12009-10-30 11:49:00 +00007266
Steve Block6ded16b2010-05-10 14:33:55 +01007267 __ tst(rhs, Operand(kSmiTagMask));
7268 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
7269 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00007270 __ b(ne, &slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007271 GetInt32(masm, rhs, r2, r5, r4, &slow);
7272 __ jmp(&done_checking_rhs);
7273 __ bind(&rhs_is_smi);
7274 __ mov(r2, Operand(rhs, ASR, 1));
7275 __ bind(&done_checking_rhs);
7276
7277 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
Steve Blocka7e24c12009-10-30 11:49:00 +00007278
7279 // r0 and r1: Original operands (Smi or heap numbers).
7280 // r2 and r3: Signed int32 operands.
7281 switch (op_) {
7282 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
7283 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
7284 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
7285 case Token::SAR:
7286 // Use only the 5 least significant bits of the shift count.
7287 __ and_(r2, r2, Operand(0x1f));
7288 __ mov(r2, Operand(r3, ASR, r2));
7289 break;
7290 case Token::SHR:
7291 // Use only the 5 least significant bits of the shift count.
7292 __ and_(r2, r2, Operand(0x1f));
7293 __ mov(r2, Operand(r3, LSR, r2), SetCC);
7294 // SHR is special because it is required to produce a positive answer.
7295 // The code below for writing into heap numbers isn't capable of writing
7296 // the register as an unsigned int so we go to slow case if we hit this
7297 // case.
7298 __ b(mi, &slow);
7299 break;
7300 case Token::SHL:
7301 // Use only the 5 least significant bits of the shift count.
7302 __ and_(r2, r2, Operand(0x1f));
7303 __ mov(r2, Operand(r3, LSL, r2));
7304 break;
7305 default: UNREACHABLE();
7306 }
7307 // Check that the *signed* result fits in a smi.
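 // Adding 0x40000000 sets the N flag exactly when the value is outside the
 // smi range [-2^30, 2^30 - 1], i.e. when shifting it left by the tag size
 // would overflow.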
7308 __ add(r3, r2, Operand(0x40000000), SetCC);
7309 __ b(mi, &result_not_a_smi);
7310 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
7311 __ Ret();
7312
7313 Label have_to_allocate, got_a_heap_number;
7314 __ bind(&result_not_a_smi);
7315 switch (mode_) {
7316 case OVERWRITE_RIGHT: {
Steve Block6ded16b2010-05-10 14:33:55 +01007317 __ tst(rhs, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00007318 __ b(eq, &have_to_allocate);
Steve Block6ded16b2010-05-10 14:33:55 +01007319 __ mov(r5, Operand(rhs));
Steve Blocka7e24c12009-10-30 11:49:00 +00007320 break;
7321 }
7322 case OVERWRITE_LEFT: {
Steve Block6ded16b2010-05-10 14:33:55 +01007323 __ tst(lhs, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00007324 __ b(eq, &have_to_allocate);
Steve Block6ded16b2010-05-10 14:33:55 +01007325 __ mov(r5, Operand(lhs));
Steve Blocka7e24c12009-10-30 11:49:00 +00007326 break;
7327 }
7328 case NO_OVERWRITE: {
7329 // Get a new heap number in r5. r6 and r7 are scratch.
Steve Block6ded16b2010-05-10 14:33:55 +01007330 __ AllocateHeapNumber(r5, r6, r7, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007331 }
7332 default: break;
7333 }
7334 __ bind(&got_a_heap_number);
7335 // r2: Answer as signed int32.
7336 // r5: Heap number to write answer into.
7337
7338 // Nothing can go wrong now, so move the heap number to r0, which is the
7339 // result.
7340 __ mov(r0, Operand(r5));
7341
7342 // Tail call that writes the int32 in r2 to the heap number in r0, using
7343 // r3 as scratch. r0 is preserved and returned.
7344 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
7345 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
7346
7347 if (mode_ != NO_OVERWRITE) {
7348 __ bind(&have_to_allocate);
7349 // Get a new heap number in r5. r6 and r7 are scratch.
Steve Block6ded16b2010-05-10 14:33:55 +01007350 __ AllocateHeapNumber(r5, r6, r7, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007351 __ jmp(&got_a_heap_number);
7352 }
7353
7354 // If all else failed then we go to the runtime system.
7355 __ bind(&slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007356 __ Push(lhs, rhs); // Restore stack.
Steve Blocka7e24c12009-10-30 11:49:00 +00007357 switch (op_) {
7358 case Token::BIT_OR:
7359 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
7360 break;
7361 case Token::BIT_AND:
7362 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
7363 break;
7364 case Token::BIT_XOR:
7365 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
7366 break;
7367 case Token::SAR:
7368 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
7369 break;
7370 case Token::SHR:
7371 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
7372 break;
7373 case Token::SHL:
7374 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
7375 break;
7376 default:
7377 UNREACHABLE();
7378 }
7379}
7380
7381
7382// Can we multiply by x with at most two shifts and an add?
7383// This answers yes to all integers from 2 to 10.
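// For example: 8 is a power of two (one shift), 10 (0b1010) has only two bits
// set (shift, add, shift), and 7 is one less than a power of two (a single
// reverse-subtract with a shifted operand).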
7384static bool IsEasyToMultiplyBy(int x) {
7385 if (x < 2) return false; // Avoid special cases.
7386 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
7387 if (IsPowerOf2(x)) return true; // Simple shift.
7388 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
7389 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
7390 return false;
7391}
7392
7393
7394// Can multiply by anything that IsEasyToMultiplyBy returns true for.
7395// Source and destination may be the same register. This routine does
7396// not set carry and overflow the way a mul instruction would.
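// For example, multiplying by 10 takes the two-bit path: assuming BitPosition
// returns the index of the least significant set bit, this emits
//   add(destination, source, Operand(source, LSL, 2));  // source * 5
//   mov(destination, Operand(destination, LSL, 1));     // source * 10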
7397static void MultiplyByKnownInt(MacroAssembler* masm,
7398 Register source,
7399 Register destination,
7400 int known_int) {
7401 if (IsPowerOf2(known_int)) {
7402 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
7403 } else if (PopCountLessThanEqual2(known_int)) {
7404 int first_bit = BitPosition(known_int);
7405 int second_bit = BitPosition(known_int ^ (1 << first_bit));
7406 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
7407 if (first_bit != 0) {
7408 __ mov(destination, Operand(destination, LSL, first_bit));
7409 }
7410 } else {
7411 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
7412 int the_bit = BitPosition(known_int + 1);
7413 __ rsb(destination, source, Operand(source, LSL, the_bit));
7414 }
7415}
7416
7417
7418// This function (as opposed to MultiplyByKnownInt) takes the known int in
7419// a register for the cases where it doesn't know a good trick, and may deliver
7420// a result that needs shifting.
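// For example, for known_int == 6 it emits one add that computes source * 3
// and reports *required_shift == 2; the caller's later left shift by two both
// supplies the remaining factor of 2 and applies the smi tag.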
7421static void MultiplyByKnownInt2(
7422 MacroAssembler* masm,
7423 Register result,
7424 Register source,
7425 Register known_int_register, // Smi tagged.
7426 int known_int,
7427 int* required_shift) { // Including Smi tag shift
7428 switch (known_int) {
7429 case 3:
7430 __ add(result, source, Operand(source, LSL, 1));
7431 *required_shift = 1;
7432 break;
7433 case 5:
7434 __ add(result, source, Operand(source, LSL, 2));
7435 *required_shift = 1;
7436 break;
7437 case 6:
7438 __ add(result, source, Operand(source, LSL, 1));
7439 *required_shift = 2;
7440 break;
7441 case 7:
7442 __ rsb(result, source, Operand(source, LSL, 3));
7443 *required_shift = 1;
7444 break;
7445 case 9:
7446 __ add(result, source, Operand(source, LSL, 3));
7447 *required_shift = 1;
7448 break;
7449 case 10:
7450 __ add(result, source, Operand(source, LSL, 2));
7451 *required_shift = 2;
7452 break;
7453 default:
7454 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
7455 __ mul(result, source, known_int_register);
7456 *required_shift = 0;
7457 }
7458}
7459
7460
Leon Clarkee46be812010-01-19 14:06:41 +00007461const char* GenericBinaryOpStub::GetName() {
7462 if (name_ != NULL) return name_;
7463 const int len = 100;
7464 name_ = Bootstrapper::AllocateAutoDeletedArray(len);
7465 if (name_ == NULL) return "OOM";
7466 const char* op_name = Token::Name(op_);
7467 const char* overwrite_name;
7468 switch (mode_) {
7469 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
7470 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
7471 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
7472 default: overwrite_name = "UnknownOverwrite"; break;
7473 }
7474
7475 OS::SNPrintF(Vector<char>(name_, len),
7476 "GenericBinaryOpStub_%s_%s%s",
7477 op_name,
7478 overwrite_name,
7479 specialized_on_rhs_ ? "_ConstantRhs" : "");
7480 return name_;
7481}
7482
7483
Andrei Popescu31002712010-02-23 13:46:05 +00007484
Steve Blocka7e24c12009-10-30 11:49:00 +00007485void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Steve Block6ded16b2010-05-10 14:33:55 +01007486 // lhs_ : x
7487 // rhs_ : y
7488 // r0 : result
Steve Blocka7e24c12009-10-30 11:49:00 +00007489
Steve Block6ded16b2010-05-10 14:33:55 +01007490 Register result = r0;
7491 Register lhs = lhs_;
7492 Register rhs = rhs_;
7493
7494 // This code can't cope with other register allocations yet.
7495 ASSERT(result.is(r0) &&
7496 ((lhs.is(r0) && rhs.is(r1)) ||
7497 (lhs.is(r1) && rhs.is(r0))));
7498
7499 Register smi_test_reg = VirtualFrame::scratch0();
7500 Register scratch = VirtualFrame::scratch1();
7501
7502 // All ops need to know whether we are dealing with two Smis. Set up
7503 // smi_test_reg to tell us that.
7504 if (ShouldGenerateSmiCode()) {
7505 __ orr(smi_test_reg, lhs, Operand(rhs));
7506 }
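  // Since the smi tag is 0, the OR of the two tagged values has its tag bit
  // clear only if both operands are smis, so a single tst against
  // kSmiTagMask below covers both operands at once.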
Steve Blocka7e24c12009-10-30 11:49:00 +00007507
7508 switch (op_) {
7509 case Token::ADD: {
7510 Label not_smi;
7511 // Fast path.
Steve Block6ded16b2010-05-10 14:33:55 +01007512 if (ShouldGenerateSmiCode()) {
7513 ASSERT(kSmiTag == 0); // Adjust code below.
7514 __ tst(smi_test_reg, Operand(kSmiTagMask));
7515 __ b(ne, &not_smi);
7516 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
7517 // Return if no overflow.
7518 __ Ret(vc);
7519 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
7520 }
7521 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
Steve Blocka7e24c12009-10-30 11:49:00 +00007522 break;
7523 }
7524
7525 case Token::SUB: {
7526 Label not_smi;
7527 // Fast path.
Steve Block6ded16b2010-05-10 14:33:55 +01007528 if (ShouldGenerateSmiCode()) {
7529 ASSERT(kSmiTag == 0); // Adjust code below.
7530 __ tst(smi_test_reg, Operand(kSmiTagMask));
7531 __ b(ne, &not_smi);
7532 if (lhs.is(r1)) {
7533 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
7534 // Return if no overflow.
7535 __ Ret(vc);
7536 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
7537 } else {
7538 __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
7539 // Return if no overflow.
7540 __ Ret(vc);
7541 __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
7542 }
7543 }
7544 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
Steve Blocka7e24c12009-10-30 11:49:00 +00007545 break;
7546 }
7547
7548 case Token::MUL: {
7549 Label not_smi, slow;
Steve Block6ded16b2010-05-10 14:33:55 +01007550 if (ShouldGenerateSmiCode()) {
7551 ASSERT(kSmiTag == 0); // adjust code below
7552 __ tst(smi_test_reg, Operand(kSmiTagMask));
7553 Register scratch2 = smi_test_reg;
7554 smi_test_reg = no_reg;
7555 __ b(ne, &not_smi);
7556 // Remove tag from one operand (but keep sign), so that result is Smi.
7557 __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
7558 // Do multiplication
7559 // scratch = lower 32 bits of ip * lhs.
7560 __ smull(scratch, scratch2, lhs, ip);
7561 // Go slow on overflow (smull does not set the overflow flag).
7562 __ mov(ip, Operand(scratch, ASR, 31));
7563 // No overflow if higher 33 bits are identical.
7564 __ cmp(ip, Operand(scratch2));
7565 __ b(ne, &slow);
7566 // Go slow on zero result to handle -0.
7567 __ tst(scratch, Operand(scratch));
7568 __ mov(result, Operand(scratch), LeaveCC, ne);
7569 __ Ret(ne);
7570 // We need -0 if we were multiplying a negative number with 0 to get 0.
7571 // We know one of them was zero.
7572 __ add(scratch2, rhs, Operand(lhs), SetCC);
7573 __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
7574 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
7575 // Slow case. We fall through here if we multiplied a negative number
7576 // with 0, because that would mean we should produce -0.
7577 __ bind(&slow);
7578 }
7579 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
Steve Blocka7e24c12009-10-30 11:49:00 +00007580 break;
7581 }
7582
7583 case Token::DIV:
7584 case Token::MOD: {
7585 Label not_smi;
Steve Block6ded16b2010-05-10 14:33:55 +01007586 if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00007587 Label smi_is_unsuitable;
Steve Block6ded16b2010-05-10 14:33:55 +01007588 __ BranchOnNotSmi(lhs, &not_smi);
Steve Blocka7e24c12009-10-30 11:49:00 +00007589 if (IsPowerOf2(constant_rhs_)) {
7590 if (op_ == Token::MOD) {
Steve Block6ded16b2010-05-10 14:33:55 +01007591 __ and_(rhs,
7592 lhs,
Steve Blocka7e24c12009-10-30 11:49:00 +00007593 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
7594 SetCC);
7595 // We now have the answer, but if the input was negative we also
7596 // have the sign bit. Our work is done if the result is
7597 // positive or zero:
Steve Block6ded16b2010-05-10 14:33:55 +01007598 if (!rhs.is(r0)) {
7599 __ mov(r0, rhs, LeaveCC, pl);
7600 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007601 __ Ret(pl);
7602 // A mod of a negative left hand side must return a negative number.
7603 // Unfortunately if the answer is 0 then we must return -0. And we
Steve Block6ded16b2010-05-10 14:33:55 +01007604 // already optimistically trashed rhs so we may need to restore it.
7605 __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00007606 // Next two instructions are conditional on the answer being -0.
Steve Block6ded16b2010-05-10 14:33:55 +01007607 __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
Steve Blocka7e24c12009-10-30 11:49:00 +00007608 __ b(eq, &smi_is_unsuitable);
7609 // We need to subtract the dividend. Eg. -3 % 4 == -3.
Steve Block6ded16b2010-05-10 14:33:55 +01007610 __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
Steve Blocka7e24c12009-10-30 11:49:00 +00007611 } else {
7612 ASSERT(op_ == Token::DIV);
Steve Block6ded16b2010-05-10 14:33:55 +01007613 __ tst(lhs,
Steve Blocka7e24c12009-10-30 11:49:00 +00007614 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
7615 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
7616 int shift = 0;
7617 int d = constant_rhs_;
7618 while ((d & 1) == 0) {
7619 d >>= 1;
7620 shift++;
7621 }
Steve Block6ded16b2010-05-10 14:33:55 +01007622 __ mov(r0, Operand(lhs, LSR, shift));
Steve Blocka7e24c12009-10-30 11:49:00 +00007623 __ bic(r0, r0, Operand(kSmiTagMask));
7624 }
7625 } else {
7626 // Not a power of 2.
Steve Block6ded16b2010-05-10 14:33:55 +01007627 __ tst(lhs, Operand(0x80000000u));
Steve Blocka7e24c12009-10-30 11:49:00 +00007628 __ b(ne, &smi_is_unsuitable);
7629 // Find a fixed point reciprocal of the divisor so we can divide by
7630 // multiplying.
7631 double divisor = 1.0 / constant_rhs_;
7632 int shift = 32;
7633 double scale = 4294967296.0; // 1 << 32.
7634 uint32_t mul;
7635 // Maximise the precision of the fixed point reciprocal.
7636 while (true) {
7637 mul = static_cast<uint32_t>(scale * divisor);
7638 if (mul >= 0x7fffffff) break;
7639 scale *= 2.0;
7640 shift++;
7641 }
7642 mul++;
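        // For example, for constant_rhs_ == 5 this loop ends with shift == 34
        // and mul == 0xCCCCCCCD (roughly 2^34 / 5); the umull below multiplies
        // the tagged lhs by mul, and the high word shifted right by
        // (shift - 31) is the untagged quotient.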
Steve Block6ded16b2010-05-10 14:33:55 +01007643 Register scratch2 = smi_test_reg;
7644 smi_test_reg = no_reg;
7645 __ mov(scratch2, Operand(mul));
7646 __ umull(scratch, scratch2, scratch2, lhs);
7647 __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
7648 // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
7649 // rhs is still the known rhs. rhs is Smi tagged.
7650 // lhs is still the unknown lhs. lhs is Smi tagged.
7651 int required_scratch_shift = 0; // Including the Smi tag shift of 1.
7652 // scratch = scratch2 * rhs.
Steve Blocka7e24c12009-10-30 11:49:00 +00007653 MultiplyByKnownInt2(masm,
Steve Block6ded16b2010-05-10 14:33:55 +01007654 scratch,
7655 scratch2,
7656 rhs,
Steve Blocka7e24c12009-10-30 11:49:00 +00007657 constant_rhs_,
Steve Block6ded16b2010-05-10 14:33:55 +01007658 &required_scratch_shift);
7659 // scratch << required_scratch_shift is now the Smi tagged rhs *
7660 // (lhs / rhs) where / indicates integer division.
Steve Blocka7e24c12009-10-30 11:49:00 +00007661 if (op_ == Token::DIV) {
Steve Block6ded16b2010-05-10 14:33:55 +01007662 __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
Steve Blocka7e24c12009-10-30 11:49:00 +00007663 __ b(ne, &smi_is_unsuitable); // There was a remainder.
Steve Block6ded16b2010-05-10 14:33:55 +01007664 __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00007665 } else {
7666 ASSERT(op_ == Token::MOD);
Steve Block6ded16b2010-05-10 14:33:55 +01007667 __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
Steve Blocka7e24c12009-10-30 11:49:00 +00007668 }
7669 }
7670 __ Ret();
7671 __ bind(&smi_is_unsuitable);
Steve Blocka7e24c12009-10-30 11:49:00 +00007672 }
Steve Block6ded16b2010-05-10 14:33:55 +01007673 HandleBinaryOpSlowCases(
7674 masm,
7675 &not_smi,
7676 lhs,
7677 rhs,
7678 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
Steve Blocka7e24c12009-10-30 11:49:00 +00007679 break;
7680 }
7681
7682 case Token::BIT_OR:
7683 case Token::BIT_AND:
7684 case Token::BIT_XOR:
7685 case Token::SAR:
7686 case Token::SHR:
7687 case Token::SHL: {
7688 Label slow;
7689 ASSERT(kSmiTag == 0); // adjust code below
Steve Block6ded16b2010-05-10 14:33:55 +01007690 __ tst(smi_test_reg, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00007691 __ b(ne, &slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007692 Register scratch2 = smi_test_reg;
7693 smi_test_reg = no_reg;
Steve Blocka7e24c12009-10-30 11:49:00 +00007694 switch (op_) {
Steve Block6ded16b2010-05-10 14:33:55 +01007695 case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
7696 case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
7697 case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
Steve Blocka7e24c12009-10-30 11:49:00 +00007698 case Token::SAR:
7699 // Remove tags from right operand.
Steve Block6ded16b2010-05-10 14:33:55 +01007700 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
7701 __ mov(result, Operand(lhs, ASR, scratch2));
Steve Blocka7e24c12009-10-30 11:49:00 +00007702 // Smi tag result.
Steve Block6ded16b2010-05-10 14:33:55 +01007703 __ bic(result, result, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00007704 break;
7705 case Token::SHR:
7706 // Remove tags from operands. We can't do this on a 31 bit number
7707 // because then the 0s get shifted into bit 30 instead of bit 31.
Steve Block6ded16b2010-05-10 14:33:55 +01007708 __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
7709 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
7710 __ mov(scratch, Operand(scratch, LSR, scratch2));
Steve Blocka7e24c12009-10-30 11:49:00 +00007711 // Unsigned shift is not allowed to produce a negative number, so
7712 // check the sign bit and the sign bit after Smi tagging.
Steve Block6ded16b2010-05-10 14:33:55 +01007713 __ tst(scratch, Operand(0xc0000000));
Steve Blocka7e24c12009-10-30 11:49:00 +00007714 __ b(ne, &slow);
7715 // Smi tag result.
Steve Block6ded16b2010-05-10 14:33:55 +01007716 __ mov(result, Operand(scratch, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00007717 break;
7718 case Token::SHL:
7719 // Remove tags from operands.
Steve Block6ded16b2010-05-10 14:33:55 +01007720 __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
7721 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
7722 __ mov(scratch, Operand(scratch, LSL, scratch2));
Steve Blocka7e24c12009-10-30 11:49:00 +00007723 // Check that the signed result fits in a Smi.
Steve Block6ded16b2010-05-10 14:33:55 +01007724 __ add(scratch2, scratch, Operand(0x40000000), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00007725 __ b(mi, &slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007726 __ mov(result, Operand(scratch, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00007727 break;
7728 default: UNREACHABLE();
7729 }
7730 __ Ret();
7731 __ bind(&slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007732 HandleNonSmiBitwiseOp(masm, lhs, rhs);
Steve Blocka7e24c12009-10-30 11:49:00 +00007733 break;
7734 }
7735
7736 default: UNREACHABLE();
7737 }
7738 // This code should be unreachable.
7739 __ stop("Unreachable");
Steve Block6ded16b2010-05-10 14:33:55 +01007740
7741 // Generate an unreachable reference to the DEFAULT stub so that it can be
7742 // found at the end of this stub when clearing ICs at GC.
7743 // TODO(kaznacheev): Check performance impact and get rid of this.
7744 if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
7745 GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
7746 __ CallStub(&uninit);
7747 }
7748}
7749
7750
7751void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
7752 Label get_result;
7753
7754 __ Push(r1, r0);
7755
7756 // Internal frame is necessary to handle exceptions properly.
7757 __ EnterInternalFrame();
7758 // Call the stub proper to get the result in r0.
7759 __ Call(&get_result);
7760 __ LeaveInternalFrame();
7761
7762 __ push(r0);
7763
7764 __ mov(r0, Operand(Smi::FromInt(MinorKey())));
7765 __ push(r0);
7766 __ mov(r0, Operand(Smi::FromInt(op_)));
7767 __ push(r0);
7768 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
7769 __ push(r0);
7770
7771 __ TailCallExternalReference(
7772 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
7773 6,
7774 1);
7775
7776 // The entry point for the result calculation is assumed to be immediately
7777 // after this sequence.
7778 __ bind(&get_result);
7779}
7780
7781
7782Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
7783 GenericBinaryOpStub stub(key, type_info);
7784 return stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00007785}
7786
7787
7788void StackCheckStub::Generate(MacroAssembler* masm) {
7789 // Do tail-call to runtime routine. Runtime routines expect at least one
7790 // argument, so give it a Smi.
7791 __ mov(r0, Operand(Smi::FromInt(0)));
7792 __ push(r0);
Steve Block6ded16b2010-05-10 14:33:55 +01007793 __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00007794
7795 __ StubReturn(1);
7796}
7797
7798
Leon Clarkee46be812010-01-19 14:06:41 +00007799void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Leon Clarke4515c472010-02-03 11:58:03 +00007800 Label slow, done;
Leon Clarkee46be812010-01-19 14:06:41 +00007801
Leon Clarke4515c472010-02-03 11:58:03 +00007802 if (op_ == Token::SUB) {
7803 // Check whether the value is a smi.
7804 Label try_float;
7805 __ tst(r0, Operand(kSmiTagMask));
7806 __ b(ne, &try_float);
Steve Blocka7e24c12009-10-30 11:49:00 +00007807
Leon Clarke4515c472010-02-03 11:58:03 +00007808 // Go to the slow case if the value of the expression is zero, so that
7809 // negating 0 correctly yields -0 (which cannot be represented as a smi).
7810 __ cmp(r0, Operand(0));
7811 __ b(eq, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007812
Leon Clarke4515c472010-02-03 11:58:03 +00007813 // The value of the expression is a smi that is not zero. Try
7814 // optimistic subtraction '0 - value'.
7815 __ rsb(r1, r0, Operand(0), SetCC);
7816 __ b(vs, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007817
Leon Clarke4515c472010-02-03 11:58:03 +00007818 __ mov(r0, Operand(r1)); // Set r0 to result.
7819 __ b(&done);
Steve Blocka7e24c12009-10-30 11:49:00 +00007820
Leon Clarke4515c472010-02-03 11:58:03 +00007821 __ bind(&try_float);
7822 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
7823 __ b(ne, &slow);
7824 // r0 is a heap number. Get a new heap number in r1.
7825 if (overwrite_) {
7826 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
7827 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
7828 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
7829 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01007830 __ AllocateHeapNumber(r1, r2, r3, &slow);
Leon Clarke4515c472010-02-03 11:58:03 +00007831 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
7832 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
7833 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
7834 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
7835 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
7836 __ mov(r0, Operand(r1));
7837 }
7838 } else if (op_ == Token::BIT_NOT) {
7839 // Check if the operand is a heap number.
7840 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
7841 __ b(ne, &slow);
7842
7843 // Convert the heap number in r0 to an untagged integer in r1.
7844 GetInt32(masm, r0, r1, r2, r3, &slow);
7845
7846 // Do the bitwise operation (move negated) and check if the result
7847 // fits in a smi.
7848 Label try_float;
7849 __ mvn(r1, Operand(r1));
7850 __ add(r2, r1, Operand(0x40000000), SetCC);
7851 __ b(mi, &try_float);
7852 __ mov(r0, Operand(r1, LSL, kSmiTagSize));
7853 __ b(&done);
7854
7855 __ bind(&try_float);
7856 if (!overwrite_) {
7857 // Allocate a fresh heap number, but don't overwrite r0 until
7858 // we're sure we can do it without going through the slow case
7859 // that needs the value in r0.
Steve Block6ded16b2010-05-10 14:33:55 +01007860 __ AllocateHeapNumber(r2, r3, r4, &slow);
Leon Clarke4515c472010-02-03 11:58:03 +00007861 __ mov(r0, Operand(r2));
7862 }
7863
7864 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
7865 // have to set up a frame.
7866 WriteInt32ToHeapNumberStub stub(r1, r0, r2);
7867 __ push(lr);
7868 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
7869 __ pop(lr);
7870 } else {
7871 UNIMPLEMENTED();
7872 }
7873
7874 __ bind(&done);
Steve Blocka7e24c12009-10-30 11:49:00 +00007875 __ StubReturn(1);
7876
Leon Clarke4515c472010-02-03 11:58:03 +00007877 // Handle the slow case by jumping to the JavaScript builtin.
Steve Blocka7e24c12009-10-30 11:49:00 +00007878 __ bind(&slow);
7879 __ push(r0);
Leon Clarke4515c472010-02-03 11:58:03 +00007880 switch (op_) {
7881 case Token::SUB:
7882 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
7883 break;
7884 case Token::BIT_NOT:
7885 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
7886 break;
7887 default:
7888 UNREACHABLE();
Steve Blocka7e24c12009-10-30 11:49:00 +00007889 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007890}
7891
7892
7893void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
7894 // r0 holds the exception.
7895
7896 // Adjust this code if not the case.
7897 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
7898
7899 // Drop the sp to the top of the handler.
7900 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
7901 __ ldr(sp, MemOperand(r3));
7902
7903 // Restore the next handler and frame pointer, discard handler state.
7904 ASSERT(StackHandlerConstants::kNextOffset == 0);
7905 __ pop(r2);
7906 __ str(r2, MemOperand(r3));
7907 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
7908 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
7909
7910 // Before returning we restore the context from the frame pointer if
7911 // not NULL. The frame pointer is NULL in the exception handler of a
7912 // JS entry frame.
7913 __ cmp(fp, Operand(0));
7914 // Set cp to NULL if fp is NULL.
7915 __ mov(cp, Operand(0), LeaveCC, eq);
7916 // Restore cp otherwise.
7917 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
7918#ifdef DEBUG
7919 if (FLAG_debug_code) {
7920 __ mov(lr, Operand(pc));
7921 }
7922#endif
7923 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
7924 __ pop(pc);
7925}
7926
7927
7928void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
7929 UncatchableExceptionType type) {
7930 // Adjust this code if not the case.
7931 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
7932
7933 // Drop sp to the top stack handler.
7934 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
7935 __ ldr(sp, MemOperand(r3));
7936
7937 // Unwind the handlers until the ENTRY handler is found.
7938 Label loop, done;
7939 __ bind(&loop);
7940 // Load the type of the current stack handler.
7941 const int kStateOffset = StackHandlerConstants::kStateOffset;
7942 __ ldr(r2, MemOperand(sp, kStateOffset));
7943 __ cmp(r2, Operand(StackHandler::ENTRY));
7944 __ b(eq, &done);
7945 // Fetch the next handler in the list.
7946 const int kNextOffset = StackHandlerConstants::kNextOffset;
7947 __ ldr(sp, MemOperand(sp, kNextOffset));
7948 __ jmp(&loop);
7949 __ bind(&done);
7950
7951 // Set the top handler address to the next handler past the current ENTRY handler.
7952 ASSERT(StackHandlerConstants::kNextOffset == 0);
7953 __ pop(r2);
7954 __ str(r2, MemOperand(r3));
7955
7956 if (type == OUT_OF_MEMORY) {
7957 // Set external caught exception to false.
7958 ExternalReference external_caught(Top::k_external_caught_exception_address);
7959 __ mov(r0, Operand(false));
7960 __ mov(r2, Operand(external_caught));
7961 __ str(r0, MemOperand(r2));
7962
7963 // Set pending exception and r0 to out of memory exception.
7964 Failure* out_of_memory = Failure::OutOfMemoryException();
7965 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
7966 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
7967 __ str(r0, MemOperand(r2));
7968 }
7969
7970 // Stack layout at this point. See also StackHandlerConstants.
7971 // sp -> state (ENTRY)
7972 // fp
7973 // lr
7974
7975 // Discard handler state (r2 is not used) and restore frame pointer.
7976 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
7977 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
7978 // Before returning we restore the context from the frame pointer if
7979 // not NULL. The frame pointer is NULL in the exception handler of a
7980 // JS entry frame.
7981 __ cmp(fp, Operand(0));
7982 // Set cp to NULL if fp is NULL.
7983 __ mov(cp, Operand(0), LeaveCC, eq);
7984 // Restore cp otherwise.
7985 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
7986#ifdef DEBUG
7987 if (FLAG_debug_code) {
7988 __ mov(lr, Operand(pc));
7989 }
7990#endif
7991 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
7992 __ pop(pc);
7993}
7994
7995
7996void CEntryStub::GenerateCore(MacroAssembler* masm,
7997 Label* throw_normal_exception,
7998 Label* throw_termination_exception,
7999 Label* throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00008000 bool do_gc,
Steve Block6ded16b2010-05-10 14:33:55 +01008001 bool always_allocate,
8002 int frame_alignment_skew) {
Steve Blocka7e24c12009-10-30 11:49:00 +00008003 // r0: result parameter for PerformGC, if any
8004 // r4: number of arguments including receiver (C callee-saved)
8005 // r5: pointer to builtin function (C callee-saved)
8006 // r6: pointer to the first argument (C callee-saved)
8007
8008 if (do_gc) {
8009 // Passing r0.
Steve Block6ded16b2010-05-10 14:33:55 +01008010 __ PrepareCallCFunction(1, r1);
8011 __ CallCFunction(ExternalReference::perform_gc_function(), 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00008012 }
8013
8014 ExternalReference scope_depth =
8015 ExternalReference::heap_always_allocate_scope_depth();
8016 if (always_allocate) {
8017 __ mov(r0, Operand(scope_depth));
8018 __ ldr(r1, MemOperand(r0));
8019 __ add(r1, r1, Operand(1));
8020 __ str(r1, MemOperand(r0));
8021 }
8022
8023 // Call C built-in.
8024 // r0 = argc, r1 = argv
8025 __ mov(r0, Operand(r4));
8026 __ mov(r1, Operand(r6));
8027
Steve Block6ded16b2010-05-10 14:33:55 +01008028 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
8029 int frame_alignment_mask = frame_alignment - 1;
8030#if defined(V8_HOST_ARCH_ARM)
8031 if (FLAG_debug_code) {
8032 if (frame_alignment > kPointerSize) {
8033 Label alignment_as_expected;
8034 ASSERT(IsPowerOf2(frame_alignment));
8035 __ sub(r2, sp, Operand(frame_alignment_skew));
8036 __ tst(r2, Operand(frame_alignment_mask));
8037 __ b(eq, &alignment_as_expected);
8038 // Don't use Check here, as it will call Runtime_Abort, re-entering this code.
8039 __ stop("Unexpected alignment");
8040 __ bind(&alignment_as_expected);
8041 }
8042 }
8043#endif
8044
8045 // Just before the call (jump) below, lr is pushed, so the actual alignment
8046 // skew at the call is the current skew plus one pointer size.
8047 int alignment_before_call =
8048 (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
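  // Example: with an 8 byte frame alignment, a skew of -kPointerSize gives
  // alignment_before_call == 0 (pushing lr below restores alignment), while a
  // skew of 0 gives 4, so exactly one padding word is pushed.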
8049 if (alignment_before_call > 0) {
8050 // Push until the alignment before the call is met.
8051 __ mov(r2, Operand(0));
8052 for (int i = alignment_before_call;
8053 (i & frame_alignment_mask) != 0;
8054 i += kPointerSize) {
8055 __ push(r2);
8056 }
8057 }
8058
Steve Blocka7e24c12009-10-30 11:49:00 +00008059 // TODO(1242173): To let the GC traverse the return address of the exit
8060 // frames, we need to know where the return address is. Right now,
8061 // we push it on the stack to be able to find it again, but we never
8062 // restore from it in case of changes, which makes it impossible to
8063 // support moving the C entry code stub. This should be fixed, but currently
8064 // this is OK because the CEntryStub gets generated so early in the V8 boot
8065 // sequence that it is never moved.
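  // Reading pc on ARM yields the address of the current instruction plus 8,
  // so pc + 4 below is the address of the instruction right after the
  // following push and jump, i.e. where the builtin should return to.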
Steve Block6ded16b2010-05-10 14:33:55 +01008066 masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
Steve Blocka7e24c12009-10-30 11:49:00 +00008067 masm->push(lr);
8068 masm->Jump(r5);
8069
Steve Block6ded16b2010-05-10 14:33:55 +01008070 // Restore sp back to before aligning the stack.
8071 if (alignment_before_call > 0) {
8072 __ add(sp, sp, Operand(alignment_before_call));
8073 }
8074
Steve Blocka7e24c12009-10-30 11:49:00 +00008075 if (always_allocate) {
8076 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
8077 // though (they contain the result).
8078 __ mov(r2, Operand(scope_depth));
8079 __ ldr(r3, MemOperand(r2));
8080 __ sub(r3, r3, Operand(1));
8081 __ str(r3, MemOperand(r2));
8082 }
8083
8084 // Check for failure result.
8085 Label failure_returned;
8086 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
8087 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
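  // The assert above guarantees the failure tag is all ones in its low bits,
  // so adding 1 clears them for failure objects; smis (low bit 0) and heap
  // objects (tag 01) keep at least one of the low two bits set.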
8088 __ add(r2, r0, Operand(1));
8089 __ tst(r2, Operand(kFailureTagMask));
8090 __ b(eq, &failure_returned);
8091
8092 // Exit C frame and return.
8093 // r0:r1: result
8094 // sp: stack pointer
8095 // fp: frame pointer
Leon Clarke4515c472010-02-03 11:58:03 +00008096 __ LeaveExitFrame(mode_);
Steve Blocka7e24c12009-10-30 11:49:00 +00008097
8098 // Check if we should retry or throw an exception.
8099 Label retry;
8100 __ bind(&failure_returned);
8101 ASSERT(Failure::RETRY_AFTER_GC == 0);
8102 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
8103 __ b(eq, &retry);
8104
8105 // Special handling of out of memory exceptions.
8106 Failure* out_of_memory = Failure::OutOfMemoryException();
8107 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
8108 __ b(eq, throw_out_of_memory_exception);
8109
8110 // Retrieve the pending exception and clear the variable.
8111 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
8112 __ ldr(r3, MemOperand(ip));
8113 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8114 __ ldr(r0, MemOperand(ip));
8115 __ str(r3, MemOperand(ip));
8116
8117 // Special handling of termination exceptions which are uncatchable
8118 // by javascript code.
8119 __ cmp(r0, Operand(Factory::termination_exception()));
8120 __ b(eq, throw_termination_exception);
8121
8122 // Handle normal exception.
8123 __ jmp(throw_normal_exception);
8124
8125 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
8126}
8127
8128
Leon Clarke4515c472010-02-03 11:58:03 +00008129void CEntryStub::Generate(MacroAssembler* masm) {
Steve Blocka7e24c12009-10-30 11:49:00 +00008130 // Called from JavaScript; parameters are on stack as if calling JS function
8131 // r0: number of arguments including receiver
8132 // r1: pointer to builtin function
8133 // fp: frame pointer (restored after C call)
8134 // sp: stack pointer (restored as callee's sp after C call)
8135 // cp: current context (C callee-saved)
8136
Leon Clarke4515c472010-02-03 11:58:03 +00008137 // Result returned in r0 or r0+r1 by default.
8138
Steve Blocka7e24c12009-10-30 11:49:00 +00008139 // NOTE: Invocations of builtins may return failure objects
8140 // instead of a proper result. The builtin entry handles
8141 // this by performing a garbage collection and retrying the
8142 // builtin once.
8143
Steve Blocka7e24c12009-10-30 11:49:00 +00008144 // Enter the exit frame that transitions from JavaScript to C++.
Leon Clarke4515c472010-02-03 11:58:03 +00008145 __ EnterExitFrame(mode_);
Steve Blocka7e24c12009-10-30 11:49:00 +00008146
8147 // r4: number of arguments (C callee-saved)
8148 // r5: pointer to builtin function (C callee-saved)
8149 // r6: pointer to first argument (C callee-saved)
8150
8151 Label throw_normal_exception;
8152 Label throw_termination_exception;
8153 Label throw_out_of_memory_exception;
8154
8155 // Call into the runtime system.
8156 GenerateCore(masm,
8157 &throw_normal_exception,
8158 &throw_termination_exception,
8159 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00008160 false,
Steve Block6ded16b2010-05-10 14:33:55 +01008161 false,
8162 -kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00008163
8164 // Do space-specific GC and retry runtime call.
8165 GenerateCore(masm,
8166 &throw_normal_exception,
8167 &throw_termination_exception,
8168 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00008169 true,
Steve Block6ded16b2010-05-10 14:33:55 +01008170 false,
8171 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00008172
8173 // Do full GC and retry runtime call one final time.
8174 Failure* failure = Failure::InternalError();
8175 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
8176 GenerateCore(masm,
8177 &throw_normal_exception,
8178 &throw_termination_exception,
8179 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00008180 true,
Steve Block6ded16b2010-05-10 14:33:55 +01008181 true,
8182 kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00008183
8184 __ bind(&throw_out_of_memory_exception);
8185 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
8186
8187 __ bind(&throw_termination_exception);
8188 GenerateThrowUncatchable(masm, TERMINATION);
8189
8190 __ bind(&throw_normal_exception);
8191 GenerateThrowTOS(masm);
8192}
8193
8194
8195void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
8196 // r0: code entry
8197 // r1: function
8198 // r2: receiver
8199 // r3: argc
8200 // [sp+0]: argv
8201
8202 Label invoke, exit;
8203
8204 // Called from C, so do not pop argc and args on exit (preserve sp)
8205 // No need to save register-passed args
8206 // Save callee-saved registers (incl. cp and fp), sp, and lr
8207 __ stm(db_w, sp, kCalleeSaved | lr.bit());
8208
8209 // Get address of argv, see stm above.
8210 // r0: code entry
8211 // r1: function
8212 // r2: receiver
8213 // r3: argc
Leon Clarke4515c472010-02-03 11:58:03 +00008214 __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
Steve Blocka7e24c12009-10-30 11:49:00 +00008215
8216 // Push a frame with special values setup to mark it as an entry frame.
8217 // r0: code entry
8218 // r1: function
8219 // r2: receiver
8220 // r3: argc
8221 // r4: argv
8222 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
8223 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
8224 __ mov(r7, Operand(Smi::FromInt(marker)));
8225 __ mov(r6, Operand(Smi::FromInt(marker)));
8226 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
8227 __ ldr(r5, MemOperand(r5));
Steve Block6ded16b2010-05-10 14:33:55 +01008228 __ Push(r8, r7, r6, r5);
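  // After the pushes above the stack holds, from sp upwards: the saved
  // c_entry_fp, two copies of the frame type marker, and the bad frame
  // pointer (-1).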
Steve Blocka7e24c12009-10-30 11:49:00 +00008229
8230 // Setup frame pointer for the frame to be pushed.
8231 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
8232
8233 // Call a faked try-block that does the invoke.
8234 __ bl(&invoke);
8235
8236 // Caught exception: Store result (exception) in the pending
8237 // exception field in the JSEnv and return a failure sentinel.
8238 // Coming in here the fp will be invalid because the PushTryHandler below
8239 // sets it to 0 to signal the existence of the JSEntry frame.
8240 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8241 __ str(r0, MemOperand(ip));
8242 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
8243 __ b(&exit);
8244
8245 // Invoke: Link this frame into the handler chain.
8246 __ bind(&invoke);
8247 // Must preserve r0-r4, r5-r7 are available.
8248 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
8249 // If an exception not caught by another handler occurs, this handler
8250 // returns control to the code after the bl(&invoke) above, which
8251 // restores all kCalleeSaved registers (including cp and fp) to their
8252 // saved values before returning a failure to C.
8253
8254 // Clear any pending exceptions.
8255 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
8256 __ ldr(r5, MemOperand(ip));
8257 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8258 __ str(r5, MemOperand(ip));
8259
8260 // Invoke the function by calling through JS entry trampoline builtin.
8261 // Notice that we cannot store a reference to the trampoline code directly in
8262 // this stub, because runtime stubs are not traversed when doing GC.
8263
8264 // Registers expected by Builtins::JSEntryTrampoline
8265 // r0: code entry
8266 // r1: function
8267 // r2: receiver
8268 // r3: argc
8269 // r4: argv
8270 if (is_construct) {
8271 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
8272 __ mov(ip, Operand(construct_entry));
8273 } else {
8274 ExternalReference entry(Builtins::JSEntryTrampoline);
8275 __ mov(ip, Operand(entry));
8276 }
8277 __ ldr(ip, MemOperand(ip)); // deref address
8278
8279 // Branch and link to JSEntryTrampoline. We don't use the double underscore
8280 // macro for the add instruction because we don't want the coverage tool
8281 // inserting instructions here after we read the pc.
8282 __ mov(lr, Operand(pc));
8283 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
8284
8285 // Unlink this frame from the handler chain. When reading the
8286 // address of the next handler, there is no need to use the address
8287 // displacement since the current stack pointer (sp) points directly
8288 // to the stack handler.
8289 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
8290 __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
8291 __ str(r3, MemOperand(ip));
8292 // No need to restore registers
8293 __ add(sp, sp, Operand(StackHandlerConstants::kSize));
8294
8295
8296 __ bind(&exit); // r0 holds result
8297 // Restore the top frame descriptors from the stack.
8298 __ pop(r3);
8299 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
8300 __ str(r3, MemOperand(ip));
8301
8302 // Reset the stack to the callee saved registers.
8303 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
8304
8305 // Restore callee-saved registers and return.
8306#ifdef DEBUG
8307 if (FLAG_debug_code) {
8308 __ mov(lr, Operand(pc));
8309 }
8310#endif
8311 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
8312}
8313
8314
8315// This stub performs an instanceof, calling the builtin function if
8316// necessary. Uses r0 for the object and r1 for the function that it may
8317// be an instance of (both are fetched from the stack).
8318void InstanceofStub::Generate(MacroAssembler* masm) {
8319 // Get the object - slow case for smis (we may need to throw an exception
8320 // depending on the rhs).
8321 Label slow, loop, is_instance, is_not_instance;
8322 __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
8323 __ BranchOnSmi(r0, &slow);
8324
8325 // Check that the left-hand side is a JS object and put its map in r3.
8326 __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
8327 __ b(lt, &slow);
8328 __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
8329 __ b(gt, &slow);
8330
8331 // Load the function from the stack (its prototype is fetched into r4 below, with r2 as scratch).
Andrei Popescu402d9372010-02-26 13:31:12 +00008332 __ ldr(r1, MemOperand(sp, 0));
Kristian Monsen25f61362010-05-21 11:50:48 +01008333 // r1 is function, r3 is map.
8334
8335 // Look up the function and the map in the instanceof cache.
8336 Label miss;
8337 __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
8338 __ cmp(r1, ip);
8339 __ b(ne, &miss);
8340 __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
8341 __ cmp(r3, ip);
8342 __ b(ne, &miss);
8343 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
8344 __ pop();
8345 __ pop();
8346 __ mov(pc, Operand(lr));
8347
8348 __ bind(&miss);
Steve Blocka7e24c12009-10-30 11:49:00 +00008349 __ TryGetFunctionPrototype(r1, r4, r2, &slow);
8350
8351 // Check that the function prototype is a JS object.
8352 __ BranchOnSmi(r4, &slow);
8353 __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
8354 __ b(lt, &slow);
8355 __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
8356 __ b(gt, &slow);
8357
Kristian Monsen25f61362010-05-21 11:50:48 +01008358 __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
8359 __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
8360
Steve Blocka7e24c12009-10-30 11:49:00 +00008361 // Register mapping: r3 is object map and r4 is function prototype.
8362 // Get prototype of object into r2.
8363 __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
8364
8365 // Loop through the prototype chain looking for the function prototype.
8366 __ bind(&loop);
8367 __ cmp(r2, Operand(r4));
8368 __ b(eq, &is_instance);
8369 __ LoadRoot(ip, Heap::kNullValueRootIndex);
8370 __ cmp(r2, ip);
8371 __ b(eq, &is_not_instance);
8372 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
8373 __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
8374 __ jmp(&loop);
8375
8376 __ bind(&is_instance);
8377 __ mov(r0, Operand(Smi::FromInt(0)));
Kristian Monsen25f61362010-05-21 11:50:48 +01008378 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
Steve Blocka7e24c12009-10-30 11:49:00 +00008379 __ pop();
8380 __ pop();
8381 __ mov(pc, Operand(lr)); // Return.
8382
8383 __ bind(&is_not_instance);
8384 __ mov(r0, Operand(Smi::FromInt(1)));
Kristian Monsen25f61362010-05-21 11:50:48 +01008385 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
Steve Blocka7e24c12009-10-30 11:49:00 +00008386 __ pop();
8387 __ pop();
8388 __ mov(pc, Operand(lr)); // Return.
8389
8390 // Slow-case. Tail call builtin.
8391 __ bind(&slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00008392 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
8393}
8394
8395
Steve Blocka7e24c12009-10-30 11:49:00 +00008396void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
8397 // The displacement is the offset of the last parameter (if any)
8398 // relative to the frame pointer.
8399 static const int kDisplacement =
8400 StandardFrameConstants::kCallerSPOffset - kPointerSize;
8401
8402 // Check that the key is a smi.
8403 Label slow;
8404 __ BranchOnNotSmi(r1, &slow);
8405
8406 // Check if the calling frame is an arguments adaptor frame.
8407 Label adaptor;
8408 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
8409 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
8410 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
8411 __ b(eq, &adaptor);
8412
8413 // Check index against formal parameters count limit passed in
Steve Blockd0582a62009-12-15 09:54:21 +00008414 // through register r0. Use unsigned comparison to get negative
Steve Blocka7e24c12009-10-30 11:49:00 +00008415 // check for free.
8416 __ cmp(r1, r0);
8417 __ b(cs, &slow);
8418
8419 // Read the argument from the stack and return it.
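  // Both the argument count (r0) and the index (r1) are smis, so their
  // difference is a smi as well; shifting it left by
  // (kPointerSizeLog2 - kSmiTagSize) turns it into a byte offset of
  // (count - index) words above fp, and kDisplacement then selects the
  // wanted parameter.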
8420 __ sub(r3, r0, r1);
8421 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
8422 __ ldr(r0, MemOperand(r3, kDisplacement));
8423 __ Jump(lr);
8424
8425 // Arguments adaptor case: Check index against actual arguments
8426 // limit found in the arguments adaptor frame. Use unsigned
8427 // comparison to get negative check for free.
8428 __ bind(&adaptor);
8429 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
8430 __ cmp(r1, r0);
8431 __ b(cs, &slow);
8432
8433 // Read the argument from the adaptor frame and return it.
8434 __ sub(r3, r0, r1);
8435 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
8436 __ ldr(r0, MemOperand(r3, kDisplacement));
8437 __ Jump(lr);
8438
8439 // Slow-case: Handle non-smi or out-of-bounds access to arguments
8440 // by calling the runtime system.
8441 __ bind(&slow);
8442 __ push(r1);
Steve Block6ded16b2010-05-10 14:33:55 +01008443 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00008444}
8445
8446
8447void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
Andrei Popescu402d9372010-02-26 13:31:12 +00008448 // sp[0] : number of parameters
8449 // sp[4] : receiver displacement
8450 // sp[8] : function
8451
Steve Blocka7e24c12009-10-30 11:49:00 +00008452 // Check if the calling frame is an arguments adaptor frame.
Andrei Popescu402d9372010-02-26 13:31:12 +00008453 Label adaptor_frame, try_allocate, runtime;
Steve Blocka7e24c12009-10-30 11:49:00 +00008454 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
8455 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
8456 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
Andrei Popescu402d9372010-02-26 13:31:12 +00008457 __ b(eq, &adaptor_frame);
8458
8459 // Get the length from the frame.
8460 __ ldr(r1, MemOperand(sp, 0));
8461 __ b(&try_allocate);
Steve Blocka7e24c12009-10-30 11:49:00 +00008462
8463 // Patch the arguments.length and the parameters pointer.
Andrei Popescu402d9372010-02-26 13:31:12 +00008464 __ bind(&adaptor_frame);
8465 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
8466 __ str(r1, MemOperand(sp, 0));
8467 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00008468 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
8469 __ str(r3, MemOperand(sp, 1 * kPointerSize));
8470
Andrei Popescu402d9372010-02-26 13:31:12 +00008471 // Try the new space allocation. Start out by computing the size
Kristian Monsen25f61362010-05-21 11:50:48 +01008472 // of the arguments object and the elements array in words.
Andrei Popescu402d9372010-02-26 13:31:12 +00008473 Label add_arguments_object;
8474 __ bind(&try_allocate);
8475 __ cmp(r1, Operand(0));
8476 __ b(eq, &add_arguments_object);
8477 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
8478 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
8479 __ bind(&add_arguments_object);
8480 __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
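  // r1 now holds the allocation size in words: the arguments object itself
  // plus, when there are arguments, a FixedArray header and one slot per
  // argument.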
8481
8482 // Do the allocation of both objects in one go.
Kristian Monsen25f61362010-05-21 11:50:48 +01008483 __ AllocateInNewSpace(
8484 r1,
8485 r0,
8486 r2,
8487 r3,
8488 &runtime,
8489 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
Andrei Popescu402d9372010-02-26 13:31:12 +00008490
8491 // Get the arguments boilerplate from the current (global) context.
8492 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
8493 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
8494 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
8495 __ ldr(r4, MemOperand(r4, offset));
8496
8497 // Copy the JS object part.
8498 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
8499 __ ldr(r3, FieldMemOperand(r4, i));
8500 __ str(r3, FieldMemOperand(r0, i));
8501 }
8502
8503 // Setup the callee in-object property.
8504 ASSERT(Heap::arguments_callee_index == 0);
8505 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
8506 __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
8507
8508 // Get the length (smi tagged) and set that as an in-object property too.
8509 ASSERT(Heap::arguments_length_index == 1);
8510 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
8511 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
8512
8513 // If there are no actual arguments, we're done.
8514 Label done;
8515 __ cmp(r1, Operand(0));
8516 __ b(eq, &done);
8517
8518 // Get the parameters pointer from the stack and untag the length.
8519 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
8520 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
8521
8522 // Setup the elements pointer in the allocated arguments object and
8523 // initialize the header in the elements fixed array.
8524 __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
8525 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
8526 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
8527 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
8528 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
8529
8530 // Copy the fixed array slots.
8531 Label loop;
8532 // Setup r4 to point to the first array slot.
8533 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
8534 __ bind(&loop);
8535 // Pre-decrement r2 with kPointerSize on each iteration.
8536 // Pre-decrement in order to skip receiver.
8537 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
8538 // Post-increment r4 with kPointerSize on each iteration.
8539 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
8540 __ sub(r1, r1, Operand(1));
8541 __ cmp(r1, Operand(0));
8542 __ b(ne, &loop);
8543
8544 // Return and remove the on-stack parameters.
8545 __ bind(&done);
8546 __ add(sp, sp, Operand(3 * kPointerSize));
8547 __ Ret();
8548
Steve Blocka7e24c12009-10-30 11:49:00 +00008549 // Do the runtime call to allocate the arguments object.
8550 __ bind(&runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01008551 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
8552}
8553
8554
8555void RegExpExecStub::Generate(MacroAssembler* masm) {
8556 // Just jump directly to the runtime system if native RegExp is not selected
8557 // at compile time, or if the RegExp entry in generated code has been turned
8558 // off by the runtime switch or at compilation.
Kristian Monsen25f61362010-05-21 11:50:48 +01008559#ifdef V8_INTERPRETED_REGEXP
Steve Block6ded16b2010-05-10 14:33:55 +01008560 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
Kristian Monsen25f61362010-05-21 11:50:48 +01008561#else // V8_INTERPRETED_REGEXP
Steve Block6ded16b2010-05-10 14:33:55 +01008562 if (!FLAG_regexp_entry_native) {
8563 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
8564 return;
8565 }
8566
8567 // Stack frame on entry.
8568 // sp[0]: last_match_info (expected JSArray)
8569 // sp[4]: previous index
8570 // sp[8]: subject string
8571 // sp[12]: JSRegExp object
8572
8573 static const int kLastMatchInfoOffset = 0 * kPointerSize;
8574 static const int kPreviousIndexOffset = 1 * kPointerSize;
8575 static const int kSubjectOffset = 2 * kPointerSize;
8576 static const int kJSRegExpOffset = 3 * kPointerSize;
8577
8578 Label runtime, invoke_regexp;
8579
8580 // Allocation of registers for this function. These are in callee save
8581 // registers and will be preserved by the call to the native RegExp code, as
8582 // this code is called using the normal C calling convention. When calling
8583 // directly from generated code the native RegExp code will not do a GC and
8584 // therefore the content of these registers are safe to use after the call.
8585 Register subject = r4;
8586 Register regexp_data = r5;
8587 Register last_match_info_elements = r6;
8588
8589 // Ensure that a RegExp stack is allocated.
8590 ExternalReference address_of_regexp_stack_memory_address =
8591 ExternalReference::address_of_regexp_stack_memory_address();
8592 ExternalReference address_of_regexp_stack_memory_size =
8593 ExternalReference::address_of_regexp_stack_memory_size();
8594 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
8595 __ ldr(r0, MemOperand(r0, 0));
8596 __ tst(r0, Operand(r0));
8597 __ b(eq, &runtime);
8598
8599 // Check that the first argument is a JSRegExp object.
8600 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
8601 ASSERT_EQ(0, kSmiTag);
8602 __ tst(r0, Operand(kSmiTagMask));
8603 __ b(eq, &runtime);
8604 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
8605 __ b(ne, &runtime);
8606
8607 // Check that the RegExp has been compiled (data contains a fixed array).
8608 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
8609 if (FLAG_debug_code) {
8610 __ tst(regexp_data, Operand(kSmiTagMask));
8611 __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
8612 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
8613 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
8614 }
8615
8616 // regexp_data: RegExp data (FixedArray)
8617 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
8618 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
8619 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
8620 __ b(ne, &runtime);
8621
8622 // regexp_data: RegExp data (FixedArray)
8623 // Check that the number of captures fit in the static offsets vector buffer.
8624 __ ldr(r2,
8625 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
8626 // Calculate number of capture registers (number_of_captures + 1) * 2. This
8627 // uses the assumption that smis are 2 * their untagged value.
8628 ASSERT_EQ(0, kSmiTag);
8629 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
8630 __ add(r2, r2, Operand(2)); // r2 was a smi.
8631 // Check that the static offsets vector buffer is large enough.
8632 __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
8633 __ b(hi, &runtime);
8634
8635 // r2: Number of capture registers
8636 // regexp_data: RegExp data (FixedArray)
8637 // Check that the second argument is a string.
8638 __ ldr(subject, MemOperand(sp, kSubjectOffset));
8639 __ tst(subject, Operand(kSmiTagMask));
8640 __ b(eq, &runtime);
8641 Condition is_string = masm->IsObjectStringType(subject, r0);
8642 __ b(NegateCondition(is_string), &runtime);
8643 // Get the length of the string to r3.
8644 __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
8645
8646 // r2: Number of capture registers
8647 // r3: Length of subject string as a smi
8648 // subject: Subject string
8649 // regexp_data: RegExp data (FixedArray)
8650 // Check that the third argument is a positive smi less than the subject
8651 // string length. A negative value will be greater (unsigned comparison).
8652 __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
8653 __ tst(r0, Operand(kSmiTagMask));
Kristian Monsen25f61362010-05-21 11:50:48 +01008654 __ b(ne, &runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01008655 __ cmp(r3, Operand(r0));
Kristian Monsen25f61362010-05-21 11:50:48 +01008656 __ b(ls, &runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01008657
8658 // r2: Number of capture registers
8659 // subject: Subject string
8660 // regexp_data: RegExp data (FixedArray)
8661 // Check that the fourth object is a JSArray object.
8662 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
8663 __ tst(r0, Operand(kSmiTagMask));
8664 __ b(eq, &runtime);
8665 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
8666 __ b(ne, &runtime);
8667 // Check that the JSArray is in fast case.
8668 __ ldr(last_match_info_elements,
8669 FieldMemOperand(r0, JSArray::kElementsOffset));
8670 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01008671 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01008672 __ cmp(r0, ip);
8673 __ b(ne, &runtime);
8674 // Check that the last match info has space for the capture registers and the
8675 // additional information.
8676 __ ldr(r0,
8677 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
8678 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
8679 __ cmp(r2, r0);
8680 __ b(gt, &runtime);
8681
8682 // subject: Subject string
8683 // regexp_data: RegExp data (FixedArray)
8684 // Check the representation and encoding of the subject string.
8685 Label seq_string;
8686 const int kStringRepresentationEncodingMask =
8687 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
8688 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
8689 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
8690 __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
8691 // First check for sequential string.
8692 ASSERT_EQ(0, kStringTag);
8693 ASSERT_EQ(0, kSeqStringTag);
8694 __ tst(r1, Operand(kIsNotStringMask | kStringRepresentationMask));
8695 __ b(eq, &seq_string);
8696
8697 // subject: Subject string
8698 // regexp_data: RegExp data (FixedArray)
8699 // Check for flat cons string.
8700 // A flat cons string is a cons string where the second part is the empty
8701 // string. In that case the subject string is just the first part of the cons
8702 // string. Also in this case the first part of the cons string is known to be
8703 // a sequential string or an external string.
8704 __ and_(r0, r0, Operand(kStringRepresentationMask));
8705 __ cmp(r0, Operand(kConsStringTag));
8706 __ b(ne, &runtime);
8707 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
8708 __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
8709 __ cmp(r0, r1);
8710 __ b(ne, &runtime);
8711 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
8712 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
8713 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
8714 ASSERT_EQ(0, kSeqStringTag);
8715 __ tst(r0, Operand(kStringRepresentationMask));
8716 __ b(nz, &runtime);
8717 __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
8718
8719 __ bind(&seq_string);
8720 // r1: subject string type & kStringRepresentationEncodingMask
8721 // subject: Subject string
8722 // regexp_data: RegExp data (FixedArray)
8723 // Check that the irregexp code has been generated for an ascii string. If
8724 // it has, the field contains a code object; otherwise it contains the hole.
8725#ifdef DEBUG
8726 const int kSeqAsciiString = kStringTag | kSeqStringTag | kAsciiStringTag;
8727 const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
8728 CHECK_EQ(4, kSeqAsciiString);
8729 CHECK_EQ(0, kSeqTwoByteString);
8730#endif
8731 // Find the code object based on the assumptions above.
8732 __ mov(r3, Operand(r1, ASR, 2), SetCC);
8733 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
8734 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
8735
8736 // Check that the irregexp code has been generated for the actual string
8737 // encoding. If it has, the field contains a code object; otherwise it contains
8738 // the hole.
8739 __ CompareObjectType(r7, r0, r0, CODE_TYPE);
8740 __ b(ne, &runtime);
8741
8742 // r3: encoding of subject string (1 if ascii, 0 if two_byte);
8743 // r7: code
8744 // subject: Subject string
8745 // regexp_data: RegExp data (FixedArray)
8746 // Load used arguments before starting to push arguments for call to native
8747 // RegExp code to avoid handling changing stack height.
8748 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
8749 __ mov(r1, Operand(r1, ASR, kSmiTagSize));
8750
8751 // r1: previous index
8752 // r3: encoding of subject string (1 if ascii, 0 if two_byte);
8753 // r7: code
8754 // subject: Subject string
8755 // regexp_data: RegExp data (FixedArray)
8756 // All checks done. Now push arguments for native regexp code.
8757 __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
8758
8759 static const int kRegExpExecuteArguments = 7;
8760 __ push(lr);
8761 __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
8762
8763 // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
8764 __ mov(r0, Operand(1));
8765 __ str(r0, MemOperand(sp, 2 * kPointerSize));
8766
8767 // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
8768 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
8769 __ ldr(r0, MemOperand(r0, 0));
8770 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
8771 __ ldr(r2, MemOperand(r2, 0));
8772 __ add(r0, r0, Operand(r2));
8773 __ str(r0, MemOperand(sp, 1 * kPointerSize));
8774
8775 // Argument 5 (sp[0]): static offsets vector buffer.
8776 __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
8777 __ str(r0, MemOperand(sp, 0 * kPointerSize));
8778
8779 // For arguments 4 and 3 get string length, calculate start of string data and
8780 // calculate the shift of the index (0 for ASCII and 1 for two byte).
8781 __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
8782 __ mov(r0, Operand(r0, ASR, kSmiTagSize));
8783 ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
8784 __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
8785 __ eor(r3, r3, Operand(1));
8786 // Argument 4 (r3): End of string data
8787 // Argument 3 (r2): Start of string data
8788 __ add(r2, r9, Operand(r1, LSL, r3));
8789 __ add(r3, r9, Operand(r0, LSL, r3));
8790
8791 // Argument 2 (r1): Previous index.
8792 // Already there
8793
8794 // Argument 1 (r0): Subject string.
8795 __ mov(r0, subject);
8796
8797 // Locate the code entry and call it.
8798 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
8799 __ CallCFunction(r7, kRegExpExecuteArguments);
8800 __ pop(lr);
8801
8802 // r0: result
8803 // subject: subject string (callee saved)
8804 // regexp_data: RegExp data (callee saved)
8805 // last_match_info_elements: Last match info elements (callee saved)
8806
8807 // Check the result.
8808 Label success;
8809 __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
8810 __ b(eq, &success);
8811 Label failure;
8812 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
8813 __ b(eq, &failure);
8814 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
8815 // If not exception it can only be retry. Handle that in the runtime system.
8816 __ b(ne, &runtime);
8817 // Result must now be exception. If there is no pending exception already, a
8818 // stack overflow (on the backtrack stack) was detected in RegExp code, but
8819 // the exception has not been created yet. Handle that in the runtime system.
8820 // TODO(592): Rerun the RegExp to get the stack overflow exception.
8821 __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
8822 __ ldr(r0, MemOperand(r0, 0));
8823 __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
8824 __ ldr(r1, MemOperand(r1, 0));
8825 __ cmp(r0, r1);
8826 __ b(eq, &runtime);
8827 __ bind(&failure);
8828 // For failure and exception return null.
8829 __ mov(r0, Operand(Factory::null_value()));
8830 __ add(sp, sp, Operand(4 * kPointerSize));
8831 __ Ret();
8832
8833 // Process the result from the native regexp code.
8834 __ bind(&success);
8835 __ ldr(r1,
8836 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
8837 // Calculate number of capture registers (number_of_captures + 1) * 2.
8838 ASSERT_EQ(0, kSmiTag);
8839 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
8840 __ add(r1, r1, Operand(2)); // r1 was a smi.
8841
8842 // r1: number of capture registers
8843 // r4: subject string
8844 // Store the capture count.
8845 __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
8846 __ str(r2, FieldMemOperand(last_match_info_elements,
8847 RegExpImpl::kLastCaptureCountOffset));
8848 // Store last subject and last input.
8849 __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
8850 __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset)); // Ditto.
8851 __ str(subject,
8852 FieldMemOperand(last_match_info_elements,
8853 RegExpImpl::kLastSubjectOffset));
8854 __ RecordWrite(r3, r2, r7);
8855 __ str(subject,
8856 FieldMemOperand(last_match_info_elements,
8857 RegExpImpl::kLastInputOffset));
8858 __ mov(r3, last_match_info_elements);
8859 __ mov(r2, Operand(RegExpImpl::kLastInputOffset));
8860 __ RecordWrite(r3, r2, r7);
8861
8862 // Get the static offsets vector filled by the native regexp code.
8863 ExternalReference address_of_static_offsets_vector =
8864 ExternalReference::address_of_static_offsets_vector();
8865 __ mov(r2, Operand(address_of_static_offsets_vector));
8866
8867 // r1: number of capture registers
8868 // r2: offsets vector
8869 Label next_capture, done;
8870 // Capture register counter starts from number of capture registers and
8871 // counts down until wraping after zero.
8872 __ add(r0,
8873 last_match_info_elements,
8874 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
8875 __ bind(&next_capture);
8876 __ sub(r1, r1, Operand(1), SetCC);
8877 __ b(mi, &done);
8878 // Read the value from the static offsets vector buffer.
8879 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
8880 // Store the smi value in the last match info.
8881 __ mov(r3, Operand(r3, LSL, kSmiTagSize));
8882 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
8883 __ jmp(&next_capture);
8884 __ bind(&done);
8885
8886 // Return last match info.
8887 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
8888 __ add(sp, sp, Operand(4 * kPointerSize));
8889 __ Ret();
8890
8891 // Do the runtime call to execute the regexp.
8892 __ bind(&runtime);
8893 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
Kristian Monsen25f61362010-05-21 11:50:48 +01008894#endif // V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00008895}
8896
8897
8898void CallFunctionStub::Generate(MacroAssembler* masm) {
8899 Label slow;
Leon Clarkee46be812010-01-19 14:06:41 +00008900
8901 // If the receiver might be a value (string, number or boolean) check for this
8902 // and box it if it is.
8903 if (ReceiverMightBeValue()) {
8904 // Get the receiver from the stack.
8905 // function, receiver [, arguments]
8906 Label receiver_is_value, receiver_is_js_object;
8907 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
8908
8909 // Check if receiver is a smi (which is a number value).
8910 __ BranchOnSmi(r1, &receiver_is_value);
8911
8912 // Check if the receiver is a valid JS object.
8913 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
8914 __ b(ge, &receiver_is_js_object);
8915
8916 // Call the runtime to box the value.
8917 __ bind(&receiver_is_value);
8918 __ EnterInternalFrame();
8919 __ push(r1);
8920 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
8921 __ LeaveInternalFrame();
8922 __ str(r0, MemOperand(sp, argc_ * kPointerSize));
8923
8924 __ bind(&receiver_is_js_object);
8925 }
8926
Steve Blocka7e24c12009-10-30 11:49:00 +00008927 // Get the function to call from the stack.
8928 // function, receiver [, arguments]
8929 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
8930
8931 // Check that the function is really a JavaScript function.
8932 // r1: pushed function (to be verified)
8933 __ BranchOnSmi(r1, &slow);
8934 // Get the map of the function object.
8935 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
8936 __ b(ne, &slow);
8937
8938 // Fast-case: Invoke the function now.
8939 // r1: pushed function
8940 ParameterCount actual(argc_);
8941 __ InvokeFunction(r1, actual, JUMP_FUNCTION);
8942
8943 // Slow-case: Non-function called.
8944 __ bind(&slow);
Andrei Popescu402d9372010-02-26 13:31:12 +00008945 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
8946 // of the original receiver from the call site).
8947 __ str(r1, MemOperand(sp, argc_ * kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00008948 __ mov(r0, Operand(argc_)); // Setup the number of arguments.
8949 __ mov(r2, Operand(0));
8950 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
8951 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
8952 RelocInfo::CODE_TARGET);
8953}
8954
8955
Steve Block6ded16b2010-05-10 14:33:55 +01008956// Unfortunately you have to run without snapshots to see most of these
8957// names in the profile since most compare stubs end up in the snapshot.
Leon Clarkee46be812010-01-19 14:06:41 +00008958const char* CompareStub::GetName() {
Steve Block6ded16b2010-05-10 14:33:55 +01008959 if (name_ != NULL) return name_;
8960 const int kMaxNameLength = 100;
8961 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
8962 if (name_ == NULL) return "OOM";
8963
8964 const char* cc_name;
Leon Clarkee46be812010-01-19 14:06:41 +00008965 switch (cc_) {
Steve Block6ded16b2010-05-10 14:33:55 +01008966 case lt: cc_name = "LT"; break;
8967 case gt: cc_name = "GT"; break;
8968 case le: cc_name = "LE"; break;
8969 case ge: cc_name = "GE"; break;
8970 case eq: cc_name = "EQ"; break;
8971 case ne: cc_name = "NE"; break;
8972 default: cc_name = "UnknownCondition"; break;
Leon Clarkee46be812010-01-19 14:06:41 +00008973 }
Steve Block6ded16b2010-05-10 14:33:55 +01008974
8975 const char* strict_name = "";
8976 if (strict_ && (cc_ == eq || cc_ == ne)) {
8977 strict_name = "_STRICT";
8978 }
8979
8980 const char* never_nan_nan_name = "";
8981 if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
8982 never_nan_nan_name = "_NO_NAN";
8983 }
8984
8985 const char* include_number_compare_name = "";
8986 if (!include_number_compare_) {
8987 include_number_compare_name = "_NO_NUMBER";
8988 }
8989
8990 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
8991 "CompareStub_%s%s%s%s",
8992 cc_name,
8993 strict_name,
8994 never_nan_nan_name,
8995 include_number_compare_name);
8996 return name_;
Leon Clarkee46be812010-01-19 14:06:41 +00008997}
8998
8999
Steve Blocka7e24c12009-10-30 11:49:00 +00009000int CompareStub::MinorKey() {
Steve Block6ded16b2010-05-10 14:33:55 +01009001 // Encode the four parameters in a unique 16 bit value. To avoid duplicate
9002 // stubs, the never NaN NaN condition is only taken into account if the
9003 // condition is equality.
9004 ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
9005 return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
9006 | StrictField::encode(strict_)
9007 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
9008 | IncludeNumberCompareField::encode(include_number_compare_);
Steve Blocka7e24c12009-10-30 11:49:00 +00009009}
9010
9011
Steve Block6ded16b2010-05-10 14:33:55 +01009012void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
9013 Register object,
9014 Register index,
9015 Register scratch,
9016 Register result,
9017 Label* receiver_not_string,
9018 Label* index_not_smi,
9019 Label* index_out_of_range,
9020 Label* slow_case) {
9021 Label not_a_flat_string;
9022 Label try_again_with_new_string;
9023 Label ascii_string;
9024 Label got_char_code;
9025
9026 // If the receiver is a smi trigger the non-string case.
9027 __ BranchOnSmi(object, receiver_not_string);
9028
9029 // Fetch the instance type of the receiver into result register.
9030 __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
9031 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
9032 // If the receiver is not a string trigger the non-string case.
9033 __ tst(result, Operand(kIsNotStringMask));
9034 __ b(ne, receiver_not_string);
9035
9036 // If the index is non-smi trigger the non-smi case.
9037 __ BranchOnNotSmi(index, index_not_smi);
9038
9039 // Check for index out of range.
9040 __ ldr(scratch, FieldMemOperand(object, String::kLengthOffset));
9041 // Now scratch has the length of the string. Compare with the index.
9042 __ cmp(scratch, Operand(index));
9043 __ b(ls, index_out_of_range);
9044
9045 __ bind(&try_again_with_new_string);
9046 // ----------- S t a t e -------------
9047 // -- object : string to access
9048 // -- result : instance type of the string
9049 // -- scratch : non-negative index < length
9050 // -----------------------------------
9051
9052 // We need special handling for non-flat strings.
9053 ASSERT_EQ(0, kSeqStringTag);
9054 __ tst(result, Operand(kStringRepresentationMask));
9055 __ b(ne, &not_a_flat_string);
9056
9057 // Check for 1-byte or 2-byte string.
9058 ASSERT_EQ(0, kTwoByteStringTag);
9059 __ tst(result, Operand(kStringEncodingMask));
9060 __ b(ne, &ascii_string);
9061
9062 // 2-byte string. We can add without shifting since the Smi tag size is the
9063 // log2 of the number of bytes in a two-byte character.
9064 ASSERT_EQ(1, kSmiTagSize);
9065 ASSERT_EQ(0, kSmiShiftSize);
9066 __ add(scratch, object, Operand(index));
9067 __ ldrh(result, FieldMemOperand(scratch, SeqTwoByteString::kHeaderSize));
9068 __ jmp(&got_char_code);
9069
9070 // Handle non-flat strings.
9071 __ bind(&not_a_flat_string);
9072 __ and_(result, result, Operand(kStringRepresentationMask));
9073 __ cmp(result, Operand(kConsStringTag));
9074 __ b(ne, slow_case);
9075
9076 // ConsString.
9077 // Check whether the right hand side is the empty string (i.e. if
9078 // this is really a flat string in a cons string). If that is not
9079 // the case we would rather go to the runtime system now to flatten
9080 // the string.
9081 __ ldr(result, FieldMemOperand(object, ConsString::kSecondOffset));
9082 __ LoadRoot(scratch, Heap::kEmptyStringRootIndex);
9083 __ cmp(result, Operand(scratch));
9084 __ b(ne, slow_case);
9085
9086 // Get the first of the two strings and load its instance type.
9087 __ ldr(object, FieldMemOperand(object, ConsString::kFirstOffset));
9088 __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
9089 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
9090 __ jmp(&try_again_with_new_string);
9091
9092 // ASCII string.
9093 __ bind(&ascii_string);
9094 __ add(scratch, object, Operand(index, LSR, kSmiTagSize));
9095 __ ldrb(result, FieldMemOperand(scratch, SeqAsciiString::kHeaderSize));
9096
9097 __ bind(&got_char_code);
9098 __ mov(result, Operand(result, LSL, kSmiTagSize));
9099}
9100
9101
9102void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
9103 Register code,
9104 Register scratch,
9105 Register result,
9106 InvokeFlag flag) {
9107 ASSERT(!code.is(result));
9108
9109 Label slow_case;
9110 Label exit;
9111
9112 // Fast case of Heap::LookupSingleCharacterStringFromCode.
9113 ASSERT(kSmiTag == 0);
9114 ASSERT(kSmiShiftSize == 0);
9115 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
9116 __ tst(code, Operand(kSmiTagMask |
9117 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
9118 __ b(nz, &slow_case);
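  // The single tst above checks both that code is a smi and that its untagged
  // value is in 0..String::kMaxAsciiCharCode: any set bit in the tag position
  // or above the ASCII range (including the sign bit) sends us to the slow
  // case.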
9119
9120 ASSERT(kSmiTag == 0);
9121 __ mov(result, Operand(Factory::single_character_string_cache()));
9122 __ add(result, result, Operand(code, LSL, kPointerSizeLog2 - kSmiTagSize));
9123 __ ldr(result, MemOperand(result, FixedArray::kHeaderSize - kHeapObjectTag));
9124 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
9125 __ cmp(result, scratch);
9126 __ b(eq, &slow_case);
9127 __ b(&exit);
9128
9129 __ bind(&slow_case);
9130 if (flag == CALL_FUNCTION) {
9131 __ push(code);
9132 __ CallRuntime(Runtime::kCharFromCode, 1);
9133 if (!result.is(r0)) {
9134 __ mov(result, r0);
9135 }
9136 } else {
9137 ASSERT(flag == JUMP_FUNCTION);
9138 ASSERT(result.is(r0));
9139 __ push(code);
9140 __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
9141 }
9142
9143 __ bind(&exit);
9144 if (flag == JUMP_FUNCTION) {
9145 ASSERT(result.is(r0));
9146 __ Ret();
9147 }
9148}
9149
9150
9151void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
9152 Register dest,
9153 Register src,
9154 Register count,
9155 Register scratch,
9156 bool ascii) {
Andrei Popescu31002712010-02-23 13:46:05 +00009157 Label loop;
9158 Label done;
9159 // This loop just copies one character at a time, as it is only used for very
9160 // short strings.
9161 if (!ascii) {
9162 __ add(count, count, Operand(count), SetCC);
9163 } else {
9164 __ cmp(count, Operand(0));
9165 }
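  // For two-byte strings the character count was doubled above, so from here
  // on count is a byte count; both paths set the flags so the branch below
  // skips the loop when there is nothing to copy.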
9166 __ b(eq, &done);
9167
9168 __ bind(&loop);
9169 __ ldrb(scratch, MemOperand(src, 1, PostIndex));
9170 // Perform the sub between the load and the dependent store to give the load
9171 // time to complete.
9172 __ sub(count, count, Operand(1), SetCC);
9173 __ strb(scratch, MemOperand(dest, 1, PostIndex));
9174 // Branch back unless this was the last iteration.
9175 __ b(gt, &loop);
9176
9177 __ bind(&done);
9178}
9179
9180
9181enum CopyCharactersFlags {
9182 COPY_ASCII = 1,
9183 DEST_ALWAYS_ALIGNED = 2
9184};
9185
9186
Steve Block6ded16b2010-05-10 14:33:55 +01009187void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
9188 Register dest,
9189 Register src,
9190 Register count,
9191 Register scratch1,
9192 Register scratch2,
9193 Register scratch3,
9194 Register scratch4,
9195 Register scratch5,
9196 int flags) {
Andrei Popescu31002712010-02-23 13:46:05 +00009197 bool ascii = (flags & COPY_ASCII) != 0;
9198 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
9199
9200 if (dest_always_aligned && FLAG_debug_code) {
9201 // Check that destination is actually word aligned if the flag says
9202 // that it is.
9203 __ tst(dest, Operand(kPointerAlignmentMask));
9204 __ Check(eq, "Destination of copy not aligned.");
9205 }
9206
9207 const int kReadAlignment = 4;
9208 const int kReadAlignmentMask = kReadAlignment - 1;
9209 // Ensure that reading an entire aligned word containing the last character
9210 // of a string will not read outside the allocated area (because we pad up
9211 // to kObjectAlignment).
9212 ASSERT(kObjectAlignment >= kReadAlignment);
9213 // Assumes word reads and writes are little endian.
9214 // Nothing to do for zero characters.
9215 Label done;
9216 if (!ascii) {
9217 __ add(count, count, Operand(count), SetCC);
9218 } else {
9219 __ cmp(count, Operand(0));
9220 }
9221 __ b(eq, &done);
9222
9223 // Assume that you cannot read (or write) unaligned.
9224 Label byte_loop;
9225 // Must copy at least eight bytes, otherwise just do it one byte at a time.
9226 __ cmp(count, Operand(8));
9227 __ add(count, dest, Operand(count));
9228 Register limit = count; // Read until src equals this.
9229 __ b(lt, &byte_loop);
9230
9231 if (!dest_always_aligned) {
9232 // Align dest by byte copying. Copies between zero and three bytes.
9233 __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
9234 Label dest_aligned;
9235 __ b(eq, &dest_aligned);
9236 __ cmp(scratch4, Operand(2));
9237 __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
9238 __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
9239 __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
9240 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
9241 __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
9242 __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
9243 __ bind(&dest_aligned);
9244 }
9245
9246 Label simple_loop;
9247
9248 __ sub(scratch4, dest, Operand(src));
9249 __ and_(scratch4, scratch4, Operand(0x03), SetCC);
9250 __ b(eq, &simple_loop);
9251 // Shift register is number of bits in a source word that
9252 // must be combined with bits in the next source word in order
9253 // to create a destination word.
9254
9255 // Complex loop for src/dst that are not aligned the same way.
9256 {
9257 Label loop;
9258 __ mov(scratch4, Operand(scratch4, LSL, 3));
9259 Register left_shift = scratch4;
9260 __ and_(src, src, Operand(~3)); // Round down to load previous word.
9261 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
9262 // Store the "shift" most significant bits of scratch in the least
9263 // significant bits (i.e., shift down by (32 - shift)).
9264 __ rsb(scratch2, left_shift, Operand(32));
9265 Register right_shift = scratch2;
9266 __ mov(scratch1, Operand(scratch1, LSR, right_shift));
9267
9268 __ bind(&loop);
9269 __ ldr(scratch3, MemOperand(src, 4, PostIndex));
9270 __ sub(scratch5, limit, Operand(dest));
9271 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
9272 __ str(scratch1, MemOperand(dest, 4, PostIndex));
9273 __ mov(scratch1, Operand(scratch3, LSR, right_shift));
9274 // Loop if four or more bytes left to copy.
9275 // Compare to eight, because we did the subtract before increasing dst.
9276 __ sub(scratch5, scratch5, Operand(8), SetCC);
9277 __ b(ge, &loop);
9278 }
9279 // There is now between zero and three bytes left to copy (negative that
9280 // number is in scratch5), and between one and three bytes already read into
9281 // scratch1 (eight times that number in scratch4). We may have read past
9282 // the end of the string, but because objects are aligned, we have not read
9283 // past the end of the object.
9284 // Find the minimum of remaining characters to move and preloaded characters
9285 // and write those as bytes.
9286 __ add(scratch5, scratch5, Operand(4), SetCC);
9287 __ b(eq, &done);
9288 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
9289 // Move minimum of bytes read and bytes left to copy to scratch4.
9290 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
9291 // Between one and three (value in scratch5) characters already read into
9292 // scratch ready to write.
9293 __ cmp(scratch5, Operand(2));
9294 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
9295 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
9296 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
9297 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
9298 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
9299 // Copy any remaining bytes.
9300 __ b(&byte_loop);
9301
9302 // Simple loop.
9303 // Copy words from src to dst, until less than four bytes left.
9304 // Both src and dest are word aligned.
9305 __ bind(&simple_loop);
9306 {
9307 Label loop;
9308 __ bind(&loop);
9309 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
9310 __ sub(scratch3, limit, Operand(dest));
9311 __ str(scratch1, MemOperand(dest, 4, PostIndex));
9312 // Compare to 8, not 4, because we do the subtraction before increasing
9313 // dest.
9314 __ cmp(scratch3, Operand(8));
9315 __ b(ge, &loop);
9316 }
9317
9318 // Copy bytes from src to dst until dst hits limit.
9319 __ bind(&byte_loop);
9320 __ cmp(dest, Operand(limit));
9321 __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
9322 __ b(ge, &done);
9323 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
9324 __ b(&byte_loop);
9325
9326 __ bind(&done);
9327}
9328
9329
Steve Block6ded16b2010-05-10 14:33:55 +01009330void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
9331 Register c1,
9332 Register c2,
9333 Register scratch1,
9334 Register scratch2,
9335 Register scratch3,
9336 Register scratch4,
9337 Register scratch5,
9338 Label* not_found) {
9339 // Register scratch3 is the general scratch register in this function.
9340 Register scratch = scratch3;
9341
9342 // Make sure that both characters are not digits, as such strings have a
9343 // different hash algorithm. Don't try to look for these in the symbol table.
9344 Label not_array_index;
9345 __ sub(scratch, c1, Operand(static_cast<int>('0')));
9346 __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
9347 __ b(hi, &not_array_index);
9348 __ sub(scratch, c2, Operand(static_cast<int>('0')));
9349 __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
9350
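  // At this point c1 is known to be a digit, so the "ls" condition below is
  // true only when c2 is a digit as well, i.e. the string looks like an array
  // index.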
9351 // If the check failed, combine both characters into a single halfword.
9352 // This is required by the contract of the method: code at the
9353 // not_found branch expects this combination in the c1 register.
9354 __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
9355 __ b(ls, not_found);
9356
9357 __ bind(&not_array_index);
9358 // Calculate the two character string hash.
9359 Register hash = scratch1;
9360 StringHelper::GenerateHashInit(masm, hash, c1);
9361 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
9362 StringHelper::GenerateHashGetHash(masm, hash);
9363
9364 // Collect the two characters in a register.
9365 Register chars = c1;
9366 __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
9367
9368 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
9369 // hash: hash of two character string.
9370
9371 // Load the symbol table.
9372 // (The address of its first element is computed further down.)
9373 Register symbol_table = c2;
9374 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
9375
9376 // Load undefined value
9377 Register undefined = scratch4;
9378 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
9379
9380 // Calculate capacity mask from the symbol table capacity.
9381 Register mask = scratch2;
9382 __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
9383 __ mov(mask, Operand(mask, ASR, 1));
9384 __ sub(mask, mask, Operand(1));
9385
9386 // Calculate untagged address of the first element of the symbol table.
9387 Register first_symbol_table_element = symbol_table;
9388 __ add(first_symbol_table_element, symbol_table,
9389 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
9390
9391 // Registers
9392 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
9393 // hash: hash of two character string
9394 // mask: capacity mask
9395 // first_symbol_table_element: address of the first element of
9396 // the symbol table
9397 // scratch: -
9398
9399 // Perform a number of probes in the symbol table.
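  // Only the first kProbes probes are generated inline; if none of them
  // finds a match we give up and jump to not_found below.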
9400 static const int kProbes = 4;
9401 Label found_in_symbol_table;
9402 Label next_probe[kProbes];
9403 for (int i = 0; i < kProbes; i++) {
9404 Register candidate = scratch5; // Scratch register contains candidate.
9405
9406 // Calculate entry in symbol table.
9407 if (i > 0) {
9408 __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
9409 } else {
9410 __ mov(candidate, hash);
9411 }
9412
9413 __ and_(candidate, candidate, Operand(mask));
9414
9415 // Load the entry from the symbol table.
9416 ASSERT_EQ(1, SymbolTable::kEntrySize);
9417 __ ldr(candidate,
9418 MemOperand(first_symbol_table_element,
9419 candidate,
9420 LSL,
9421 kPointerSizeLog2));
9422
9423 // If entry is undefined no string with this hash can be found.
9424 __ cmp(candidate, undefined);
9425 __ b(eq, not_found);
9426
9427 // If length is not 2 the string is not a candidate.
9428 __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
9429 __ cmp(scratch, Operand(Smi::FromInt(2)));
9430 __ b(ne, &next_probe[i]);
9431
9432 // Check that the candidate is a non-external ascii string.
9433 __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
9434 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
9435 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
9436 &next_probe[i]);
9437
9438 // Check if the two characters match.
9439 // Assumes that the halfword load is little endian.
9440 __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
9441 __ cmp(chars, scratch);
9442 __ b(eq, &found_in_symbol_table);
9443 __ bind(&next_probe[i]);
9444 }
9445
9446 // No matching 2 character string found by probing.
9447 __ jmp(not_found);
9448
9449 // The candidate register (scratch5) contains the matched symbol here.
9450 Register result = scratch5;
9451 __ bind(&found_in_symbol_table);
9452 __ Move(r0, result);
9453}
9454
9455
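// Taken together, the three hash helpers below compute the running string
// hash that the runtime uses when interning strings. Roughly, in C (a sketch
// of the operations they emit, not the emitted code itself):
//   hash = c0 + (c0 << 10);  hash ^= hash >> 6;            // init
//   hash += c;  hash += hash << 10;  hash ^= hash >> 6;    // add character
//   hash += hash << 3;  hash ^= hash >> 11;                // get hash
//   hash += hash << 15;  if (hash == 0) hash = 27;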
9456void StringHelper::GenerateHashInit(MacroAssembler* masm,
9457 Register hash,
9458 Register character) {
9459 // hash = character + (character << 10);
9460 __ add(hash, character, Operand(character, LSL, 10));
9461 // hash ^= hash >> 6;
9462 __ eor(hash, hash, Operand(hash, ASR, 6));
9463}
9464
9465
9466void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
9467 Register hash,
9468 Register character) {
9469 // hash += character;
9470 __ add(hash, hash, Operand(character));
9471 // hash += hash << 10;
9472 __ add(hash, hash, Operand(hash, LSL, 10));
9473 // hash ^= hash >> 6;
9474 __ eor(hash, hash, Operand(hash, ASR, 6));
9475}
9476
9477
9478void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
9479 Register hash) {
9480 // hash += hash << 3;
9481 __ add(hash, hash, Operand(hash, LSL, 3));
9482 // hash ^= hash >> 11;
9483 __ eor(hash, hash, Operand(hash, ASR, 11));
9484 // hash += hash << 15;
9485 __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
9486
9487 // if (hash == 0) hash = 27;
9488 __ mov(hash, Operand(27), LeaveCC, eq);
9489}
9490
9491
Andrei Popescu31002712010-02-23 13:46:05 +00009492void SubStringStub::Generate(MacroAssembler* masm) {
9493 Label runtime;
9494
9495 // Stack frame on entry.
9496 // lr: return address
9497 // sp[0]: to
9498 // sp[4]: from
9499 // sp[8]: string
9500
9501 // This stub is called from the native-call %_SubString(...), so
9502 // nothing can be assumed about the arguments. It is tested that:
9503 // "string" is a sequential string,
9504 // both "from" and "to" are smis, and
9505 // 0 <= from <= to <= string.length.
9506 // If any of these assumptions fail, we call the runtime system.
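 // The fast paths below are, in order:
 //   - substrings of length 0 or 1 go to the runtime,
 //   - ascii substrings of length 2 are looked up in the symbol table
 //     (or allocated directly on a miss),
 //   - all other substrings are copied into a freshly allocated sequential
 //     string (cons strings are unwrapped once; external strings go to the
 //     runtime).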
9507
9508 static const int kToOffset = 0 * kPointerSize;
9509 static const int kFromOffset = 1 * kPointerSize;
9510 static const int kStringOffset = 2 * kPointerSize;
9511
9512
9513 // Check bounds and smi-ness.
9514 __ ldr(r7, MemOperand(sp, kToOffset));
9515 __ ldr(r6, MemOperand(sp, kFromOffset));
9516 ASSERT_EQ(0, kSmiTag);
9517 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
9518 // I.e., arithmetic shift right by one un-smi-tags.
9519 __ mov(r2, Operand(r7, ASR, 1), SetCC);
9520 __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
9521 // If either r7 or r6 had the smi tag bit set, then carry is set now.
9522 __ b(cs, &runtime); // Either "from" or "to" is not a smi.
9523 __ b(mi, &runtime); // From is negative.
9524
9525 __ sub(r2, r2, Operand(r3), SetCC);
9526 __ b(mi, &runtime); // Fail if from > to.
Steve Block6ded16b2010-05-10 14:33:55 +01009527 // Special handling of sub-strings of length 1 and 2. One character strings
9528 // are handled in the runtime system (looked up in the single character
9529 // cache). Two character strings are looked up in the symbol table.
Andrei Popescu31002712010-02-23 13:46:05 +00009530 __ cmp(r2, Operand(2));
Steve Block6ded16b2010-05-10 14:33:55 +01009531 __ b(lt, &runtime);
Andrei Popescu31002712010-02-23 13:46:05 +00009532
9533 // r2: length
Steve Block6ded16b2010-05-10 14:33:55 +01009534 // r3: from index (untagged smi)
Andrei Popescu31002712010-02-23 13:46:05 +00009535 // r6: from (smi)
9536 // r7: to (smi)
9537
9538 // Make sure first argument is a sequential (or flat) string.
9539 __ ldr(r5, MemOperand(sp, kStringOffset));
9540 ASSERT_EQ(0, kSmiTag);
9541 __ tst(r5, Operand(kSmiTagMask));
9542 __ b(eq, &runtime);
9543 Condition is_string = masm->IsObjectStringType(r5, r1);
9544 __ b(NegateCondition(is_string), &runtime);
9545
9546 // r1: instance type
9547 // r2: length
Steve Block6ded16b2010-05-10 14:33:55 +01009548 // r3: from index (untagged smi)
Andrei Popescu31002712010-02-23 13:46:05 +00009549 // r5: string
9550 // r6: from (smi)
9551 // r7: to (smi)
9552 Label seq_string;
9553 __ and_(r4, r1, Operand(kStringRepresentationMask));
9554 ASSERT(kSeqStringTag < kConsStringTag);
9555 ASSERT(kExternalStringTag > kConsStringTag);
9556 __ cmp(r4, Operand(kConsStringTag));
9557 __ b(gt, &runtime); // External strings go to runtime.
9558 __ b(lt, &seq_string); // Sequential strings are handled directly.
9559
9560 // Cons string. Try to recurse (once) on the first substring.
9561 // (This adds a little more generality than necessary to handle flattened
9562 // cons strings, but not much).
9563 __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
9564 __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
9565 __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9566 __ tst(r1, Operand(kStringRepresentationMask));
9567 ASSERT_EQ(0, kSeqStringTag);
9568 __ b(ne, &runtime); // Cons and External strings go to runtime.
9569
9570 // Definitely a sequential string.
9571 __ bind(&seq_string);
9572
9573 // r1: instance type.
9574 // r2: length
Steve Block6ded16b2010-05-10 14:33:55 +01009575 // r3: from index (untagged smi)
Andrei Popescu31002712010-02-23 13:46:05 +00009576 // r5: string
9577 // r6: from (smi)
9578 // r7: to (smi)
9579 __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01009580 __ cmp(r4, Operand(r7));
Andrei Popescu31002712010-02-23 13:46:05 +00009581 __ b(lt, &runtime); // Fail if to > length.
9582
9583 // r1: instance type.
9584 // r2: result string length.
Steve Block6ded16b2010-05-10 14:33:55 +01009585 // r3: from index (untagged smi)
Andrei Popescu31002712010-02-23 13:46:05 +00009586 // r5: string.
9587 // r6: from offset (smi)
9588 // Check for flat ascii string.
9589 Label non_ascii_flat;
9590 __ tst(r1, Operand(kStringEncodingMask));
9591 ASSERT_EQ(0, kTwoByteStringTag);
9592 __ b(eq, &non_ascii_flat);
9593
Steve Block6ded16b2010-05-10 14:33:55 +01009594 Label result_longer_than_two;
9595 __ cmp(r2, Operand(2));
9596 __ b(gt, &result_longer_than_two);
9597
9598 // Sub string of length 2 requested.
9599 // Get the two characters forming the sub string.
9600 __ add(r5, r5, Operand(r3));
9601 __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
9602 __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
9603
9604 // Try to look up the two character string in the symbol table.
9605 Label make_two_character_string;
9606 StringHelper::GenerateTwoCharacterSymbolTableProbe(
9607 masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
9608 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
9609 __ add(sp, sp, Operand(3 * kPointerSize));
9610 __ Ret();
9611
9612 // r2: result string length.
9613 // r3: two characters combined into halfword in little endian byte order.
9614 __ bind(&make_two_character_string);
9615 __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
9616 __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
9617 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
9618 __ add(sp, sp, Operand(3 * kPointerSize));
9619 __ Ret();
9620
9621 __ bind(&result_longer_than_two);
9622
Andrei Popescu31002712010-02-23 13:46:05 +00009623 // Allocate the result.
9624 __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
9625
9626 // r0: result string.
9627 // r2: result string length.
9628 // r5: string.
9629 // r6: from offset (smi)
9630 // Locate first character of result.
9631 __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9632 // Locate 'from' character of string.
9633 __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9634 __ add(r5, r5, Operand(r6, ASR, 1));
9635
9636 // r0: result string.
9637 // r1: first character of result string.
9638 // r2: result string length.
9639 // r5: first character of sub string to copy.
9640 ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
Steve Block6ded16b2010-05-10 14:33:55 +01009641 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
9642 COPY_ASCII | DEST_ALWAYS_ALIGNED);
Andrei Popescu31002712010-02-23 13:46:05 +00009643 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
9644 __ add(sp, sp, Operand(3 * kPointerSize));
9645 __ Ret();
9646
9647 __ bind(&non_ascii_flat);
9648 // r2: result string length.
9649 // r5: string.
9650 // r6: from offset (smi)
9651 // The string is a flat two byte string at this point.
9652
9653 // Allocate the result.
9654 __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
9655
9656 // r0: result string.
9657 // r2: result string length.
9658 // r5: string.
9659 // Locate first character of result.
9660 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9661 // Locate 'from' character of string.
9662 __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9663 // As "from" is a smi it is already twice the character index, which matches
9664 // the size of a two byte character.
9665 __ add(r5, r5, Operand(r6));
9666
9667 // r0: result string.
9668 // r1: first character of result.
9669 // r2: result length.
9670 // r5: first character of string to copy.
9671 ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
Steve Block6ded16b2010-05-10 14:33:55 +01009672 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
9673 DEST_ALWAYS_ALIGNED);
Andrei Popescu31002712010-02-23 13:46:05 +00009674 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
9675 __ add(sp, sp, Operand(3 * kPointerSize));
9676 __ Ret();
9677
9678 // Just jump to runtime to create the sub string.
9679 __ bind(&runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01009680 __ TailCallRuntime(Runtime::kSubString, 3, 1);
Andrei Popescu31002712010-02-23 13:46:05 +00009681}
Leon Clarked91b9f72010-01-27 17:25:45 +00009682
9683
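// In C terms the comparison below is roughly (a sketch, not the emitted
// code):
//   int min = Min(left->length(), right->length());
//   for (int i = 0; i < min; i++) {
//     if (left[i] != right[i]) return left[i] < right[i] ? LESS : GREATER;
//   }
//   return left->length() == right->length() ? EQUAL
//        : left->length() < right->length() ? LESS : GREATER;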
9684void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
9685 Register left,
9686 Register right,
9687 Register scratch1,
9688 Register scratch2,
9689 Register scratch3,
9690 Register scratch4) {
9691 Label compare_lengths;
9692 // Find minimum length and length difference.
9693 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
9694 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
9695 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
9696 Register length_delta = scratch3;
9697 __ mov(scratch1, scratch2, LeaveCC, gt);
9698 Register min_length = scratch1;
Steve Block6ded16b2010-05-10 14:33:55 +01009699 ASSERT(kSmiTag == 0);
Leon Clarked91b9f72010-01-27 17:25:45 +00009700 __ tst(min_length, Operand(min_length));
9701 __ b(eq, &compare_lengths);
9702
Steve Block6ded16b2010-05-10 14:33:55 +01009703 // Untag smi.
9704 __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
9705
Leon Clarked91b9f72010-01-27 17:25:45 +00009706 // Set up registers so that we only need to increment one register
9707 // in the loop.
9708 __ add(scratch2, min_length,
9709 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9710 __ add(left, left, Operand(scratch2));
9711 __ add(right, right, Operand(scratch2));
9712 // Registers left and right now point just past the min_length prefix of each string.
9713 __ rsb(min_length, min_length, Operand(-1));
9714 Register index = min_length;
9715 // Index starts at -(min_length + 1) and is pre-incremented in the loop below.
9716
9717 {
9718 // Compare loop.
9719 Label loop;
9720 __ bind(&loop);
9721 // Compare characters.
9722 __ add(index, index, Operand(1), SetCC);
9723 __ ldrb(scratch2, MemOperand(left, index), ne);
9724 __ ldrb(scratch4, MemOperand(right, index), ne);
9725 // Skip to compare lengths with eq condition true.
9726 __ b(eq, &compare_lengths);
9727 __ cmp(scratch2, scratch4);
9728 __ b(eq, &loop);
9729 // Fallthrough with eq condition false.
9730 }
9731 // Compare lengths - strings up to min-length are equal.
9732 __ bind(&compare_lengths);
9733 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
9734 // Use zero length_delta as result.
9735 __ mov(r0, Operand(length_delta), SetCC, eq);
9736 // Fall through to here if characters compare not-equal.
9737 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
9738 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
9739 __ Ret();
9740}
9741
9742
9743void StringCompareStub::Generate(MacroAssembler* masm) {
9744 Label runtime;
9745
9746 // Stack frame on entry.
Andrei Popescu31002712010-02-23 13:46:05 +00009747 // sp[0]: right string
9748 // sp[4]: left string
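 // Identical string objects compare equal without any work; flat ascii
 // strings are compared inline below; everything else goes to the runtime.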
9749 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
9750 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
Leon Clarked91b9f72010-01-27 17:25:45 +00009751
9752 Label not_same;
9753 __ cmp(r0, r1);
9754 __ b(ne, &not_same);
9755 ASSERT_EQ(0, EQUAL);
9756 ASSERT_EQ(0, kSmiTag);
9757 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
9758 __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
9759 __ add(sp, sp, Operand(2 * kPointerSize));
9760 __ Ret();
9761
9762 __ bind(&not_same);
9763
9764 // Check that both objects are sequential ascii strings.
9765 __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
9766
9767 // Compare flat ascii strings natively. Remove arguments from stack first.
9768 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
9769 __ add(sp, sp, Operand(2 * kPointerSize));
9770 GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
9771
9772 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
9773 // tagged as a small integer.
9774 __ bind(&runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01009775 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
Leon Clarked91b9f72010-01-27 17:25:45 +00009776}
9777
9778
Andrei Popescu31002712010-02-23 13:46:05 +00009779void StringAddStub::Generate(MacroAssembler* masm) {
9780 Label string_add_runtime;
9781 // Stack on entry:
9782 // sp[0]: second argument.
9783 // sp[4]: first argument.
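 // The stub handles the following cases, in order:
 //   - either argument is the empty string: return the other argument,
 //   - both arguments are one character strings: probe the symbol table for
 //     the combined two character string,
 //   - the combined length is at least String::kMinNonFlatLength: allocate
 //     a cons string,
 //   - otherwise: allocate a flat sequential string and copy both parts.
 // Excessively long or non-sequential inputs bail out to the runtime.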
9784
9785 // Load the two arguments.
9786 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
9787 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
9788
9789 // Make sure that both arguments are strings if not known in advance.
9790 if (string_check_) {
9791 ASSERT_EQ(0, kSmiTag);
9792 __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
9793 // Load instance types.
9794 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
9795 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
9796 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9797 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
9798 ASSERT_EQ(0, kStringTag);
9799 // If either is not a string, go to runtime.
9800 __ tst(r4, Operand(kIsNotStringMask));
9801 __ tst(r5, Operand(kIsNotStringMask), eq);
9802 __ b(ne, &string_add_runtime);
9803 }
9804
9805 // Both arguments are strings.
9806 // r0: first string
9807 // r1: second string
9808 // r4: first string instance type (if string_check_)
9809 // r5: second string instance type (if string_check_)
9810 {
9811 Label strings_not_empty;
9812 // Check if either of the strings are empty. In that case return the other.
9813 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
9814 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01009815 ASSERT(kSmiTag == 0);
9816 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
Andrei Popescu31002712010-02-23 13:46:05 +00009817 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
Steve Block6ded16b2010-05-10 14:33:55 +01009818 ASSERT(kSmiTag == 0);
9819 // Else test if second string is empty.
9820 __ cmp(r3, Operand(Smi::FromInt(0)), ne);
Andrei Popescu31002712010-02-23 13:46:05 +00009821 __ b(ne, &strings_not_empty); // If either string was empty, return r0.
9822
9823 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9824 __ add(sp, sp, Operand(2 * kPointerSize));
9825 __ Ret();
9826
9827 __ bind(&strings_not_empty);
9828 }
9829
Steve Block6ded16b2010-05-10 14:33:55 +01009830 __ mov(r2, Operand(r2, ASR, kSmiTagSize));
9831 __ mov(r3, Operand(r3, ASR, kSmiTagSize));
Andrei Popescu31002712010-02-23 13:46:05 +00009832 // Both strings are non-empty.
9833 // r0: first string
9834 // r1: second string
9835 // r2: length of first string
9836 // r3: length of second string
9837 // r4: first string instance type (if string_check_)
9838 // r5: second string instance type (if string_check_)
9839 // Look at the length of the result of adding the two strings.
Steve Block6ded16b2010-05-10 14:33:55 +01009840 Label string_add_flat_result, longer_than_two;
Andrei Popescu31002712010-02-23 13:46:05 +00009841 // Adding two lengths can't overflow.
9842 ASSERT(String::kMaxLength * 2 > String::kMaxLength);
9843 __ add(r6, r2, Operand(r3));
9844 // Handle adding two one character strings specially: the combined two
9845 // character string may already be present in the symbol table.
9846 __ cmp(r6, Operand(2));
Steve Block6ded16b2010-05-10 14:33:55 +01009847 __ b(ne, &longer_than_two);
9848
9849 // Check that both strings are non-external ascii strings.
9850 if (!string_check_) {
9851 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
9852 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
9853 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9854 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
9855 }
9856 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
9857 &string_add_runtime);
9858
9859 // Get the two characters forming the new string.
9860 __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
9861 __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
9862
9863 // Try to look up the two character string in the symbol table. If it is
9864 // not found, just allocate a new one.
9865 Label make_two_character_string;
9866 StringHelper::GenerateTwoCharacterSymbolTableProbe(
9867 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
9868 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9869 __ add(sp, sp, Operand(2 * kPointerSize));
9870 __ Ret();
9871
9872 __ bind(&make_two_character_string);
9873 // The resulting string has length 2, and the first characters of the two
9874 // strings are already combined into a single halfword in register r2.
9875 // This means the resulting string can be filled with a single halfword
9876 // store instruction instead of two loops (assuming the processor is in
9877 // little endian mode).
9878 __ mov(r6, Operand(2));
9879 __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
9880 __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
9881 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9882 __ add(sp, sp, Operand(2 * kPointerSize));
9883 __ Ret();
9884
9885 __ bind(&longer_than_two);
Andrei Popescu31002712010-02-23 13:46:05 +00009886 // Check if resulting string will be flat.
9887 __ cmp(r6, Operand(String::kMinNonFlatLength));
9888 __ b(lt, &string_add_flat_result);
9889 // Handle exceptionally long strings in the runtime system.
9890 ASSERT((String::kMaxLength & 0x80000000) == 0);
9891 ASSERT(IsPowerOf2(String::kMaxLength + 1));
9892 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
9893 __ cmp(r6, Operand(String::kMaxLength + 1));
9894 __ b(hs, &string_add_runtime);
9895
9896 // If result is not supposed to be flat, allocate a cons string object.
9897 // If both strings are ascii the result is an ascii cons string.
9898 if (!string_check_) {
9899 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
9900 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
9901 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9902 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
9903 }
9904 Label non_ascii, allocated;
9905 ASSERT_EQ(0, kTwoByteStringTag);
9906 __ tst(r4, Operand(kStringEncodingMask));
9907 __ tst(r5, Operand(kStringEncodingMask), ne);
9908 __ b(eq, &non_ascii);
9909
9910 // Allocate an ASCII cons string.
9911 __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
9912 __ bind(&allocated);
9913 // Fill the fields of the cons string.
9914 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
9915 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
9916 __ mov(r0, Operand(r7));
9917 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9918 __ add(sp, sp, Operand(2 * kPointerSize));
9919 __ Ret();
9920
9921 __ bind(&non_ascii);
9922 // Allocate a two byte cons string.
9923 __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
9924 __ jmp(&allocated);
9925
9926 // Handle creating a flat result. First check that both strings are
9927 // sequential and that they have the same encoding.
9928 // r0: first string
9929 // r1: second string
9930 // r2: length of first string
9931 // r3: length of second string
9932 // r4: first string instance type (if string_check_)
9933 // r5: second string instance type (if string_check_)
9934 // r6: sum of lengths.
9935 __ bind(&string_add_flat_result);
9936 if (!string_check_) {
9937 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
9938 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
9939 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9940 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
9941 }
9942 // Check that both strings are sequential.
9943 ASSERT_EQ(0, kSeqStringTag);
9944 __ tst(r4, Operand(kStringRepresentationMask));
9945 __ tst(r5, Operand(kStringRepresentationMask), eq);
9946 __ b(ne, &string_add_runtime);
9947 // Now check if both strings have the same encoding (ASCII/Two-byte).
9948 // r0: first string.
9949 // r1: second string.
9950 // r2: length of first string.
9951 // r3: length of second string.
9952 // r6: sum of lengths.
9953 Label non_ascii_string_add_flat_result;
9954 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
9955 __ eor(r7, r4, Operand(r5));
9956 __ tst(r7, Operand(kStringEncodingMask));
9957 __ b(ne, &string_add_runtime);
9958 // And see if it's ASCII or two-byte.
9959 __ tst(r4, Operand(kStringEncodingMask));
9960 __ b(eq, &non_ascii_string_add_flat_result);
9961
9962 // Both strings are sequential ASCII strings. We also know that they are
9963 // short (since the sum of the lengths is less than kMinNonFlatLength).
Steve Block6ded16b2010-05-10 14:33:55 +01009964 // r6: length of resulting flat string
Andrei Popescu31002712010-02-23 13:46:05 +00009965 __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
9966 // Locate first character of result.
9967 __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9968 // Locate first character of first argument.
9969 __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9970 // r0: first character of first string.
9971 // r1: second string.
9972 // r2: length of first string.
9973 // r3: length of second string.
9974 // r6: first character of result.
9975 // r7: result string.
Steve Block6ded16b2010-05-10 14:33:55 +01009976 StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
Andrei Popescu31002712010-02-23 13:46:05 +00009977
9978 // Load second argument and locate first character.
9979 __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9980 // r1: first character of second string.
9981 // r3: length of second string.
9982 // r6: next character of result.
9983 // r7: result string.
Steve Block6ded16b2010-05-10 14:33:55 +01009984 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
Andrei Popescu31002712010-02-23 13:46:05 +00009985 __ mov(r0, Operand(r7));
9986 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9987 __ add(sp, sp, Operand(2 * kPointerSize));
9988 __ Ret();
9989
9990 __ bind(&non_ascii_string_add_flat_result);
9991 // Both strings are sequential two byte strings.
9992 // r0: first string.
9993 // r1: second string.
9994 // r2: length of first string.
9995 // r3: length of second string.
9996 // r6: sum of length of strings.
9997 __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
9998 // r0: first string.
9999 // r1: second string.
10000 // r2: length of first string.
10001 // r3: length of second string.
10002 // r7: result string.
10003
10004 // Locate first character of result.
10005 __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10006 // Locate first character of first argument.
10007 __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10008
10009 // r0: first character of first string.
10010 // r1: second string.
10011 // r2: length of first string.
10012 // r3: length of second string.
10013 // r6: first character of result.
10014 // r7: result string.
Steve Block6ded16b2010-05-10 14:33:55 +010010015 StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
Andrei Popescu31002712010-02-23 13:46:05 +000010016
10017 // Locate first character of second argument.
10018 __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10019
10020 // r1: first character of second string.
10021 // r3: length of second string.
10022 // r6: next character of result (after copy of first string).
10023 // r7: result string.
Steve Block6ded16b2010-05-10 14:33:55 +010010024 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
Andrei Popescu31002712010-02-23 13:46:05 +000010025
10026 __ mov(r0, Operand(r7));
10027 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
10028 __ add(sp, sp, Operand(2 * kPointerSize));
10029 __ Ret();
10030
10031 // Just jump to runtime to add the two strings.
10032 __ bind(&string_add_runtime);
Steve Block6ded16b2010-05-10 14:33:55 +010010033 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
Andrei Popescu31002712010-02-23 13:46:05 +000010034}
10035
10036
Steve Blocka7e24c12009-10-30 11:49:00 +000010037#undef __
10038
10039} } // namespace v8::internal
Leon Clarkef7060e22010-06-03 12:02:55 +010010040
10041#endif // V8_TARGET_ARCH_ARM