// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"


namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm_)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);


// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order due to the stack.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      true_target_(NULL),
      false_target_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           JumpTarget* true_target,
                           JumpTarget* false_target)
    : owner_(owner),
      true_target_(true_target),
      false_target_(false_target),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// CodeGenerator implementation

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      function_return_is_shadowed_(false) {
}


Scope* CodeGenerator::scope() { return info_->function()->scope(); }


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());

  // Initialize state.
  info_ = info;
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;
  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    if (mode == PRIMARY) {
      frame_->Enter();
      // tos: code slot

      // Allocate space for locals and initialize them. This also checks
      // for stack overflow.
      frame_->AllocateStackSlots();

      VirtualFrame::SpilledScope spilled_scope;
      int heap_slots = scope()->num_heap_slots();
      if (heap_slots > 0) {
        // Allocate local context.
        // Get outer context and create a new context based on it.
        __ ldr(r0, frame_->Function());
        frame_->EmitPush(r0);
        if (heap_slots <= FastNewContextStub::kMaximumSlots) {
          FastNewContextStub stub(heap_slots);
          frame_->CallStub(&stub, 1);
        } else {
          frame_->CallRuntime(Runtime::kNewContext, 1);
        }

#ifdef DEBUG
        JumpTarget verified_true;
        __ cmp(r0, Operand(cp));
        verified_true.Branch(eq);
        __ stop("NewContext: r0 is expected to be the same as cp");
        verified_true.Bind();
#endif
        // Update context local.
        __ str(cp, frame_->Context());
      }

      // TODO(1241774): Improve this code:
      // 1) only needed if we have a context
      // 2) no need to recompute context ptr every single time
      // 3) don't copy parameter operand code from SlotOperand!
      {
        Comment cmnt2(masm_, "[ copy context parameters into .context");
        // Note that iteration order is relevant here! If we have the same
        // parameter twice (e.g., function (x, y, x)) and that parameter
        // needs to be copied into the context, it is the value of the last
        // matching argument that must end up in the context. This is a
        // rare case, so we don't check for it; instead we rely on the
        // copying order: such a parameter is copied repeatedly into the
        // same context location and thus the last value is what is seen
        // inside the function.
        for (int i = 0; i < scope()->num_parameters(); i++) {
          Variable* par = scope()->parameter(i);
          Slot* slot = par->slot();
          if (slot != NULL && slot->type() == Slot::CONTEXT) {
            ASSERT(!scope()->is_global_scope());  // No params in global scope.
            __ ldr(r1, frame_->ParameterAt(i));
            // Loads r2 with context; used below in RecordWrite.
            __ str(r1, SlotOperand(slot, r2));
            // Load the offset into r3.
            int slot_offset =
                FixedArray::kHeaderSize + slot->index() * kPointerSize;
            __ mov(r3, Operand(slot_offset));
            __ RecordWrite(r2, r3, r1);
          }
        }
      }

      // Store the arguments object. This must happen after context
      // initialization because the arguments object may be stored in the
      // context.
      if (scope()->arguments() != NULL) {
        Comment cmnt(masm_, "[ allocate arguments object");
        ASSERT(scope()->arguments_shadow() != NULL);
        Variable* arguments = scope()->arguments()->var();
        Variable* shadow = scope()->arguments_shadow()->var();
        ASSERT(arguments != NULL && arguments->slot() != NULL);
        ASSERT(shadow != NULL && shadow->slot() != NULL);
        ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
        __ ldr(r2, frame_->Function());
        // The receiver is below the arguments, the return address, and the
        // frame pointer on the stack.
        const int kReceiverDisplacement = 2 + scope()->num_parameters();
        __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
        __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
        frame_->Adjust(3);
        __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
        frame_->CallStub(&stub, 3);
        frame_->EmitPush(r0);
        StoreToSlot(arguments->slot(), NOT_CONST_INIT);
        StoreToSlot(shadow->slot(), NOT_CONST_INIT);
        frame_->Drop();  // Value is no longer needed.
      }

      // Initialize ThisFunction reference if present.
      if (scope()->is_function_scope() && scope()->function() != NULL) {
        __ mov(ip, Operand(Factory::the_hole_value()));
        frame_->EmitPush(ip);
        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
      }
    } else {
      // When used as the secondary compiler for splitting, r1, cp,
      // fp, and lr have been pushed on the stack. Adjust the virtual
      // frame to match this state.
      frame_->Adjust(4);
      allocator_->Unuse(r1);
      allocator_->Unuse(lr);
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatementsAndSpill(info->function()->body());
    }
  }

  // Generate the return sequence if necessary.
  if (has_valid_frame() || function_return_.is_linked()) {
    if (!function_return_.is_linked()) {
      CodeForReturnPosition(info->function());
    }
    // exit
    // r0: result
    // sp: stack pointer
    // fp: frame pointer
    // cp: callee's context
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);

    function_return_.Bind();
    if (FLAG_trace) {
      // Push the return value on the stack as the parameter.
      // Runtime::TraceExit returns the parameter as it is.
      frame_->EmitPush(r0);
      frame_->CallRuntime(Runtime::kTraceExit, 1);
    }

    // Add a label for checking the size of the code used for returning.
    Label check_exit_codesize;
    masm_->bind(&check_exit_codesize);

    // Calculate the exact length of the return sequence and make sure that
    // the constant pool is not emitted inside the return sequence.
    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
    int return_sequence_length = Assembler::kJSReturnSequenceLength;
    if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
      // An additional mov instruction is generated.
      return_sequence_length++;
    }
    masm_->BlockConstPoolFor(return_sequence_length);

    // Tear down the frame, which restores the caller's frame pointer and
    // the link register.
    frame_->Exit();

    // Here we use masm_-> instead of the __ macro to prevent the code
    // coverage tool from instrumenting this code, since we rely on the
    // exact code size here.
    masm_->add(sp, sp, Operand(sp_delta));
    masm_->Jump(lr);

    // Check that the size of the code used for returning matches what is
    // expected by the debugger. The add instruction above is an addressing
    // mode 1 instruction: there are restrictions on which immediate values
    // can be encoded directly, and other immediate values require an
    // additional instruction to move the immediate into a temporary
    // register.
    ASSERT_EQ(return_sequence_length,
              masm_->InstructionsGeneratedSince(&check_exit_codesize));
  }

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is, when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we
      // are always at a function context. However, it is safe to
      // dereference, because the function context of a function context
      // is itself. Before deleting this mov we should try to create a
      // counter-example first, though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}


MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { CodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression. In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (e.g., a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Jump();
    materialize_true.Bind();
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
      frame_->EmitPush(r0);
    }
    // If both "true" and "false" need to be loaded, jump across the code
    // for "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
      frame_->EmitPush(r0);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::LoadGlobal() {
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(r0, GlobalObject());
  frame_->EmitPush(r0);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  VirtualFrame::SpilledScope spilled_scope;
  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(scratch,
         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(scratch);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  VirtualFrame::SpilledScope spilled_scope;
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValueAndSpill();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
    frame_->SpillAll();
  } else {
    // Anything else can be handled normally.
    LoadAndSpill(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    LoadAndSpill(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      LoadAndSpill(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property. Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    LoadAndSpill(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  VirtualFrame::SpilledScope spilled_scope;
  // Pop a reference from the stack while preserving TOS.
  Comment cmnt(masm_, "[ UnloadReference");
  int size = ref->size();
  if (size > 0) {
    frame_->EmitPop(r0);
    frame_->Drop(size);
    frame_->EmitPush(r0);
  }
  ref->set_unloaded();
}


// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  VirtualFrame::SpilledScope spilled_scope;
  // Note: The generated code snippet does not change stack variables.
  // Only the condition code should be set.
  frame_->EmitPop(r0);

  // Fast case checks

  // Check if the value is 'false'.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is 'true'.
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r0, ip);
  true_target->Branch(eq);

  // Check if the value is 'undefined'.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is a smi.
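  // (The zero smi is treated as false; any other smi is true.)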
  __ cmp(r0, Operand(Smi::FromInt(0)));
  false_target->Branch(eq);
  __ tst(r0, Operand(kSmiTagMask));
  true_target->Branch(eq);

  // Slow case: call the runtime.
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kToBool, 1);
  // Convert the result (r0) to a condition code.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           int constant_rhs) {
  VirtualFrame::SpilledScope spilled_scope;
  // sp[0] : y
  // sp[1] : x
  // result : r0

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:  // fall through.
    case Token::SUB:  // fall through.
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      frame_->EmitPop(r0);  // r0 : y
      frame_->EmitPop(r1);  // r1 : x
      GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
      frame_->CallStub(&stub, 0);
      break;
    }

    case Token::COMMA:
      frame_->EmitPop(r0);
      // simply discard left value
      frame_->Drop();
      break;

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiOperation::Generate() {
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
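      // (The inline code in SmiOperation adds the constant optimistically,
      // before it has checked for overflow or a non-smi operand, so the
      // original operands are reconstructed here before calling the stub.)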
      if (reversed_) {
        __ sub(r0, r0, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, r0, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, r0, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (reversed_) {
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ mov(r1, Operand(r0));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (!reversed_) {
        __ mov(r1, Operand(r0));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      } else {
        UNREACHABLE();  // Should have been handled in SmiOperation.
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
  __ CallStub(&stub);
}


static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
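// (Assumes x is nonzero. For example, BitPosition(0x28) == 3, since the
// lowest set bit of 0x28 == 0b101000 is bit 3.)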
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  VirtualFrame::SpilledScope spilled_scope;
  // NOTE: This is an attempt to inline (a bit) more of the code for
  // some possible smi operations (like + and -) when (at least) one
  // of the operands is a literal smi. With this optimization, the
  // performance of the system is increased by ~15%, and the generated
  // code size is increased by ~1% (measured on a combination of
  // different benchmarks).

  // sp[0] : operand

  int int_value = Smi::cast(*value)->value();

  JumpTarget exit;
  frame_->EmitPop(r0);

  bool something_to_inline = true;
  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);

      __ add(r0, r0, Operand(value), SetCC);
      deferred->Branch(vs);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);

      if (reversed) {
        __ rsb(r0, r0, Operand(value), SetCC);
      } else {
        __ sub(r0, r0, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      break;
    }


    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      switch (op) {
        case Token::BIT_OR:  __ orr(r0, r0, Operand(value)); break;
        case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
        case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
        default: UNREACHABLE();
      }
      deferred->BindExit();
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
        break;
      }
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, shift_value, false, mode);
      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(ne);
      __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            __ mov(r2, Operand(r2, LSL, shift_value));
          }
          // check that the *unsigned* result fits in a smi
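          // (Adding 0x40000000 sets the sign bit exactly when the shifted
          // value, seen as a signed 32-bit integer, lies outside
          // [-2^30, 2^30 - 1], i.e. when it cannot be tagged as a smi.)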
          __ add(r3, r2, Operand(0x40000000), SetCC);
          deferred->Branch(mi);
          break;
        }
        case Token::SHR: {
          // LSR by immediate 0 means shifting 32 bits.
          if (shift_value != 0) {
            __ mov(r2, Operand(r2, LSR, shift_value));
          }
          // check that the *unsigned* result fits in a smi:
          // neither of the two high-order bits can be set:
          // - 0x80000000: high bit would be lost when smi tagging
          // - 0x40000000: this number would convert to negative when
          //   smi tagging
          // These two cases can only happen with shifts by 0 or 1 when
          // handed a valid smi.
          __ and_(r3, r2, Operand(0xc0000000), SetCC);
          deferred->Branch(ne);
          break;
        }
        case Token::SAR: {
          if (shift_value != 0) {
            // ASR by immediate 0 means shifting 32 bits.
            __ mov(r2, Operand(r2, ASR, shift_value));
          }
          break;
        }
        default: UNREACHABLE();
      }
      __ mov(r0, Operand(r2, LSL, kSmiTagSize));
      deferred->BindExit();
      break;
    }

    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
        break;
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(r0, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
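      // (With the 32-bit smi encoding (tag 0, shift 1), the tagged operand
      // is 2 * value, so for e.g. int_value == 8 the mask below is 0xF and
      // the and_ yields the tagged remainder directly.)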
      mask = (int_value << kSmiTagSize) - 1;
      __ and_(r0, r0, Operand(mask));
      deferred->BindExit();
      break;
    }

    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
        break;
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi. It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
      __ tst(r0, Operand(mask));
      deferred->Branch(ne);
      MultiplyByKnownInt(masm_, r0, r0, int_value);
      deferred->BindExit();
      break;
    }

    default:
      something_to_inline = false;
      break;
  }

  if (!something_to_inline) {
    if (!reversed) {
      frame_->EmitPush(r0);
      __ mov(r0, Operand(value));
      frame_->EmitPush(r0);
      GenericBinaryOperation(op, mode, int_value);
    } else {
      __ mov(ip, Operand(value));
      frame_->EmitPush(ip);
      frame_->EmitPush(r0);
      GenericBinaryOperation(op, mode, kUnknownIntValue);
    }
  }

  exit.Bind();
}


void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  if (left != NULL) LoadAndSpill(left);
  if (right != NULL) LoadAndSpill(right);

  VirtualFrame::SpilledScope spilled_scope;
  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  JumpTarget exit;
  JumpTarget smi;
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    frame_->EmitPop(r1);
    frame_->EmitPop(r0);
  } else {
    frame_->EmitPop(r0);
    frame_->EmitPop(r1);
  }
  __ orr(r2, r0, Operand(r1));
  __ tst(r2, Operand(kSmiTagMask));
  smi.Branch(eq);

  // Perform non-smi comparison by stub.
  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
  // We call with 0 args because there are 0 on the stack.
  CompareStub stub(cc, strict);
  frame_->CallStub(&stub, 0);
  __ cmp(r0, Operand(0));
  exit.Jump();

  // Do smi comparisons by pointer comparison.
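  // (Both operands are smis here, and the smi tag is 0, so each tagged word
  // is 2 * value; comparing the words therefore orders the untagged values
  // correctly.)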
  smi.Bind();
  __ cmp(r1, Operand(r0));

  exit.Bind();
  cc_reg_ = cc;
}


// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      CallFunctionFlags flags,
                                      int position) {
  VirtualFrame::SpilledScope spilled_scope;
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    LoadAndSpill(args->at(i));
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.
  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
  CallFunctionStub call_function(arg_count, in_loop, flags);
  frame_->CallStub(&call_function, arg_count + 1);

  // Restore context and pop function from the stack.
  __ ldr(cp, frame_->Context());
  frame_->Drop();  // discard the TOS
}


void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
  VirtualFrame::SpilledScope spilled_scope;
  ASSERT(has_cc());
  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
  target->Branch(cc);
  cc_reg_ = al;
}


void CodeGenerator::CheckStack() {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ check stack");
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  // Set up lr so that the conditional jump to the stub below acts as a call:
  // kInstrSize is added to the implicit 8 byte offset that always applies to
  // operations with pc and gives a return address 12 bytes down.
  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
  masm_->cmp(sp, Operand(ip));
  StackCheckStub stub;
  // Call the stub if lower.
  masm_->mov(pc,
             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
                     RelocInfo::CODE_TARGET),
             LeaveCC,
             lo);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
    VisitAndSpill(statements->at(i));
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  VisitStatementsAndSpill(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  VirtualFrame::SpilledScope spilled_scope;
  frame_->EmitPush(cp);
  __ mov(r0, Operand(pairs));
  frame_->EmitPush(r0);
  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // The result is discarded.
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call.
    frame_->EmitPush(cp);
    __ mov(r0, Operand(var->name()));
    frame_->EmitPush(r0);
    // Declaration nodes are always declared in only two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    __ mov(r0, Operand(Smi::FromInt(attr)));
    frame_->EmitPush(r0);
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
      frame_->EmitPush(r0);
    } else if (node->fun() != NULL) {
      LoadAndSpill(node->fun());
    } else {
      __ mov(r0, Operand(0));  // no initial value!
      frame_->EmitPush(r0);
    }
    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).
    ASSERT(frame_->height() == original_height);
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    {
      // Set initial value.
      Reference target(this, node->proxy());
      LoadAndSpill(val);
      target.SetValue(NOT_CONST_INIT);
    }
    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  LoadAndSpill(expression);
  frame_->Drop();
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);

  JumpTarget exit;
  if (has_then_stm && has_else_stm) {
    Comment cmnt(masm_, "[ IfThenElse");
    JumpTarget then;
    JumpTarget else_;
    // if (cond)
    LoadConditionAndSpill(node->condition(), &then, &else_, true);
    if (frame_ != NULL) {
      Branch(false, &else_);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      VisitAndSpill(node->then_statement());
    }
    if (frame_ != NULL) {
      exit.Jump();
    }
    // else
    if (else_.is_linked()) {
      else_.Bind();
      VisitAndSpill(node->else_statement());
    }

  } else if (has_then_stm) {
    Comment cmnt(masm_, "[ IfThen");
    ASSERT(!has_else_stm);
    JumpTarget then;
    // if (cond)
    LoadConditionAndSpill(node->condition(), &then, &exit, true);
    if (frame_ != NULL) {
      Branch(false, &exit);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      VisitAndSpill(node->then_statement());
    }

  } else if (has_else_stm) {
    Comment cmnt(masm_, "[ IfElse");
    ASSERT(!has_then_stm);
    JumpTarget else_;
    // if (!cond)
    LoadConditionAndSpill(node->condition(), &exit, &else_, true);
    if (frame_ != NULL) {
      Branch(true, &exit);
    }
    // else
    if (frame_ != NULL || else_.is_linked()) {
      else_.Bind();
      VisitAndSpill(node->else_statement());
    }

  } else {
    Comment cmnt(masm_, "[ If");
    ASSERT(!has_then_stm && !has_else_stm);
    // if (cond)
    LoadConditionAndSpill(node->condition(), &exit, &exit, false);
    if (frame_ != NULL) {
      if (has_cc()) {
        cc_reg_ = al;
      } else {
        frame_->Drop();
      }
    }
  }

  // end
  if (exit.is_linked()) {
    exit.Bind();
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  LoadAndSpill(node->expression());
  if (function_return_is_shadowed_) {
    frame_->EmitPop(r0);
    function_return_.Jump();
  } else {
    // Pop the result from the frame and prepare the frame for
    // returning thus making it easier to merge.
    frame_->EmitPop(r0);
    frame_->PrepareForReturn();

    function_return_.Jump();
  }
}


void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  LoadAndSpill(node->expression());
  if (node->is_catch_block()) {
    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    frame_->CallRuntime(Runtime::kPushContext, 1);
  }
#ifdef DEBUG
  JumpTarget verified_true;
  __ cmp(r0, Operand(cp));
  verified_true.Branch(eq);
  __ stop("PushContext: r0 is expected to be the same as cp");
  verified_true.Bind();
#endif
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);

  LoadAndSpill(node->tag());

  JumpTarget next_test;
  JumpTarget fall_through;
  JumpTarget default_entry;
  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;

  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);
    if (clause->is_default()) {
      // Remember the default clause and compile it at the end.
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case clause");
    // Compile the test.
    next_test.Bind();
    next_test.Unuse();
    // Duplicate TOS.
    __ ldr(r0, frame_->Top());
    frame_->EmitPush(r0);
    Comparison(eq, NULL, clause->label(), true);
    Branch(false, &next_test);

    // Before entering the body from the test, remove the switch value from
    // the stack.
    frame_->Drop();

    // Label the body so that fall through is enabled.
    if (i > 0 && cases->at(i - 1)->is_default()) {
      default_exit.Bind();
    } else {
      fall_through.Bind();
      fall_through.Unuse();
    }
    VisitStatementsAndSpill(clause->statements());

    // If control flow can fall through from the body, jump to the next body
    // or the end of the statement.
    if (frame_ != NULL) {
      if (i < length - 1 && cases->at(i + 1)->is_default()) {
        default_entry.Jump();
      } else {
        fall_through.Jump();
      }
    }
  }

  // The final "test" removes the switch value.
  next_test.Bind();
  frame_->Drop();

  // If there is a default clause, compile it.
  if (default_clause != NULL) {
    Comment cmnt(masm_, "[ Default clause");
    default_entry.Bind();
    VisitStatementsAndSpill(default_clause->statements());
    // If control flow can fall out of the default and there is a case after
    // it, jump to that case's body.
    if (frame_ != NULL && default_exit.is_bound()) {
      default_exit.Jump();
    }
  }

  if (fall_through.is_linked()) {
    fall_through.Bind();
  }

  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}

Steve Block3ce2e202009-11-05 08:53:23 +00001550void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001551#ifdef DEBUG
1552 int original_height = frame_->height();
1553#endif
1554 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001555 Comment cmnt(masm_, "[ DoWhileStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001556 CodeForStatementPosition(node);
1557 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
Steve Block3ce2e202009-11-05 08:53:23 +00001558 JumpTarget body(JumpTarget::BIDIRECTIONAL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001559
Steve Block3ce2e202009-11-05 08:53:23 +00001560 // Label the top of the loop for the backward CFG edge. If the test
1561 // is always true we can use the continue target, and if the test is
1562 // always false there is no need.
1563 ConditionAnalysis info = AnalyzeCondition(node->cond());
1564 switch (info) {
1565 case ALWAYS_TRUE:
Steve Blocka7e24c12009-10-30 11:49:00 +00001566 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1567 node->continue_target()->Bind();
Steve Block3ce2e202009-11-05 08:53:23 +00001568 break;
1569 case ALWAYS_FALSE:
1570 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1571 break;
1572 case DONT_KNOW:
1573 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1574 body.Bind();
1575 break;
1576 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001577
Steve Block3ce2e202009-11-05 08:53:23 +00001578 CheckStack(); // TODO(1222600): ignore if body contains calls.
1579 VisitAndSpill(node->body());
Steve Blocka7e24c12009-10-30 11:49:00 +00001580
Steve Blockd0582a62009-12-15 09:54:21 +00001581 // Compile the test.
Steve Block3ce2e202009-11-05 08:53:23 +00001582 switch (info) {
1583 case ALWAYS_TRUE:
1584 // If control can fall off the end of the body, jump back to the
1585 // top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001586 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001587 node->continue_target()->Jump();
Steve Blocka7e24c12009-10-30 11:49:00 +00001588 }
1589 break;
Steve Block3ce2e202009-11-05 08:53:23 +00001590 case ALWAYS_FALSE:
1591 // If we have a continue in the body, we only have to bind its
1592 // jump target.
1593 if (node->continue_target()->is_linked()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001594 node->continue_target()->Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001595 }
Steve Block3ce2e202009-11-05 08:53:23 +00001596 break;
1597 case DONT_KNOW:
1598 // We have to compile the test expression if it can be reached by
1599 // control flow falling out of the body or via continue.
1600 if (node->continue_target()->is_linked()) {
1601 node->continue_target()->Bind();
1602 }
1603 if (has_valid_frame()) {
Steve Blockd0582a62009-12-15 09:54:21 +00001604 Comment cmnt(masm_, "[ DoWhileCondition");
1605 CodeForDoWhileConditionPosition(node);
1606 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001607 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001608 // An invalid frame here indicates that control did not
1609 // fall out of the test expression.
1610 Branch(true, &body);
Steve Blocka7e24c12009-10-30 11:49:00 +00001611 }
1612 }
1613 break;
Steve Blocka7e24c12009-10-30 11:49:00 +00001614 }
1615
1616 if (node->break_target()->is_linked()) {
1617 node->break_target()->Bind();
1618 }
Steve Block3ce2e202009-11-05 08:53:23 +00001619 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1620}
1621
1622
1623void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1624#ifdef DEBUG
1625 int original_height = frame_->height();
1626#endif
1627 VirtualFrame::SpilledScope spilled_scope;
1628 Comment cmnt(masm_, "[ WhileStatement");
1629 CodeForStatementPosition(node);
1630
1631 // If the test is never true and has no side effects there is no need
1632 // to compile the test or body.
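// For example (illustrative, hypothetical input), a statement such as
//
//   while (false) { expensive(); }
//
// is classified ALWAYS_FALSE by the analysis below and no code at all is
// emitted for it.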
1633 ConditionAnalysis info = AnalyzeCondition(node->cond());
1634 if (info == ALWAYS_FALSE) return;
1635
1636 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1637
1638 // Label the top of the loop with the continue target for the backward
1639 // CFG edge.
1640 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1641 node->continue_target()->Bind();
1642
1643 if (info == DONT_KNOW) {
1644 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001645 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001646 if (has_valid_frame()) {
1647 // A NULL frame indicates that control did not fall out of the
1648 // test expression.
1649 Branch(false, node->break_target());
1650 }
1651 if (has_valid_frame() || body.is_linked()) {
1652 body.Bind();
1653 }
1654 }
1655
1656 if (has_valid_frame()) {
1657 CheckStack(); // TODO(1222600): ignore if body contains calls.
1658 VisitAndSpill(node->body());
1659
1660 // If control flow can fall out of the body, jump back to the top.
1661 if (has_valid_frame()) {
1662 node->continue_target()->Jump();
1663 }
1664 }
1665 if (node->break_target()->is_linked()) {
1666 node->break_target()->Bind();
1667 }
1668 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1669}
1670
1671
1672void CodeGenerator::VisitForStatement(ForStatement* node) {
1673#ifdef DEBUG
1674 int original_height = frame_->height();
1675#endif
1676 VirtualFrame::SpilledScope spilled_scope;
1677 Comment cmnt(masm_, "[ ForStatement");
1678 CodeForStatementPosition(node);
1679 if (node->init() != NULL) {
1680 VisitAndSpill(node->init());
1681 }
1682
1683 // If the test is never true there is no need to compile the test or
1684 // body.
1685 ConditionAnalysis info = AnalyzeCondition(node->cond());
1686 if (info == ALWAYS_FALSE) return;
1687
1688 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1689
1690 // If there is no update statement, label the top of the loop with the
1691 // continue target, otherwise with the loop target.
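// Illustrative only (hypothetical inputs): in
//
//   for (var i = 0; i < n; i++) { if (skip(i)) continue; work(i); }
//
// a 'continue' must reach the update 'i++', so the separate 'loop' target
// labels the top; in
//
//   for (;;) { if (done()) break; work(); }
//
// there is no update and the continue target itself labels the loop top.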
1692 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1693 if (node->next() == NULL) {
1694 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1695 node->continue_target()->Bind();
1696 } else {
1697 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1698 loop.Bind();
1699 }
1700
1701 // If the test is always true, there is no need to compile it.
1702 if (info == DONT_KNOW) {
1703 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00001704 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00001705 if (has_valid_frame()) {
1706 Branch(false, node->break_target());
1707 }
1708 if (has_valid_frame() || body.is_linked()) {
1709 body.Bind();
1710 }
1711 }
1712
1713 if (has_valid_frame()) {
1714 CheckStack(); // TODO(1222600): ignore if body contains calls.
1715 VisitAndSpill(node->body());
1716
1717 if (node->next() == NULL) {
1718 // If there is no update statement and control flow can fall out
1719 // of the loop, jump directly to the continue label.
1720 if (has_valid_frame()) {
1721 node->continue_target()->Jump();
1722 }
1723 } else {
1724 // If there is an update statement and control flow can reach it
1725 // via falling out of the body of the loop or continuing, we
1726 // compile the update statement.
1727 if (node->continue_target()->is_linked()) {
1728 node->continue_target()->Bind();
1729 }
1730 if (has_valid_frame()) {
1731 // Record the source position of the statement, because this code,
1732 // which comes after the code for the body, actually belongs to the
1733 // loop statement and not the body.
1734 CodeForStatementPosition(node);
1735 VisitAndSpill(node->next());
1736 loop.Jump();
1737 }
1738 }
1739 }
1740 if (node->break_target()->is_linked()) {
1741 node->break_target()->Bind();
1742 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001743 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1744}
1745
1746
1747void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1748#ifdef DEBUG
1749 int original_height = frame_->height();
1750#endif
1751 VirtualFrame::SpilledScope spilled_scope;
1752 Comment cmnt(masm_, "[ ForInStatement");
1753 CodeForStatementPosition(node);
1754
1755 JumpTarget primitive;
1756 JumpTarget jsobject;
1757 JumpTarget fixed_array;
1758 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1759 JumpTarget end_del_check;
1760 JumpTarget exit;
1761
1762 // Get the object to enumerate over (converted to JSObject).
1763 LoadAndSpill(node->enumerable());
1764
1765 // Both SpiderMonkey and kjs ignore null and undefined in contrast
1766 // to the specification. Section 12.6.4 mandates a call to ToObject.
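// Illustrative only (hypothetical input): with that behaviour
//
//   for (var key in null) { touched = true; }
//
// simply performs zero iterations instead of failing in ToObject(null).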
1767 frame_->EmitPop(r0);
1768 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
1769 __ cmp(r0, ip);
1770 exit.Branch(eq);
1771 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1772 __ cmp(r0, ip);
1773 exit.Branch(eq);
1774
1775 // Stack layout in body:
1776 // [iteration counter (Smi)]
1777 // [length of array]
1778 // [FixedArray]
1779 // [Map or 0]
1780 // [Object]
1781
1782 // Check if enumerable is already a JSObject
1783 __ tst(r0, Operand(kSmiTagMask));
1784 primitive.Branch(eq);
1785 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
1786 jsobject.Branch(hs);
1787
1788 primitive.Bind();
1789 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00001790 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00001791
1792 jsobject.Bind();
1793 // Get the set of properties (as a FixedArray or Map).
Steve Blockd0582a62009-12-15 09:54:21 +00001794 // r0: value to be iterated over
1795 frame_->EmitPush(r0); // Push the object being iterated over.
1796
1797 // Check cache validity in generated code. This is a fast case for
1798 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
1799 // guarantee cache validity, call the runtime system to check cache
1800 // validity or get the property names in a fixed array.
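// The checks below mirror that fast case: walk the prototype chain,
// requiring empty elements on every object and a usable enum cache in the
// receiver's instance descriptors. An input of this shape (illustrative,
// hypothetical) is the sort of case the fast path is aimed at:
//
//   for (var k in {a: 1, b: 2}) { use(k); }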
1801 JumpTarget call_runtime;
1802 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1803 JumpTarget check_prototype;
1804 JumpTarget use_cache;
1805 __ mov(r1, Operand(r0));
1806 loop.Bind();
1807 // Check that there are no elements.
1808 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
1809 __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
1810 __ cmp(r2, r4);
1811 call_runtime.Branch(ne);
1812 // Check that instance descriptors are not empty so that we can
1813 // check for an enum cache. Leave the map in r3 for the subsequent
1814 // prototype load.
1815 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
1816 __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
1817 __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
1818 __ cmp(r2, ip);
1819 call_runtime.Branch(eq);
1820 // Check that there is an enum cache in the non-empty instance
1821 // descriptors. This is the case if the next enumeration index
1822 // field does not contain a smi.
1823 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
1824 __ tst(r2, Operand(kSmiTagMask));
1825 call_runtime.Branch(eq);
1826 // For all objects but the receiver, check that the cache is empty.
1827 // r4: empty fixed array root.
1828 __ cmp(r1, r0);
1829 check_prototype.Branch(eq);
1830 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
1831 __ cmp(r2, r4);
1832 call_runtime.Branch(ne);
1833 check_prototype.Bind();
1834 // Load the prototype from the map and loop if non-null.
1835 __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
1836 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1837 __ cmp(r1, ip);
1838 loop.Branch(ne);
1839 // The enum cache is valid. Load the map of the object being
1840 // iterated over and use the cache for the iteration.
1841 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
1842 use_cache.Jump();
1843
1844 call_runtime.Bind();
1845 // Call the runtime to get the property names for the object.
1846 frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1848
Steve Blockd0582a62009-12-15 09:54:21 +00001849 // If we got a map from the runtime call, we can do a fast
1850 // modification check. Otherwise, we got a fixed array, and we have
1851 // to do a slow check.
1852 // r0: map or fixed array (result from call to
1853 // Runtime::kGetPropertyNamesFast)
Steve Blocka7e24c12009-10-30 11:49:00 +00001854 __ mov(r2, Operand(r0));
1855 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
1856 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
1857 __ cmp(r1, ip);
1858 fixed_array.Branch(ne);
1859
Steve Blockd0582a62009-12-15 09:54:21 +00001860 use_cache.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001861 // Get enum cache
Steve Blockd0582a62009-12-15 09:54:21 +00001862 // r0: map (either the result from a call to
1863 // Runtime::kGetPropertyNamesFast or has been fetched directly from
1864 // the object)
Steve Blocka7e24c12009-10-30 11:49:00 +00001865 __ mov(r1, Operand(r0));
1866 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
1867 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
1868 __ ldr(r2,
1869 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
1870
1871 frame_->EmitPush(r0); // map
1872 frame_->EmitPush(r2); // enum cache bridge cache
1873 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
1874 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1875 frame_->EmitPush(r0);
1876 __ mov(r0, Operand(Smi::FromInt(0)));
1877 frame_->EmitPush(r0);
1878 entry.Jump();
1879
1880 fixed_array.Bind();
1881 __ mov(r1, Operand(Smi::FromInt(0)));
1882 frame_->EmitPush(r1); // insert 0 in place of Map
1883 frame_->EmitPush(r0);
1884
1885 // Push the length of the array and the initial index onto the stack.
1886 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
1887 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1888 frame_->EmitPush(r0);
1889 __ mov(r0, Operand(Smi::FromInt(0))); // init index
1890 frame_->EmitPush(r0);
1891
1892 // Condition.
1893 entry.Bind();
1894 // sp[0] : index
1895 // sp[1] : array/enum cache length
1896 // sp[2] : array or enum cache
1897 // sp[3] : 0 or map
1898 // sp[4] : enumerable
1899 // Grab the current frame's height for the break and continue
1900 // targets only after all the state is pushed on the frame.
1901 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1902 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1903
1904 __ ldr(r0, frame_->ElementAt(0)); // load the current count
1905 __ ldr(r1, frame_->ElementAt(1)); // load the length
1906 __ cmp(r0, Operand(r1)); // compare to the array length
1907 node->break_target()->Branch(hs);
1908
1909 __ ldr(r0, frame_->ElementAt(0));
1910
1911 // Get the i'th entry of the array.
1912 __ ldr(r2, frame_->ElementAt(2));
1913 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1914 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1915
1916 // Get Map or 0.
1917 __ ldr(r2, frame_->ElementAt(3));
1918 // Check if this (still) matches the map of the enumerable.
1919 // If not, we have to filter the key.
1920 __ ldr(r1, frame_->ElementAt(4));
1921 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
1922 __ cmp(r1, Operand(r2));
1923 end_del_check.Branch(eq);
1924
1925 // Convert the entry to a string (or null if it isn't a property anymore).
1926 __ ldr(r0, frame_->ElementAt(4)); // push enumerable
1927 frame_->EmitPush(r0);
1928 frame_->EmitPush(r3); // push entry
Steve Blockd0582a62009-12-15 09:54:21 +00001929 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001930 __ mov(r3, Operand(r0));
1931
1932 // If the property has been removed while iterating, we just skip it.
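// Illustrative only (hypothetical input): FILTER_KEY yields null here for
// a key deleted during iteration, e.g.
//
//   var o = {a: 1, b: 2};
//   for (var k in o) { delete o.b; }   // 'b' can be filtered out and skipped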
1933 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1934 __ cmp(r3, ip);
1935 node->continue_target()->Branch(eq);
1936
1937 end_del_check.Bind();
1938 // Store the entry in the 'each' expression and take another spin in the
1939 // loop. r3: i'th entry of the enum cache (or string thereof)
1940 frame_->EmitPush(r3); // push entry
1941 { Reference each(this, node->each());
1942 if (!each.is_illegal()) {
1943 if (each.size() > 0) {
1944 __ ldr(r0, frame_->ElementAt(each.size()));
1945 frame_->EmitPush(r0);
Leon Clarked91b9f72010-01-27 17:25:45 +00001946 each.SetValue(NOT_CONST_INIT);
1947 frame_->Drop(2);
1948 } else {
1949 // If the reference was to a slot we rely on the convenient property
1950 // that it doesn't matter whether a value (eg, r3 pushed above) is
1951 // right on top of or right underneath a zero-sized reference.
1952 each.SetValue(NOT_CONST_INIT);
1953 frame_->Drop();
Steve Blocka7e24c12009-10-30 11:49:00 +00001954 }
1955 }
1956 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001957 // Body.
1958 CheckStack(); // TODO(1222600): ignore if body contains calls.
1959 VisitAndSpill(node->body());
1960
1961 // Next. Reestablish a spilled frame in case we are coming here via
1962 // a continue in the body.
1963 node->continue_target()->Bind();
1964 frame_->SpillAll();
1965 frame_->EmitPop(r0);
1966 __ add(r0, r0, Operand(Smi::FromInt(1)));
1967 frame_->EmitPush(r0);
1968 entry.Jump();
1969
1970 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1971 // any frame.
1972 node->break_target()->Bind();
1973 frame_->Drop(5);
1974
1975 // Exit.
1976 exit.Bind();
1977 node->continue_target()->Unuse();
1978 node->break_target()->Unuse();
1979 ASSERT(frame_->height() == original_height);
1980}
1981
1982
Steve Block3ce2e202009-11-05 08:53:23 +00001983void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001984#ifdef DEBUG
1985 int original_height = frame_->height();
1986#endif
1987 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00001988 Comment cmnt(masm_, "[ TryCatchStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001989 CodeForStatementPosition(node);
1990
1991 JumpTarget try_block;
1992 JumpTarget exit;
1993
1994 try_block.Call();
1995 // --- Catch block ---
1996 frame_->EmitPush(r0);
1997
1998 // Store the caught exception in the catch variable.
Leon Clarkee46be812010-01-19 14:06:41 +00001999 Variable* catch_var = node->catch_var()->var();
2000 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
2001 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
Steve Blocka7e24c12009-10-30 11:49:00 +00002002
2003 // Remove the exception from the stack.
2004 frame_->Drop();
2005
2006 VisitStatementsAndSpill(node->catch_block()->statements());
2007 if (frame_ != NULL) {
2008 exit.Jump();
2009 }
2010
2011
2012 // --- Try block ---
2013 try_block.Bind();
2014
2015 frame_->PushTryHandler(TRY_CATCH_HANDLER);
2016 int handler_height = frame_->height();
2017
2018 // Shadow the labels for all escapes from the try block, including
2019 // returns. During shadowing, the original label is hidden as the
2020 // LabelShadow and operations on the original actually affect the
2021 // shadowing label.
2022 //
2023 // We should probably try to unify the escaping labels and the return
2024 // label.
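// For example (illustrative, hypothetical input), in
//
//   function f() {
//     try { return g(); } catch (e) { return 0; }
//   }
//
// the 'return' inside the try block is such an escape: it must unlink the
// try handler before the real function return is reached.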
2025 int nof_escapes = node->escaping_targets()->length();
2026 List<ShadowTarget*> shadows(1 + nof_escapes);
2027
2028 // Add the shadow target for the function return.
2029 static const int kReturnShadowIndex = 0;
2030 shadows.Add(new ShadowTarget(&function_return_));
2031 bool function_return_was_shadowed = function_return_is_shadowed_;
2032 function_return_is_shadowed_ = true;
2033 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2034
2035 // Add the remaining shadow targets.
2036 for (int i = 0; i < nof_escapes; i++) {
2037 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2038 }
2039
2040 // Generate code for the statements in the try block.
2041 VisitStatementsAndSpill(node->try_block()->statements());
2042
2043 // Stop the introduced shadowing and count the number of required unlinks.
2044 // After shadowing stops, the original labels are unshadowed and the
2045 // LabelShadows represent the formerly shadowing labels.
2046 bool has_unlinks = false;
2047 for (int i = 0; i < shadows.length(); i++) {
2048 shadows[i]->StopShadowing();
2049 has_unlinks = has_unlinks || shadows[i]->is_linked();
2050 }
2051 function_return_is_shadowed_ = function_return_was_shadowed;
2052
2053 // Get an external reference to the handler address.
2054 ExternalReference handler_address(Top::k_handler_address);
2055
2056 // If we can fall off the end of the try block, unlink from try chain.
2057 if (has_valid_frame()) {
2058 // The next handler address is on top of the frame. Unlink from
2059 // the handler list and drop the rest of this handler from the
2060 // frame.
2061 ASSERT(StackHandlerConstants::kNextOffset == 0);
2062 frame_->EmitPop(r1);
2063 __ mov(r3, Operand(handler_address));
2064 __ str(r1, MemOperand(r3));
2065 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2066 if (has_unlinks) {
2067 exit.Jump();
2068 }
2069 }
2070
2071 // Generate unlink code for the (formerly) shadowing labels that have been
2072 // jumped to. Deallocate each shadow target.
2073 for (int i = 0; i < shadows.length(); i++) {
2074 if (shadows[i]->is_linked()) {
2075 // Unlink from try chain.
2076 shadows[i]->Bind();
2077 // Because we can be jumping here (to spilled code) from unspilled
2078 // code, we need to reestablish a spilled frame at this block.
2079 frame_->SpillAll();
2080
2081 // Reload sp from the top handler, because some statements that we
2082 // break from (eg, for...in) may have left stuff on the stack.
2083 __ mov(r3, Operand(handler_address));
2084 __ ldr(sp, MemOperand(r3));
2085 frame_->Forget(frame_->height() - handler_height);
2086
2087 ASSERT(StackHandlerConstants::kNextOffset == 0);
2088 frame_->EmitPop(r1);
2089 __ str(r1, MemOperand(r3));
2090 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2091
2092 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2093 frame_->PrepareForReturn();
2094 }
2095 shadows[i]->other_target()->Jump();
2096 }
2097 }
2098
2099 exit.Bind();
2100 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2101}
2102
2103
Steve Block3ce2e202009-11-05 08:53:23 +00002104void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002105#ifdef DEBUG
2106 int original_height = frame_->height();
2107#endif
2108 VirtualFrame::SpilledScope spilled_scope;
Steve Block3ce2e202009-11-05 08:53:23 +00002109 Comment cmnt(masm_, "[ TryFinallyStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002110 CodeForStatementPosition(node);
2111
2112 // State: Used to keep track of reason for entering the finally
2113 // block. Should probably be extended to hold information for
2114 // break/continue from within the try block.
2115 enum { FALLING, THROWING, JUMPING };
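// Illustrative mapping of the states onto a hypothetical input (not taken
// from this file):
//
//   try {
//     if (a) return 1;    // reaches the finally block in a JUMPING state
//     if (b) throw err;   // reaches it in the THROWING state
//   } finally {           // falling off the end of the try block: FALLING
//     cleanup();
//   }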
2116
2117 JumpTarget try_block;
2118 JumpTarget finally_block;
2119
2120 try_block.Call();
2121
2122 frame_->EmitPush(r0); // save exception object on the stack
2123 // In case of thrown exceptions, this is where we continue.
2124 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2125 finally_block.Jump();
2126
2127 // --- Try block ---
2128 try_block.Bind();
2129
2130 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2131 int handler_height = frame_->height();
2132
2133 // Shadow the labels for all escapes from the try block, including
2134 // returns. Shadowing hides the original label as the LabelShadow and
2135 // operations on the original actually affect the shadowing label.
2136 //
2137 // We should probably try to unify the escaping labels and the return
2138 // label.
2139 int nof_escapes = node->escaping_targets()->length();
2140 List<ShadowTarget*> shadows(1 + nof_escapes);
2141
2142 // Add the shadow target for the function return.
2143 static const int kReturnShadowIndex = 0;
2144 shadows.Add(new ShadowTarget(&function_return_));
2145 bool function_return_was_shadowed = function_return_is_shadowed_;
2146 function_return_is_shadowed_ = true;
2147 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2148
2149 // Add the remaining shadow targets.
2150 for (int i = 0; i < nof_escapes; i++) {
2151 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2152 }
2153
2154 // Generate code for the statements in the try block.
2155 VisitStatementsAndSpill(node->try_block()->statements());
2156
2157 // Stop the introduced shadowing and count the number of required unlinks.
2158 // After shadowing stops, the original labels are unshadowed and the
2159 // LabelShadows represent the formerly shadowing labels.
2160 int nof_unlinks = 0;
2161 for (int i = 0; i < shadows.length(); i++) {
2162 shadows[i]->StopShadowing();
2163 if (shadows[i]->is_linked()) nof_unlinks++;
2164 }
2165 function_return_is_shadowed_ = function_return_was_shadowed;
2166
2167 // Get an external reference to the handler address.
2168 ExternalReference handler_address(Top::k_handler_address);
2169
2170 // If we can fall off the end of the try block, unlink from the try
2171 // chain and set the state on the frame to FALLING.
2172 if (has_valid_frame()) {
2173 // The next handler address is on top of the frame.
2174 ASSERT(StackHandlerConstants::kNextOffset == 0);
2175 frame_->EmitPop(r1);
2176 __ mov(r3, Operand(handler_address));
2177 __ str(r1, MemOperand(r3));
2178 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2179
2180 // Fake a top of stack value (unneeded when FALLING) and set the
2181 // state in r2, then jump around the unlink blocks if any.
2182 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2183 frame_->EmitPush(r0);
2184 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2185 if (nof_unlinks > 0) {
2186 finally_block.Jump();
2187 }
2188 }
2189
2190 // Generate code to unlink and set the state for the (formerly)
2191 // shadowing targets that have been jumped to.
2192 for (int i = 0; i < shadows.length(); i++) {
2193 if (shadows[i]->is_linked()) {
2194 // If we have come from the shadowed return, the return value is
2195 // in (a non-refcounted reference to) r0. We must preserve it
2196 // until it is pushed.
2197 //
2198 // Because we can be jumping here (to spilled code) from
2199 // unspilled code, we need to reestablish a spilled frame at
2200 // this block.
2201 shadows[i]->Bind();
2202 frame_->SpillAll();
2203
2204 // Reload sp from the top handler, because some statements that
2205 // we break from (eg, for...in) may have left stuff on the
2206 // stack.
2207 __ mov(r3, Operand(handler_address));
2208 __ ldr(sp, MemOperand(r3));
2209 frame_->Forget(frame_->height() - handler_height);
2210
2211 // Unlink this handler and drop it from the frame. The next
2212 // handler address is currently on top of the frame.
2213 ASSERT(StackHandlerConstants::kNextOffset == 0);
2214 frame_->EmitPop(r1);
2215 __ str(r1, MemOperand(r3));
2216 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2217
2218 if (i == kReturnShadowIndex) {
2219 // If this label shadowed the function return, materialize the
2220 // return value on the stack.
2221 frame_->EmitPush(r0);
2222 } else {
2223 // Fake TOS for targets that shadowed breaks and continues.
2224 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2225 frame_->EmitPush(r0);
2226 }
2227 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2228 if (--nof_unlinks > 0) {
2229 // If this is not the last unlink block, jump around the next.
2230 finally_block.Jump();
2231 }
2232 }
2233 }
2234
2235 // --- Finally block ---
2236 finally_block.Bind();
2237
2238 // Push the state on the stack.
2239 frame_->EmitPush(r2);
2240
2241 // We keep two elements on the stack - the (possibly faked) result
2242 // and the state - while evaluating the finally block.
2243 //
2244 // Generate code for the statements in the finally block.
2245 VisitStatementsAndSpill(node->finally_block()->statements());
2246
2247 if (has_valid_frame()) {
2248 // Restore state and return value or faked TOS.
2249 frame_->EmitPop(r2);
2250 frame_->EmitPop(r0);
2251 }
2252
2253 // Generate code to jump to the right destination for all used
2254 // formerly shadowing targets. Deallocate each shadow target.
2255 for (int i = 0; i < shadows.length(); i++) {
2256 if (has_valid_frame() && shadows[i]->is_bound()) {
2257 JumpTarget* original = shadows[i]->other_target();
2258 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2259 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2260 JumpTarget skip;
2261 skip.Branch(ne);
2262 frame_->PrepareForReturn();
2263 original->Jump();
2264 skip.Bind();
2265 } else {
2266 original->Branch(eq);
2267 }
2268 }
2269 }
2270
2271 if (has_valid_frame()) {
2272 // Check if we need to rethrow the exception.
2273 JumpTarget exit;
2274 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2275 exit.Branch(ne);
2276
2277 // Rethrow exception.
2278 frame_->EmitPush(r0);
2279 frame_->CallRuntime(Runtime::kReThrow, 1);
2280
2281 // Done.
2282 exit.Bind();
2283 }
2284 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2285}
2286
2287
2288void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2289#ifdef DEBUG
2290 int original_height = frame_->height();
2291#endif
2292 VirtualFrame::SpilledScope spilled_scope;
2293 Comment cmnt(masm_, "[ DebuggerStatement");
2294 CodeForStatementPosition(node);
2295#ifdef ENABLE_DEBUGGER_SUPPORT
Leon Clarke4515c472010-02-03 11:58:03 +00002296 DebuggerStatementStub ces;
2297 frame_->CallStub(&ces, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002298#endif
2299 // Ignore the return value.
2300 ASSERT(frame_->height() == original_height);
2301}
2302
2303
2304void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
2305 VirtualFrame::SpilledScope spilled_scope;
2306 ASSERT(boilerplate->IsBoilerplate());
2307
Steve Block3ce2e202009-11-05 08:53:23 +00002308 __ mov(r0, Operand(boilerplate));
Leon Clarkee46be812010-01-19 14:06:41 +00002309 // Use the fast case closure allocation code that allocates in new
2310 // space for nested functions that don't need literals cloning.
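// Illustrative only (hypothetical inputs): a nested function such as
//
//   function outer() { return function inner(x) { return x + 1; }; }
//
// has no literals and can take the FastNewClosureStub path below, whereas
// one containing, say, an object or regexp literal uses Runtime::kNewClosure.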
2311 if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
2312 FastNewClosureStub stub;
2313 frame_->EmitPush(r0);
2314 frame_->CallStub(&stub, 1);
2315 frame_->EmitPush(r0);
2316 } else {
2317 // Create a new closure.
2318 frame_->EmitPush(cp);
2319 frame_->EmitPush(r0);
2320 frame_->CallRuntime(Runtime::kNewClosure, 2);
2321 frame_->EmitPush(r0);
2322 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002323}
2324
2325
2326void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2327#ifdef DEBUG
2328 int original_height = frame_->height();
2329#endif
2330 VirtualFrame::SpilledScope spilled_scope;
2331 Comment cmnt(masm_, "[ FunctionLiteral");
2332
2333 // Build the function boilerplate and instantiate it.
Steve Blockd0582a62009-12-15 09:54:21 +00002334 Handle<JSFunction> boilerplate =
Andrei Popescu31002712010-02-23 13:46:05 +00002335 Compiler::BuildBoilerplate(node, script(), this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002336 // Check for stack-overflow exception.
2337 if (HasStackOverflow()) {
2338 ASSERT(frame_->height() == original_height);
2339 return;
2340 }
2341 InstantiateBoilerplate(boilerplate);
2342 ASSERT(frame_->height() == original_height + 1);
2343}
2344
2345
2346void CodeGenerator::VisitFunctionBoilerplateLiteral(
2347 FunctionBoilerplateLiteral* node) {
2348#ifdef DEBUG
2349 int original_height = frame_->height();
2350#endif
2351 VirtualFrame::SpilledScope spilled_scope;
2352 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
2353 InstantiateBoilerplate(node->boilerplate());
2354 ASSERT(frame_->height() == original_height + 1);
2355}
2356
2357
2358void CodeGenerator::VisitConditional(Conditional* node) {
2359#ifdef DEBUG
2360 int original_height = frame_->height();
2361#endif
2362 VirtualFrame::SpilledScope spilled_scope;
2363 Comment cmnt(masm_, "[ Conditional");
2364 JumpTarget then;
2365 JumpTarget else_;
Steve Blockd0582a62009-12-15 09:54:21 +00002366 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002367 if (has_valid_frame()) {
2368 Branch(false, &else_);
2369 }
2370 if (has_valid_frame() || then.is_linked()) {
2371 then.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002372 LoadAndSpill(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002373 }
2374 if (else_.is_linked()) {
2375 JumpTarget exit;
2376 if (has_valid_frame()) exit.Jump();
2377 else_.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002378 LoadAndSpill(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002379 if (exit.is_linked()) exit.Bind();
2380 }
2381 ASSERT(frame_->height() == original_height + 1);
2382}
2383
2384
2385void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
2386 VirtualFrame::SpilledScope spilled_scope;
2387 if (slot->type() == Slot::LOOKUP) {
2388 ASSERT(slot->var()->is_dynamic());
2389
2390 JumpTarget slow;
2391 JumpTarget done;
2392
2393 // Generate fast-case code for variables that might be shadowed by
2394 // eval-introduced variables. Eval is used a lot without
2395 // introducing variables. In those cases, we do not want to
2396 // perform a runtime call for all variables in the scope
2397 // containing the eval.
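// Illustrative only (hypothetical input): in
//
//   function f(s) { eval(s); return g; }
//
// the lookup of 'g' cannot be statically resolved, because the eval might
// introduce its own 'g'; the fast path below still loads it as a global
// and only falls back to the runtime if a context extension shows up.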
2398 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
2399 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
2400 // If there was no control flow to slow, we can exit early.
2401 if (!slow.is_linked()) {
2402 frame_->EmitPush(r0);
2403 return;
2404 }
2405
2406 done.Jump();
2407
2408 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
2409 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
2410 // Only generate the fast case for locals that rewrite to slots.
2411 // This rules out argument loads.
2412 if (potential_slot != NULL) {
2413 __ ldr(r0,
2414 ContextSlotOperandCheckExtensions(potential_slot,
2415 r1,
2416 r2,
2417 &slow));
2418 if (potential_slot->var()->mode() == Variable::CONST) {
2419 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2420 __ cmp(r0, ip);
2421 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2422 }
2423 // There is always control flow to slow from
2424 // ContextSlotOperandCheckExtensions so we have to jump around
2425 // it.
2426 done.Jump();
2427 }
2428 }
2429
2430 slow.Bind();
2431 frame_->EmitPush(cp);
2432 __ mov(r0, Operand(slot->var()->name()));
2433 frame_->EmitPush(r0);
2434
2435 if (typeof_state == INSIDE_TYPEOF) {
2436 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2437 } else {
2438 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2439 }
2440
2441 done.Bind();
2442 frame_->EmitPush(r0);
2443
2444 } else {
Steve Blocka7e24c12009-10-30 11:49:00 +00002445 // Special handling for locals allocated in registers.
2446 __ ldr(r0, SlotOperand(slot, r2));
2447 frame_->EmitPush(r0);
2448 if (slot->var()->mode() == Variable::CONST) {
2449 // Const slots may contain 'the hole' value (the constant hasn't been
2450 // initialized yet) which needs to be converted into the 'undefined'
2451 // value.
2452 Comment cmnt(masm_, "[ Unhole const");
2453 frame_->EmitPop(r0);
2454 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2455 __ cmp(r0, ip);
2456 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2457 frame_->EmitPush(r0);
2458 }
2459 }
2460}
2461
2462
Leon Clarkee46be812010-01-19 14:06:41 +00002463void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
2464 ASSERT(slot != NULL);
2465 if (slot->type() == Slot::LOOKUP) {
2466 ASSERT(slot->var()->is_dynamic());
2467
2468 // For now, just do a runtime call.
2469 frame_->EmitPush(cp);
2470 __ mov(r0, Operand(slot->var()->name()));
2471 frame_->EmitPush(r0);
2472
2473 if (init_state == CONST_INIT) {
2474 // Same as the case for a normal store, but ignores attribute
2475 // (e.g. READ_ONLY) of context slot so that we can initialize
2476 // const properties (introduced via eval("const foo = (some
2477 // expr);")). Also, uses the current function context instead of
2478 // the top context.
2479 //
2480 // Note that we must declare the foo upon entry of eval(), via a
2481 // context slot declaration, but we cannot initialize it at the
2482 // same time, because the const declaration may be at the end of
2483 // the eval code (sigh...) and the const variable may have been
2484 // used before (where its value is 'undefined'). Thus, we can only
2485 // do the initialization when we actually encounter the expression
2486 // and when the expression operands are defined and valid, and
2487 // thus we need the split into 2 operations: declaration of the
2488 // context slot followed by initialization.
2489 frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
2490 } else {
2491 frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
2492 }
2493 // Storing a variable must keep the (new) value on the expression
2494 // stack. This is necessary for compiling assignment expressions.
2495 frame_->EmitPush(r0);
2496
2497 } else {
2498 ASSERT(!slot->var()->is_dynamic());
2499
2500 JumpTarget exit;
2501 if (init_state == CONST_INIT) {
2502 ASSERT(slot->var()->mode() == Variable::CONST);
2503 // Only the first const initialization must be executed (the slot
2504 // still contains 'the hole' value). When the assignment is
2505 // executed, the code is identical to a normal store (see below).
2506 Comment cmnt(masm_, "[ Init const");
2507 __ ldr(r2, SlotOperand(slot, r2));
2508 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2509 __ cmp(r2, ip);
2510 exit.Branch(ne);
2511 }
2512
2513 // We must execute the store. Storing a variable must keep the
2514 // (new) value on the stack. This is necessary for compiling
2515 // assignment expressions.
2516 //
2517 // Note: We will reach here even with slot->var()->mode() ==
2518 // Variable::CONST because of const declarations which will
2519 // initialize consts to 'the hole' value and by doing so, end up
2520 // calling this code. r2 may be loaded with context; used below in
2521 // RecordWrite.
2522 frame_->EmitPop(r0);
2523 __ str(r0, SlotOperand(slot, r2));
2524 frame_->EmitPush(r0);
2525 if (slot->type() == Slot::CONTEXT) {
2526 // Skip write barrier if the written value is a smi.
2527 __ tst(r0, Operand(kSmiTagMask));
2528 exit.Branch(eq);
2529 // r2 is loaded with context when calling SlotOperand above.
2530 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
2531 __ mov(r3, Operand(offset));
2532 __ RecordWrite(r2, r3, r1);
2533 }
2534 // If we definitely did not jump over the assignment, we do not need
2535 // to bind the exit label. Doing so can defeat peephole
2536 // optimization.
2537 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
2538 exit.Bind();
2539 }
2540 }
2541}
2542
2543
Steve Blocka7e24c12009-10-30 11:49:00 +00002544void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
2545 TypeofState typeof_state,
2546 Register tmp,
2547 Register tmp2,
2548 JumpTarget* slow) {
2549 // Check that no extension objects have been created by calls to
2550 // eval from the current scope to the global scope.
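// Illustrative only (hypothetical input): after something like
//
//   function f() { eval("var x = 1;"); return x; }
//
// the eval may have introduced bindings through a context extension object;
// if any scope between here and the global scope has one, we branch to
// 'slow' instead of using the global load IC.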
2551 Register context = cp;
2552 Scope* s = scope();
2553 while (s != NULL) {
2554 if (s->num_heap_slots() > 0) {
2555 if (s->calls_eval()) {
2556 // Check that extension is NULL.
2557 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
2558 __ tst(tmp2, tmp2);
2559 slow->Branch(ne);
2560 }
2561 // Load next context in chain.
2562 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
2563 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2564 context = tmp;
2565 }
2566 // If no outer scope calls eval, we do not need to check more
2567 // context extensions.
2568 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2569 s = s->outer_scope();
2570 }
2571
2572 if (s->is_eval_scope()) {
2573 Label next, fast;
2574 if (!context.is(tmp)) {
2575 __ mov(tmp, Operand(context));
2576 }
2577 __ bind(&next);
2578 // Terminate at global context.
2579 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2580 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
2581 __ cmp(tmp2, ip);
2582 __ b(eq, &fast);
2583 // Check that extension is NULL.
2584 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2585 __ tst(tmp2, tmp2);
2586 slow->Branch(ne);
2587 // Load next context in chain.
2588 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
2589 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2590 __ b(&next);
2591 __ bind(&fast);
2592 }
2593
2594 // All extension objects were empty and it is safe to use a global
2595 // load IC call.
2596 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
2597 // Load the global object.
2598 LoadGlobal();
2599 // Setup the name register.
Steve Blocka7e24c12009-10-30 11:49:00 +00002600 __ mov(r2, Operand(slot->var()->name()));
2601 // Call IC stub.
2602 if (typeof_state == INSIDE_TYPEOF) {
Leon Clarke4515c472010-02-03 11:58:03 +00002603 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002604 } else {
Leon Clarke4515c472010-02-03 11:58:03 +00002605 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002606 }
2607
2608 // Drop the global object. The result is in r0.
2609 frame_->Drop();
2610}
2611
2612
2613void CodeGenerator::VisitSlot(Slot* node) {
2614#ifdef DEBUG
2615 int original_height = frame_->height();
2616#endif
2617 VirtualFrame::SpilledScope spilled_scope;
2618 Comment cmnt(masm_, "[ Slot");
Steve Blockd0582a62009-12-15 09:54:21 +00002619 LoadFromSlot(node, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00002620 ASSERT(frame_->height() == original_height + 1);
2621}
2622
2623
2624void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2625#ifdef DEBUG
2626 int original_height = frame_->height();
2627#endif
2628 VirtualFrame::SpilledScope spilled_scope;
2629 Comment cmnt(masm_, "[ VariableProxy");
2630
2631 Variable* var = node->var();
2632 Expression* expr = var->rewrite();
2633 if (expr != NULL) {
2634 Visit(expr);
2635 } else {
2636 ASSERT(var->is_global());
2637 Reference ref(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002638 ref.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002639 }
2640 ASSERT(frame_->height() == original_height + 1);
2641}
2642
2643
2644void CodeGenerator::VisitLiteral(Literal* node) {
2645#ifdef DEBUG
2646 int original_height = frame_->height();
2647#endif
2648 VirtualFrame::SpilledScope spilled_scope;
2649 Comment cmnt(masm_, "[ Literal");
2650 __ mov(r0, Operand(node->handle()));
2651 frame_->EmitPush(r0);
2652 ASSERT(frame_->height() == original_height + 1);
2653}
2654
2655
2656void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2657#ifdef DEBUG
2658 int original_height = frame_->height();
2659#endif
2660 VirtualFrame::SpilledScope spilled_scope;
2661 Comment cmnt(masm_, "[ RegExp Literal");
2662
2663 // Retrieve the literal array and check the allocated entry.
2664
2665 // Load the function of this activation.
2666 __ ldr(r1, frame_->Function());
2667
2668 // Load the literals array of the function.
2669 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
2670
2671 // Load the literal at the ast saved index.
2672 int literal_offset =
2673 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2674 __ ldr(r2, FieldMemOperand(r1, literal_offset));
2675
2676 JumpTarget done;
2677 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2678 __ cmp(r2, ip);
2679 done.Branch(ne);
2680
2681 // If the entry is undefined we call the runtime system to compute
2682 // the literal.
2683 frame_->EmitPush(r1); // literal array (0)
2684 __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
2685 frame_->EmitPush(r0); // literal index (1)
2686 __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
2687 frame_->EmitPush(r0);
2688 __ mov(r0, Operand(node->flags())); // RegExp flags (3)
2689 frame_->EmitPush(r0);
2690 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2691 __ mov(r2, Operand(r0));
2692
2693 done.Bind();
2694 // Push the literal.
2695 frame_->EmitPush(r2);
2696 ASSERT(frame_->height() == original_height + 1);
2697}
2698
2699
Steve Blocka7e24c12009-10-30 11:49:00 +00002700void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2701#ifdef DEBUG
2702 int original_height = frame_->height();
2703#endif
2704 VirtualFrame::SpilledScope spilled_scope;
2705 Comment cmnt(masm_, "[ ObjectLiteral");
2706
Steve Blocka7e24c12009-10-30 11:49:00 +00002707 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00002708 __ ldr(r2, frame_->Function());
2709 // Literal array.
2710 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
2711 // Literal index.
2712 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
2713 // Constant properties.
2714 __ mov(r0, Operand(node->constant_properties()));
2715 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
2716 if (node->depth() > 1) {
2717 frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
2718 } else {
2719 frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002720 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002721 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00002722 // r0: created object literal
Steve Blocka7e24c12009-10-30 11:49:00 +00002723
2724 for (int i = 0; i < node->properties()->length(); i++) {
2725 ObjectLiteral::Property* property = node->properties()->at(i);
2726 Literal* key = property->key();
2727 Expression* value = property->value();
2728 switch (property->kind()) {
2729 case ObjectLiteral::Property::CONSTANT:
2730 break;
2731 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2732 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2733 // else fall through
2734 case ObjectLiteral::Property::COMPUTED: // fall through
2735 case ObjectLiteral::Property::PROTOTYPE: {
2736 frame_->EmitPush(r0); // dup the result
2737 LoadAndSpill(key);
2738 LoadAndSpill(value);
2739 frame_->CallRuntime(Runtime::kSetProperty, 3);
2740 // restore r0
2741 __ ldr(r0, frame_->Top());
2742 break;
2743 }
2744 case ObjectLiteral::Property::SETTER: {
2745 frame_->EmitPush(r0);
2746 LoadAndSpill(key);
2747 __ mov(r0, Operand(Smi::FromInt(1)));
2748 frame_->EmitPush(r0);
2749 LoadAndSpill(value);
2750 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2751 __ ldr(r0, frame_->Top());
2752 break;
2753 }
2754 case ObjectLiteral::Property::GETTER: {
2755 frame_->EmitPush(r0);
2756 LoadAndSpill(key);
2757 __ mov(r0, Operand(Smi::FromInt(0)));
2758 frame_->EmitPush(r0);
2759 LoadAndSpill(value);
2760 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2761 __ ldr(r0, frame_->Top());
2762 break;
2763 }
2764 }
2765 }
2766 ASSERT(frame_->height() == original_height + 1);
2767}
2768
2769
Steve Blocka7e24c12009-10-30 11:49:00 +00002770void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2771#ifdef DEBUG
2772 int original_height = frame_->height();
2773#endif
2774 VirtualFrame::SpilledScope spilled_scope;
2775 Comment cmnt(masm_, "[ ArrayLiteral");
2776
Steve Blocka7e24c12009-10-30 11:49:00 +00002777 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00002778 __ ldr(r2, frame_->Function());
2779 // Literals array.
2780 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
2781 // Literal index.
2782 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
2783 // Constant elements.
2784 __ mov(r0, Operand(node->constant_elements()));
2785 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
2786 if (node->depth() > 1) {
2787 frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
2788 } else {
2789 frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002790 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002791 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00002792 // r0: created array literal
Steve Blocka7e24c12009-10-30 11:49:00 +00002793
2794 // Generate code to set the elements in the array that are not
2795 // literals.
2796 for (int i = 0; i < node->values()->length(); i++) {
2797 Expression* value = node->values()->at(i);
2798
2799 // If value is a literal the property value is already set in the
2800 // boilerplate object.
2801 if (value->AsLiteral() != NULL) continue;
2802 // If value is a materialized literal the property value is already set
2803 // in the boilerplate object if it is simple.
2804 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2805
2806 // The property must be set by generated code.
2807 LoadAndSpill(value);
2808 frame_->EmitPop(r0);
2809
2810 // Fetch the object literal.
2811 __ ldr(r1, frame_->Top());
2812 // Get the elements array.
2813 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
2814
2815 // Write to the indexed properties array.
2816 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2817 __ str(r0, FieldMemOperand(r1, offset));
2818
2819 // Update the write barrier for the array address.
2820 __ mov(r3, Operand(offset));
2821 __ RecordWrite(r1, r3, r2);
2822 }
2823 ASSERT(frame_->height() == original_height + 1);
2824}
2825
2826
2827void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2828#ifdef DEBUG
2829 int original_height = frame_->height();
2830#endif
2831 VirtualFrame::SpilledScope spilled_scope;
2832 // Call runtime routine to allocate the catch extension object and
2833 // assign the exception value to the catch variable.
2834 Comment cmnt(masm_, "[ CatchExtensionObject");
2835 LoadAndSpill(node->key());
2836 LoadAndSpill(node->value());
2837 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2838 frame_->EmitPush(r0);
2839 ASSERT(frame_->height() == original_height + 1);
2840}
2841
2842
2843void CodeGenerator::VisitAssignment(Assignment* node) {
2844#ifdef DEBUG
2845 int original_height = frame_->height();
2846#endif
2847 VirtualFrame::SpilledScope spilled_scope;
2848 Comment cmnt(masm_, "[ Assignment");
2849
Leon Clarked91b9f72010-01-27 17:25:45 +00002850 { Reference target(this, node->target(), node->is_compound());
Steve Blocka7e24c12009-10-30 11:49:00 +00002851 if (target.is_illegal()) {
2852 // Fool the virtual frame into thinking that we left the assignment's
2853 // value on the frame.
2854 __ mov(r0, Operand(Smi::FromInt(0)));
2855 frame_->EmitPush(r0);
2856 ASSERT(frame_->height() == original_height + 1);
2857 return;
2858 }
2859
2860 if (node->op() == Token::ASSIGN ||
2861 node->op() == Token::INIT_VAR ||
2862 node->op() == Token::INIT_CONST) {
2863 LoadAndSpill(node->value());
2864
Leon Clarked91b9f72010-01-27 17:25:45 +00002865 } else { // Assignment is a compound assignment.
Steve Blocka7e24c12009-10-30 11:49:00 +00002866 // Get the old value of the lhs.
Steve Blockd0582a62009-12-15 09:54:21 +00002867 target.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002868 Literal* literal = node->value()->AsLiteral();
2869 bool overwrite =
2870 (node->value()->AsBinaryOperation() != NULL &&
2871 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2872 if (literal != NULL && literal->handle()->IsSmi()) {
2873 SmiOperation(node->binary_op(),
2874 literal->handle(),
2875 false,
2876 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2877 frame_->EmitPush(r0);
2878
2879 } else {
2880 LoadAndSpill(node->value());
2881 GenericBinaryOperation(node->binary_op(),
2882 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2883 frame_->EmitPush(r0);
2884 }
2885 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002886 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2887 if (var != NULL &&
2888 (var->mode() == Variable::CONST) &&
2889 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2890 // Assignment ignored - leave the value on the stack.
Leon Clarked91b9f72010-01-27 17:25:45 +00002891 UnloadReference(&target);
Steve Blocka7e24c12009-10-30 11:49:00 +00002892 } else {
2893 CodeForSourcePosition(node->position());
2894 if (node->op() == Token::INIT_CONST) {
2895 // Dynamic constant initializations must use the function context
2896 // and initialize the actual constant declared. Dynamic variable
2897 // initializations are simply assignments and use SetValue.
2898 target.SetValue(CONST_INIT);
2899 } else {
2900 target.SetValue(NOT_CONST_INIT);
2901 }
2902 }
2903 }
2904 ASSERT(frame_->height() == original_height + 1);
2905}
2906
2907
2908void CodeGenerator::VisitThrow(Throw* node) {
2909#ifdef DEBUG
2910 int original_height = frame_->height();
2911#endif
2912 VirtualFrame::SpilledScope spilled_scope;
2913 Comment cmnt(masm_, "[ Throw");
2914
2915 LoadAndSpill(node->exception());
2916 CodeForSourcePosition(node->position());
2917 frame_->CallRuntime(Runtime::kThrow, 1);
2918 frame_->EmitPush(r0);
2919 ASSERT(frame_->height() == original_height + 1);
2920}
2921
2922
2923void CodeGenerator::VisitProperty(Property* node) {
2924#ifdef DEBUG
2925 int original_height = frame_->height();
2926#endif
2927 VirtualFrame::SpilledScope spilled_scope;
2928 Comment cmnt(masm_, "[ Property");
2929
2930 { Reference property(this, node);
Steve Blockd0582a62009-12-15 09:54:21 +00002931 property.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00002932 }
2933 ASSERT(frame_->height() == original_height + 1);
2934}
2935
2936
2937void CodeGenerator::VisitCall(Call* node) {
2938#ifdef DEBUG
2939 int original_height = frame_->height();
2940#endif
2941 VirtualFrame::SpilledScope spilled_scope;
2942 Comment cmnt(masm_, "[ Call");
2943
2944 Expression* function = node->expression();
2945 ZoneList<Expression*>* args = node->arguments();
2946
2947 // Standard function call.
2948 // Check if the function is a variable or a property.
2949 Variable* var = function->AsVariableProxy()->AsVariable();
2950 Property* property = function->AsProperty();
2951
2952 // ------------------------------------------------------------------------
2953 // Fast-case: Use inline caching.
2954 // ---
2955 // According to ECMA-262, section 11.2.3, page 44, the function to call
2956 // must be resolved after the arguments have been evaluated. The IC code
2957 // automatically handles this by loading the arguments before the function
2958 // is resolved in cache misses (this also holds for megamorphic calls).
2959 // ------------------------------------------------------------------------
2960
2961 if (var != NULL && var->is_possibly_eval()) {
2962 // ----------------------------------
2963 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2964 // ----------------------------------
2965
2966 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2967 // resolve the function we need to call and the receiver of the
2968 // call. Then we call the resolved function using the given
2969 // arguments.
2970 // Prepare stack for call to resolved function.
2971 LoadAndSpill(function);
2972 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2973 frame_->EmitPush(r2); // Slot for receiver
2974 int arg_count = args->length();
2975 for (int i = 0; i < arg_count; i++) {
2976 LoadAndSpill(args->at(i));
2977 }
2978
2979 // Prepare stack for call to ResolvePossiblyDirectEval.
2980 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
2981 frame_->EmitPush(r1);
2982 if (arg_count > 0) {
2983 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
2984 frame_->EmitPush(r1);
2985 } else {
2986 frame_->EmitPush(r2);
2987 }
2988
Leon Clarkee46be812010-01-19 14:06:41 +00002989 // Push the receiver.
2990 __ ldr(r1, frame_->Receiver());
2991 frame_->EmitPush(r1);
2992
Steve Blocka7e24c12009-10-30 11:49:00 +00002993 // Resolve the call.
Leon Clarkee46be812010-01-19 14:06:41 +00002994 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00002995
2996 // Touch up stack with the right values for the function and the receiver.
Leon Clarkee46be812010-01-19 14:06:41 +00002997 __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00002998 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
2999
3000 // Call the function.
3001 CodeForSourcePosition(node->position());
3002
3003 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Leon Clarkee46be812010-01-19 14:06:41 +00003004 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003005 frame_->CallStub(&call_function, arg_count + 1);
3006
3007 __ ldr(cp, frame_->Context());
3008 // Remove the function from the stack.
3009 frame_->Drop();
3010 frame_->EmitPush(r0);
3011
3012 } else if (var != NULL && !var->is_this() && var->is_global()) {
3013 // ----------------------------------
3014 // JavaScript example: 'foo(1, 2, 3)' // foo is global
3015 // ----------------------------------
3016
3017 // Push the name of the function and the receiver onto the stack.
3018 __ mov(r0, Operand(var->name()));
3019 frame_->EmitPush(r0);
3020
3021 // Pass the global object as the receiver and let the IC stub
3022 // patch the stack to use the global proxy as 'this' in the
3023 // invoked function.
3024 LoadGlobal();
3025
3026 // Load the arguments.
3027 int arg_count = args->length();
3028 for (int i = 0; i < arg_count; i++) {
3029 LoadAndSpill(args->at(i));
3030 }
3031
3032 // Setup the receiver register and call the IC initialization code.
3033 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3034 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3035 CodeForSourcePosition(node->position());
3036 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3037 arg_count + 1);
3038 __ ldr(cp, frame_->Context());
3039 // Remove the function from the stack.
3040 frame_->Drop();
3041 frame_->EmitPush(r0);
3042
3043 } else if (var != NULL && var->slot() != NULL &&
3044 var->slot()->type() == Slot::LOOKUP) {
3045 // ----------------------------------
3046 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
3047 // ----------------------------------
3048
3049 // Load the function
3050 frame_->EmitPush(cp);
3051 __ mov(r0, Operand(var->name()));
3052 frame_->EmitPush(r0);
3053 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3054 // r0: slot value; r1: receiver
3055
3056 // Load the receiver.
3057 frame_->EmitPush(r0); // function
3058 frame_->EmitPush(r1); // receiver
3059
3060 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003061 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003062 frame_->EmitPush(r0);
3063
3064 } else if (property != NULL) {
3065 // Check if the key is a literal string.
3066 Literal* literal = property->key()->AsLiteral();
3067
3068 if (literal != NULL && literal->handle()->IsSymbol()) {
3069 // ------------------------------------------------------------------
3070 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
3071 // ------------------------------------------------------------------
3072
3073 // Push the name of the function and the receiver onto the stack.
3074 __ mov(r0, Operand(literal->handle()));
3075 frame_->EmitPush(r0);
3076 LoadAndSpill(property->obj());
3077
3078 // Load the arguments.
3079 int arg_count = args->length();
3080 for (int i = 0; i < arg_count; i++) {
3081 LoadAndSpill(args->at(i));
3082 }
3083
3084 // Set the receiver register and call the IC initialization code.
3085 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3086 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3087 CodeForSourcePosition(node->position());
3088 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3089 __ ldr(cp, frame_->Context());
3090
3091 // Remove the function from the stack.
3092 frame_->Drop();
3093
 3094         frame_->EmitPush(r0); // Push the result now that the function is gone from the stack.
3095
3096 } else {
3097 // -------------------------------------------
3098 // JavaScript example: 'array[index](1, 2, 3)'
3099 // -------------------------------------------
3100
Leon Clarked91b9f72010-01-27 17:25:45 +00003101 LoadAndSpill(property->obj());
3102 LoadAndSpill(property->key());
3103 EmitKeyedLoad(false);
3104 frame_->Drop(); // key
3105 // Put the function below the receiver.
Steve Blocka7e24c12009-10-30 11:49:00 +00003106 if (property->is_synthetic()) {
Leon Clarked91b9f72010-01-27 17:25:45 +00003107 // Use the global receiver.
3108 frame_->Drop();
3109 frame_->EmitPush(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003110 LoadGlobalReceiver(r0);
3111 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00003112 frame_->EmitPop(r1); // receiver
3113 frame_->EmitPush(r0); // function
3114 frame_->EmitPush(r1); // receiver
Steve Blocka7e24c12009-10-30 11:49:00 +00003115 }
3116
3117 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003118 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003119 frame_->EmitPush(r0);
3120 }
3121
3122 } else {
3123 // ----------------------------------
3124 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
3125 // ----------------------------------
3126
3127 // Load the function.
3128 LoadAndSpill(function);
3129
3130 // Pass the global proxy as the receiver.
3131 LoadGlobalReceiver(r0);
3132
3133 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003134 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003135 frame_->EmitPush(r0);
3136 }
3137 ASSERT(frame_->height() == original_height + 1);
3138}
3139
3140
3141void CodeGenerator::VisitCallNew(CallNew* node) {
3142#ifdef DEBUG
3143 int original_height = frame_->height();
3144#endif
3145 VirtualFrame::SpilledScope spilled_scope;
3146 Comment cmnt(masm_, "[ CallNew");
3147
3148 // According to ECMA-262, section 11.2.2, page 44, the function
3149 // expression in new calls must be evaluated before the
3150 // arguments. This is different from ordinary calls, where the
3151 // actual function to call is resolved after the arguments have been
3152 // evaluated.
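  // For example, in 'new f(g())' the value of 'f' is loaded before 'g()' runs,
  // whereas in the plain call 'f(g())' the callee is only resolved after the
  // argument expression has been evaluated.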
3153
3154 // Compute function to call and use the global object as the
3155 // receiver. There is no need to use the global proxy here because
3156 // it will always be replaced with a newly allocated object.
3157 LoadAndSpill(node->expression());
3158 LoadGlobal();
3159
3160 // Push the arguments ("left-to-right") on the stack.
3161 ZoneList<Expression*>* args = node->arguments();
3162 int arg_count = args->length();
3163 for (int i = 0; i < arg_count; i++) {
3164 LoadAndSpill(args->at(i));
3165 }
3166
3167 // r0: the number of arguments.
Steve Blocka7e24c12009-10-30 11:49:00 +00003168 __ mov(r0, Operand(arg_count));
Steve Blocka7e24c12009-10-30 11:49:00 +00003169 // Load the function into r1 as per calling convention.
Steve Blocka7e24c12009-10-30 11:49:00 +00003170 __ ldr(r1, frame_->ElementAt(arg_count + 1));
3171
3172 // Call the construct call builtin that handles allocation and
3173 // constructor invocation.
3174 CodeForSourcePosition(node->position());
3175 Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
Leon Clarke4515c472010-02-03 11:58:03 +00003176 frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003177
3178 // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
3179 __ str(r0, frame_->Top());
3180 ASSERT(frame_->height() == original_height + 1);
3181}
3182
3183
3184void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
3185 VirtualFrame::SpilledScope spilled_scope;
3186 ASSERT(args->length() == 1);
3187 JumpTarget leave, null, function, non_function_constructor;
3188
3189 // Load the object into r0.
3190 LoadAndSpill(args->at(0));
3191 frame_->EmitPop(r0);
3192
3193 // If the object is a smi, we return null.
3194 __ tst(r0, Operand(kSmiTagMask));
3195 null.Branch(eq);
3196
3197 // Check that the object is a JS object but take special care of JS
3198 // functions to make sure they have 'Function' as their class.
3199 __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
3200 null.Branch(lt);
3201
3202 // As long as JS_FUNCTION_TYPE is the last instance type and it is
3203 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
3204 // LAST_JS_OBJECT_TYPE.
3205 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
3206 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
3207 __ cmp(r1, Operand(JS_FUNCTION_TYPE));
3208 function.Branch(eq);
3209
3210 // Check if the constructor in the map is a function.
3211 __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
3212 __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
3213 non_function_constructor.Branch(ne);
3214
3215 // The r0 register now contains the constructor function. Grab the
3216 // instance class name from there.
3217 __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
3218 __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
3219 frame_->EmitPush(r0);
3220 leave.Jump();
3221
3222 // Functions have class 'Function'.
3223 function.Bind();
3224 __ mov(r0, Operand(Factory::function_class_symbol()));
3225 frame_->EmitPush(r0);
3226 leave.Jump();
3227
3228 // Objects with a non-function constructor have class 'Object'.
3229 non_function_constructor.Bind();
3230 __ mov(r0, Operand(Factory::Object_symbol()));
3231 frame_->EmitPush(r0);
3232 leave.Jump();
3233
3234 // Non-JS objects have class null.
3235 null.Bind();
3236 __ LoadRoot(r0, Heap::kNullValueRootIndex);
3237 frame_->EmitPush(r0);
3238
3239 // All done.
3240 leave.Bind();
3241}
3242
3243
3244void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
3245 VirtualFrame::SpilledScope spilled_scope;
3246 ASSERT(args->length() == 1);
3247 JumpTarget leave;
3248 LoadAndSpill(args->at(0));
3249 frame_->EmitPop(r0); // r0 contains object.
3250 // if (object->IsSmi()) return the object.
3251 __ tst(r0, Operand(kSmiTagMask));
3252 leave.Branch(eq);
3253 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3254 __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
3255 leave.Branch(ne);
3256 // Load the value.
3257 __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
3258 leave.Bind();
3259 frame_->EmitPush(r0);
3260}
3261
3262
3263void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
3264 VirtualFrame::SpilledScope spilled_scope;
3265 ASSERT(args->length() == 2);
3266 JumpTarget leave;
3267 LoadAndSpill(args->at(0)); // Load the object.
3268 LoadAndSpill(args->at(1)); // Load the value.
3269 frame_->EmitPop(r0); // r0 contains value
3270 frame_->EmitPop(r1); // r1 contains object
3271 // if (object->IsSmi()) return object.
3272 __ tst(r1, Operand(kSmiTagMask));
3273 leave.Branch(eq);
3274 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
3275 __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
3276 leave.Branch(ne);
3277 // Store the value.
3278 __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
3279 // Update the write barrier.
3280 __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
3281 __ RecordWrite(r1, r2, r3);
3282 // Leave.
3283 leave.Bind();
3284 frame_->EmitPush(r0);
3285}
3286
3287
3288void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3289 VirtualFrame::SpilledScope spilled_scope;
3290 ASSERT(args->length() == 1);
3291 LoadAndSpill(args->at(0));
3292 frame_->EmitPop(r0);
3293 __ tst(r0, Operand(kSmiTagMask));
3294 cc_reg_ = eq;
3295}
3296
3297
3298void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3299 VirtualFrame::SpilledScope spilled_scope;
3300 // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
3301 ASSERT_EQ(args->length(), 3);
3302#ifdef ENABLE_LOGGING_AND_PROFILING
3303 if (ShouldGenerateLog(args->at(0))) {
3304 LoadAndSpill(args->at(1));
3305 LoadAndSpill(args->at(2));
3306 __ CallRuntime(Runtime::kLog, 2);
3307 }
3308#endif
3309 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3310 frame_->EmitPush(r0);
3311}
3312
3313
3314void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3315 VirtualFrame::SpilledScope spilled_scope;
3316 ASSERT(args->length() == 1);
3317 LoadAndSpill(args->at(0));
3318 frame_->EmitPop(r0);
3319 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3320 cc_reg_ = eq;
3321}
3322
3323
3324// This generates code that performs a charCodeAt() call inline for flat,
3325// sequential strings, or returns undefined in order to trigger the slow
3326// case, Runtime_StringCharCodeAt, for anything it cannot handle inline.
3327void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3328 VirtualFrame::SpilledScope spilled_scope;
3329 ASSERT(args->length() == 2);
Steve Blockd0582a62009-12-15 09:54:21 +00003330  Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
3331
3332 LoadAndSpill(args->at(0));
3333 LoadAndSpill(args->at(1));
3334 frame_->EmitPop(r0); // Index.
3335 frame_->EmitPop(r1); // String.
3336
3337 Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
3338
3339 __ tst(r1, Operand(kSmiTagMask));
3340 __ b(eq, &slow); // The 'string' was a Smi.
3341
3342 ASSERT(kSmiTag == 0);
3343 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3344 __ b(ne, &slow); // The index was negative or not a Smi.
3345
3346 __ bind(&try_again_with_new_string);
3347 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
3348 __ b(ge, &slow);
3349
3350 // Now r2 has the string type.
3351 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
3352 // Now r3 has the length of the string. Compare with the index.
3353 __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
3354 __ b(le, &slow);
3355
3356 // Here we know the index is in range. Check that string is sequential.
3357 ASSERT_EQ(0, kSeqStringTag);
3358 __ tst(r2, Operand(kStringRepresentationMask));
3359 __ b(ne, &not_a_flat_string);
3360
3361 // Check whether it is an ASCII string.
3362 ASSERT_EQ(0, kTwoByteStringTag);
3363 __ tst(r2, Operand(kStringEncodingMask));
3364 __ b(ne, &ascii_string);
3365
3366 // 2-byte string. We can add without shifting since the Smi tag size is the
3367 // log2 of the number of bytes in a two-byte character.
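  // For example, with index 3 the Smi encoding is 3 << kSmiTagSize == 6, which
  // is exactly the byte offset of the fourth two-byte character.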
3368 ASSERT_EQ(1, kSmiTagSize);
3369 ASSERT_EQ(0, kSmiShiftSize);
3370 __ add(r1, r1, Operand(r0));
3371 __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
3372 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
3373 __ jmp(&end);
3374
3375 __ bind(&ascii_string);
3376 __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
3377 __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
3378 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
3379 __ jmp(&end);
3380
3381 __ bind(&not_a_flat_string);
3382 __ and_(r2, r2, Operand(kStringRepresentationMask));
3383 __ cmp(r2, Operand(kConsStringTag));
3384 __ b(ne, &slow);
3385
3386 // ConsString.
 3387   // Check that the right-hand side is the empty string (i.e. whether this is
 3388   // really a flat string wrapped in a cons string). If that is not the case,
 3389   // we would rather go to the runtime system now to flatten the string.
3390 __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
3391 __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
3392 __ cmp(r2, Operand(r3));
3393 __ b(ne, &slow);
3394
3395 // Get the first of the two strings.
3396 __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
3397 __ jmp(&try_again_with_new_string);
3398
3399 __ bind(&slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00003400 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
Steve Blockd0582a62009-12-15 09:54:21 +00003401
3402 __ bind(&end);
Steve Blocka7e24c12009-10-30 11:49:00 +00003403 frame_->EmitPush(r0);
3404}
3405
3406
3407void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3408 VirtualFrame::SpilledScope spilled_scope;
3409 ASSERT(args->length() == 1);
3410 LoadAndSpill(args->at(0));
3411 JumpTarget answer;
3412 // We need the CC bits to come out as not_equal in the case where the
3413 // object is a smi. This can't be done with the usual test opcode so
3414 // we use XOR to get the right CC bits.
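  // For a smi the tag bit is 0, so (value & kSmiTagMask) ^ kSmiTagMask is
  // non-zero: the flags come out 'ne' and the final 'eq' condition is false.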
3415 frame_->EmitPop(r0);
3416 __ and_(r1, r0, Operand(kSmiTagMask));
3417 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
3418 answer.Branch(ne);
3419 // It is a heap object - get the map. Check if the object is a JS array.
3420 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
3421 answer.Bind();
3422 cc_reg_ = eq;
3423}
3424
3425
Steve Blockd0582a62009-12-15 09:54:21 +00003426void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
3427 // This generates a fast version of:
3428 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
3429 VirtualFrame::SpilledScope spilled_scope;
3430 ASSERT(args->length() == 1);
3431 LoadAndSpill(args->at(0));
3432 frame_->EmitPop(r1);
3433 __ tst(r1, Operand(kSmiTagMask));
3434 false_target()->Branch(eq);
3435
3436 __ LoadRoot(ip, Heap::kNullValueRootIndex);
3437 __ cmp(r1, ip);
3438 true_target()->Branch(eq);
3439
3440 Register map_reg = r2;
3441 __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
3442 // Undetectable objects behave like undefined when tested with typeof.
3443 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
3444 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
3445 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
3446 false_target()->Branch(eq);
3447
3448 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
3449 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
3450 false_target()->Branch(lt);
3451 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
3452 cc_reg_ = le;
3453}
3454
3455
3456void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
3457 // This generates a fast version of:
3458 // (%_ClassOf(arg) === 'Function')
3459 VirtualFrame::SpilledScope spilled_scope;
3460 ASSERT(args->length() == 1);
3461 LoadAndSpill(args->at(0));
3462 frame_->EmitPop(r0);
3463 __ tst(r0, Operand(kSmiTagMask));
3464 false_target()->Branch(eq);
3465 Register map_reg = r2;
3466 __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
3467 cc_reg_ = eq;
3468}
3469
3470
Leon Clarked91b9f72010-01-27 17:25:45 +00003471void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
3472 VirtualFrame::SpilledScope spilled_scope;
3473 ASSERT(args->length() == 1);
3474 LoadAndSpill(args->at(0));
3475 frame_->EmitPop(r0);
3476 __ tst(r0, Operand(kSmiTagMask));
3477 false_target()->Branch(eq);
3478 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
3479 __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
3480 __ tst(r1, Operand(1 << Map::kIsUndetectable));
3481 cc_reg_ = ne;
3482}
3483
3484
Steve Blocka7e24c12009-10-30 11:49:00 +00003485void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3486 VirtualFrame::SpilledScope spilled_scope;
3487 ASSERT(args->length() == 0);
3488
3489 // Get the frame pointer for the calling frame.
3490 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3491
3492 // Skip the arguments adaptor frame if it exists.
3493 Label check_frame_marker;
3494 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
3495 __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3496 __ b(ne, &check_frame_marker);
3497 __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
3498
3499 // Check the marker in the calling frame.
3500 __ bind(&check_frame_marker);
3501 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
3502 __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3503 cc_reg_ = eq;
3504}
3505
3506
3507void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3508 VirtualFrame::SpilledScope spilled_scope;
3509 ASSERT(args->length() == 0);
3510
3511 // Seed the result with the formal parameters count, which will be used
3512 // in case no arguments adaptor frame is found below the current frame.
Andrei Popescu31002712010-02-23 13:46:05 +00003513 __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
Steve Blocka7e24c12009-10-30 11:49:00 +00003514
3515 // Call the shared stub to get to the arguments.length.
3516 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3517 frame_->CallStub(&stub, 0);
3518 frame_->EmitPush(r0);
3519}
3520
3521
3522void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3523 VirtualFrame::SpilledScope spilled_scope;
3524 ASSERT(args->length() == 1);
3525
3526 // Satisfy contract with ArgumentsAccessStub:
3527 // Load the key into r1 and the formal parameters count into r0.
3528 LoadAndSpill(args->at(0));
3529 frame_->EmitPop(r1);
Andrei Popescu31002712010-02-23 13:46:05 +00003530 __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
Steve Blocka7e24c12009-10-30 11:49:00 +00003531
3532 // Call the shared stub to get to arguments[key].
3533 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3534 frame_->CallStub(&stub, 0);
3535 frame_->EmitPush(r0);
3536}
3537
3538
3539void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3540 VirtualFrame::SpilledScope spilled_scope;
3541 ASSERT(args->length() == 0);
3542 __ Call(ExternalReference::random_positive_smi_function().address(),
3543 RelocInfo::RUNTIME_ENTRY);
3544 frame_->EmitPush(r0);
3545}
3546
3547
Steve Blockd0582a62009-12-15 09:54:21 +00003548void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
3549 ASSERT_EQ(2, args->length());
3550
3551 Load(args->at(0));
3552 Load(args->at(1));
3553
Andrei Popescu31002712010-02-23 13:46:05 +00003554 StringAddStub stub(NO_STRING_ADD_FLAGS);
3555 frame_->CallStub(&stub, 2);
Steve Blockd0582a62009-12-15 09:54:21 +00003556 frame_->EmitPush(r0);
3557}
3558
3559
Leon Clarkee46be812010-01-19 14:06:41 +00003560void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
3561 ASSERT_EQ(3, args->length());
3562
3563 Load(args->at(0));
3564 Load(args->at(1));
3565 Load(args->at(2));
3566
Andrei Popescu31002712010-02-23 13:46:05 +00003567 SubStringStub stub;
3568 frame_->CallStub(&stub, 3);
Leon Clarkee46be812010-01-19 14:06:41 +00003569 frame_->EmitPush(r0);
3570}
3571
3572
3573void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
3574 ASSERT_EQ(2, args->length());
3575
3576 Load(args->at(0));
3577 Load(args->at(1));
3578
Leon Clarked91b9f72010-01-27 17:25:45 +00003579 StringCompareStub stub;
3580 frame_->CallStub(&stub, 2);
Leon Clarkee46be812010-01-19 14:06:41 +00003581 frame_->EmitPush(r0);
3582}
3583
3584
3585void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
3586 ASSERT_EQ(4, args->length());
3587
3588 Load(args->at(0));
3589 Load(args->at(1));
3590 Load(args->at(2));
3591 Load(args->at(3));
3592
3593 frame_->CallRuntime(Runtime::kRegExpExec, 4);
3594 frame_->EmitPush(r0);
3595}
3596
3597
Steve Blocka7e24c12009-10-30 11:49:00 +00003598void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3599 VirtualFrame::SpilledScope spilled_scope;
3600 ASSERT(args->length() == 2);
3601
3602 // Load the two objects into registers and perform the comparison.
3603 LoadAndSpill(args->at(0));
3604 LoadAndSpill(args->at(1));
3605 frame_->EmitPop(r0);
3606 frame_->EmitPop(r1);
3607 __ cmp(r0, Operand(r1));
3608 cc_reg_ = eq;
3609}
3610
3611
3612void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
3613#ifdef DEBUG
3614 int original_height = frame_->height();
3615#endif
3616 VirtualFrame::SpilledScope spilled_scope;
3617 if (CheckForInlineRuntimeCall(node)) {
3618 ASSERT((has_cc() && frame_->height() == original_height) ||
3619 (!has_cc() && frame_->height() == original_height + 1));
3620 return;
3621 }
3622
3623 ZoneList<Expression*>* args = node->arguments();
3624 Comment cmnt(masm_, "[ CallRuntime");
3625 Runtime::Function* function = node->function();
3626
3627 if (function == NULL) {
3628 // Prepare stack for calling JS runtime function.
3629 __ mov(r0, Operand(node->name()));
3630 frame_->EmitPush(r0);
3631 // Push the builtins object found in the current global object.
3632 __ ldr(r1, GlobalObject());
3633 __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
3634 frame_->EmitPush(r0);
3635 }
3636
3637 // Push the arguments ("left-to-right").
3638 int arg_count = args->length();
3639 for (int i = 0; i < arg_count; i++) {
3640 LoadAndSpill(args->at(i));
3641 }
3642
3643 if (function == NULL) {
3644 // Call the JS runtime function.
3645 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3646 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3647 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3648 __ ldr(cp, frame_->Context());
3649 frame_->Drop();
3650 frame_->EmitPush(r0);
3651 } else {
3652 // Call the C runtime function.
3653 frame_->CallRuntime(function, arg_count);
3654 frame_->EmitPush(r0);
3655 }
3656 ASSERT(frame_->height() == original_height + 1);
3657}
3658
3659
3660void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
3661#ifdef DEBUG
3662 int original_height = frame_->height();
3663#endif
3664 VirtualFrame::SpilledScope spilled_scope;
3665 Comment cmnt(masm_, "[ UnaryOperation");
3666
3667 Token::Value op = node->op();
3668
3669 if (op == Token::NOT) {
3670 LoadConditionAndSpill(node->expression(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003671 false_target(),
3672 true_target(),
3673 true);
3674 // LoadCondition may (and usually does) leave a test and branch to
3675 // be emitted by the caller. In that case, negate the condition.
3676 if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
3677
3678 } else if (op == Token::DELETE) {
3679 Property* property = node->expression()->AsProperty();
3680 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3681 if (property != NULL) {
3682 LoadAndSpill(property->obj());
3683 LoadAndSpill(property->key());
Steve Blockd0582a62009-12-15 09:54:21 +00003684 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003685
3686 } else if (variable != NULL) {
3687 Slot* slot = variable->slot();
3688 if (variable->is_global()) {
3689 LoadGlobal();
3690 __ mov(r0, Operand(variable->name()));
3691 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003692 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003693
3694 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
 3695         // Look up the context holding the named variable.
3696 frame_->EmitPush(cp);
3697 __ mov(r0, Operand(variable->name()));
3698 frame_->EmitPush(r0);
3699 frame_->CallRuntime(Runtime::kLookupContext, 2);
3700 // r0: context
3701 frame_->EmitPush(r0);
3702 __ mov(r0, Operand(variable->name()));
3703 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003704 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00003705
3706 } else {
 3707         // Default: The result of deleting a non-global variable that was not
 3708         // dynamically introduced is false.
3709 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
3710 }
3711
3712 } else {
3713 // Default: Result of deleting expressions is true.
3714 LoadAndSpill(node->expression()); // may have side-effects
3715 frame_->Drop();
3716 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
3717 }
3718 frame_->EmitPush(r0);
3719
3720 } else if (op == Token::TYPEOF) {
3721 // Special case for loading the typeof expression; see comment on
3722 // LoadTypeofExpression().
3723 LoadTypeofExpression(node->expression());
3724 frame_->CallRuntime(Runtime::kTypeof, 1);
3725 frame_->EmitPush(r0); // r0 has result
3726
3727 } else {
Leon Clarke4515c472010-02-03 11:58:03 +00003728 bool overwrite =
3729 (node->expression()->AsBinaryOperation() != NULL &&
3730 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Steve Blocka7e24c12009-10-30 11:49:00 +00003731 LoadAndSpill(node->expression());
3732 frame_->EmitPop(r0);
3733 switch (op) {
3734 case Token::NOT:
3735 case Token::DELETE:
3736 case Token::TYPEOF:
3737 UNREACHABLE(); // handled above
3738 break;
3739
3740 case Token::SUB: {
Leon Clarkee46be812010-01-19 14:06:41 +00003741 GenericUnaryOpStub stub(Token::SUB, overwrite);
Steve Blocka7e24c12009-10-30 11:49:00 +00003742 frame_->CallStub(&stub, 0);
3743 break;
3744 }
3745
3746 case Token::BIT_NOT: {
 3747         // Smi check.
3748 JumpTarget smi_label;
3749 JumpTarget continue_label;
3750 __ tst(r0, Operand(kSmiTagMask));
3751 smi_label.Branch(eq);
3752
Leon Clarke4515c472010-02-03 11:58:03 +00003753 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
3754 frame_->CallStub(&stub, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003755 continue_label.Jump();
Leon Clarke4515c472010-02-03 11:58:03 +00003756
Steve Blocka7e24c12009-10-30 11:49:00 +00003757 smi_label.Bind();
3758 __ mvn(r0, Operand(r0));
3759 __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
3760 continue_label.Bind();
3761 break;
3762 }
3763
3764 case Token::VOID:
 3765         // Since the stack top is cached in r0, popping and then
 3766         // pushing a value can be done by just writing to r0.
3767 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3768 break;
3769
3770 case Token::ADD: {
3771 // Smi check.
3772 JumpTarget continue_label;
3773 __ tst(r0, Operand(kSmiTagMask));
3774 continue_label.Branch(eq);
3775 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003776 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003777 continue_label.Bind();
3778 break;
3779 }
3780 default:
3781 UNREACHABLE();
3782 }
3783 frame_->EmitPush(r0); // r0 has result
3784 }
3785 ASSERT(!has_valid_frame() ||
3786 (has_cc() && frame_->height() == original_height) ||
3787 (!has_cc() && frame_->height() == original_height + 1));
3788}
3789
3790
3791void CodeGenerator::VisitCountOperation(CountOperation* node) {
3792#ifdef DEBUG
3793 int original_height = frame_->height();
3794#endif
3795 VirtualFrame::SpilledScope spilled_scope;
3796 Comment cmnt(masm_, "[ CountOperation");
3797
3798 bool is_postfix = node->is_postfix();
3799 bool is_increment = node->op() == Token::INC;
3800
3801 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3802 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3803
3804 // Postfix: Make room for the result.
3805 if (is_postfix) {
3806 __ mov(r0, Operand(0));
3807 frame_->EmitPush(r0);
3808 }
3809
Leon Clarked91b9f72010-01-27 17:25:45 +00003810  // A constant reference is never stored to, so it is not treated as a
 3811   // compound assignment reference.
3812 { Reference target(this, node->expression(), !is_const);
Steve Blocka7e24c12009-10-30 11:49:00 +00003813 if (target.is_illegal()) {
3814 // Spoof the virtual frame to have the expected height (one higher
3815 // than on entry).
3816 if (!is_postfix) {
3817 __ mov(r0, Operand(Smi::FromInt(0)));
3818 frame_->EmitPush(r0);
3819 }
3820 ASSERT(frame_->height() == original_height + 1);
3821 return;
3822 }
Steve Blockd0582a62009-12-15 09:54:21 +00003823 target.GetValueAndSpill();
Steve Blocka7e24c12009-10-30 11:49:00 +00003824 frame_->EmitPop(r0);
3825
3826 JumpTarget slow;
3827 JumpTarget exit;
3828
3829 // Load the value (1) into register r1.
3830 __ mov(r1, Operand(Smi::FromInt(1)));
3831
3832 // Check for smi operand.
3833 __ tst(r0, Operand(kSmiTagMask));
3834 slow.Branch(ne);
3835
3836 // Postfix: Store the old value as the result.
3837 if (is_postfix) {
3838 __ str(r0, frame_->ElementAt(target.size()));
3839 }
3840
3841 // Perform optimistic increment/decrement.
3842 if (is_increment) {
3843 __ add(r0, r0, Operand(r1), SetCC);
3844 } else {
3845 __ sub(r0, r0, Operand(r1), SetCC);
3846 }
3847
3848 // If the increment/decrement didn't overflow, we're done.
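    // Overflow only happens at the ends of the Smi range; for example,
    // incrementing the largest Smi (2^30 - 1, encoded as 0x7ffffffe) wraps to
    // 0x80000000 and sets the V flag, so the revert and slow case below run.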
3849 exit.Branch(vc);
3850
3851 // Revert optimistic increment/decrement.
3852 if (is_increment) {
3853 __ sub(r0, r0, Operand(r1));
3854 } else {
3855 __ add(r0, r0, Operand(r1));
3856 }
3857
3858 // Slow case: Convert to number.
3859 slow.Bind();
3860 {
3861 // Convert the operand to a number.
3862 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00003863 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003864 }
3865 if (is_postfix) {
3866 // Postfix: store to result (on the stack).
3867 __ str(r0, frame_->ElementAt(target.size()));
3868 }
3869
3870 // Compute the new value.
3871 __ mov(r1, Operand(Smi::FromInt(1)));
3872 frame_->EmitPush(r0);
3873 frame_->EmitPush(r1);
3874 if (is_increment) {
3875 frame_->CallRuntime(Runtime::kNumberAdd, 2);
3876 } else {
3877 frame_->CallRuntime(Runtime::kNumberSub, 2);
3878 }
3879
3880 // Store the new value in the target if not const.
3881 exit.Bind();
3882 frame_->EmitPush(r0);
3883 if (!is_const) target.SetValue(NOT_CONST_INIT);
3884 }
3885
3886 // Postfix: Discard the new value and use the old.
3887 if (is_postfix) frame_->EmitPop(r0);
3888 ASSERT(frame_->height() == original_height + 1);
3889}
3890
3891
3892void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3893#ifdef DEBUG
3894 int original_height = frame_->height();
3895#endif
3896 VirtualFrame::SpilledScope spilled_scope;
3897 Comment cmnt(masm_, "[ BinaryOperation");
3898 Token::Value op = node->op();
3899
3900 // According to ECMA-262 section 11.11, page 58, the binary logical
3901 // operators must yield the result of one of the two expressions
3902 // before any ToBoolean() conversions. This means that the value
3903 // produced by a && or || operator is not necessarily a boolean.
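  // For example, '0 || "fallback"' yields the string "fallback", and
  // 'a && b' yields the value of 'a' whenever 'a' converts to false.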
3904
3905 // NOTE: If the left hand side produces a materialized value (not in
3906 // the CC register), we force the right hand side to do the
3907 // same. This is necessary because we may have to branch to the exit
3908 // after evaluating the left hand side (due to the shortcut
3909 // semantics), but the compiler must (statically) know if the result
3910 // of compiling the binary operation is materialized or not.
3911
3912 if (op == Token::AND) {
3913 JumpTarget is_true;
3914 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003915 &is_true,
3916 false_target(),
3917 false);
3918 if (has_valid_frame() && !has_cc()) {
3919 // The left-hand side result is on top of the virtual frame.
3920 JumpTarget pop_and_continue;
3921 JumpTarget exit;
3922
3923 __ ldr(r0, frame_->Top()); // Duplicate the stack top.
3924 frame_->EmitPush(r0);
3925 // Avoid popping the result if it converts to 'false' using the
3926 // standard ToBoolean() conversion as described in ECMA-262,
3927 // section 9.2, page 30.
3928 ToBoolean(&pop_and_continue, &exit);
3929 Branch(false, &exit);
3930
3931 // Pop the result of evaluating the first part.
3932 pop_and_continue.Bind();
3933 frame_->EmitPop(r0);
3934
3935 // Evaluate right side expression.
3936 is_true.Bind();
3937 LoadAndSpill(node->right());
3938
3939 // Exit (always with a materialized value).
3940 exit.Bind();
3941 } else if (has_cc() || is_true.is_linked()) {
3942 // The left-hand side is either (a) partially compiled to
3943 // control flow with a final branch left to emit or (b) fully
3944 // compiled to control flow and possibly true.
3945 if (has_cc()) {
3946 Branch(false, false_target());
3947 }
3948 is_true.Bind();
3949 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003950 true_target(),
3951 false_target(),
3952 false);
3953 } else {
3954 // Nothing to do.
3955 ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
3956 }
3957
3958 } else if (op == Token::OR) {
3959 JumpTarget is_false;
3960 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003961 true_target(),
3962 &is_false,
3963 false);
3964 if (has_valid_frame() && !has_cc()) {
3965 // The left-hand side result is on top of the virtual frame.
3966 JumpTarget pop_and_continue;
3967 JumpTarget exit;
3968
3969 __ ldr(r0, frame_->Top());
3970 frame_->EmitPush(r0);
3971 // Avoid popping the result if it converts to 'true' using the
3972 // standard ToBoolean() conversion as described in ECMA-262,
3973 // section 9.2, page 30.
3974 ToBoolean(&exit, &pop_and_continue);
3975 Branch(true, &exit);
3976
3977 // Pop the result of evaluating the first part.
3978 pop_and_continue.Bind();
3979 frame_->EmitPop(r0);
3980
3981 // Evaluate right side expression.
3982 is_false.Bind();
3983 LoadAndSpill(node->right());
3984
3985 // Exit (always with a materialized value).
3986 exit.Bind();
3987 } else if (has_cc() || is_false.is_linked()) {
3988 // The left-hand side is either (a) partially compiled to
3989 // control flow with a final branch left to emit or (b) fully
3990 // compiled to control flow and possibly false.
3991 if (has_cc()) {
3992 Branch(true, true_target());
3993 }
3994 is_false.Bind();
3995 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003996 true_target(),
3997 false_target(),
3998 false);
3999 } else {
4000 // Nothing to do.
4001 ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
4002 }
4003
4004 } else {
4005 // Optimize for the case where (at least) one of the expressions
4006 // is a literal small integer.
4007 Literal* lliteral = node->left()->AsLiteral();
4008 Literal* rliteral = node->right()->AsLiteral();
4009 // NOTE: The code below assumes that the slow cases (calls to runtime)
4010 // never return a constant/immutable object.
4011 bool overwrite_left =
4012 (node->left()->AsBinaryOperation() != NULL &&
4013 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
4014 bool overwrite_right =
4015 (node->right()->AsBinaryOperation() != NULL &&
4016 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
4017
4018 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
4019 LoadAndSpill(node->left());
4020 SmiOperation(node->op(),
4021 rliteral->handle(),
4022 false,
4023 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
4024
4025 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
4026 LoadAndSpill(node->right());
4027 SmiOperation(node->op(),
4028 lliteral->handle(),
4029 true,
4030 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
4031
4032 } else {
4033 OverwriteMode overwrite_mode = NO_OVERWRITE;
4034 if (overwrite_left) {
4035 overwrite_mode = OVERWRITE_LEFT;
4036 } else if (overwrite_right) {
4037 overwrite_mode = OVERWRITE_RIGHT;
4038 }
4039 LoadAndSpill(node->left());
4040 LoadAndSpill(node->right());
4041 GenericBinaryOperation(node->op(), overwrite_mode);
4042 }
4043 frame_->EmitPush(r0);
4044 }
4045 ASSERT(!has_valid_frame() ||
4046 (has_cc() && frame_->height() == original_height) ||
4047 (!has_cc() && frame_->height() == original_height + 1));
4048}
4049
4050
4051void CodeGenerator::VisitThisFunction(ThisFunction* node) {
4052#ifdef DEBUG
4053 int original_height = frame_->height();
4054#endif
4055 VirtualFrame::SpilledScope spilled_scope;
4056 __ ldr(r0, frame_->Function());
4057 frame_->EmitPush(r0);
4058 ASSERT(frame_->height() == original_height + 1);
4059}
4060
4061
4062void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
4063#ifdef DEBUG
4064 int original_height = frame_->height();
4065#endif
4066 VirtualFrame::SpilledScope spilled_scope;
4067 Comment cmnt(masm_, "[ CompareOperation");
4068
4069 // Get the expressions from the node.
4070 Expression* left = node->left();
4071 Expression* right = node->right();
4072 Token::Value op = node->op();
4073
4074 // To make null checks efficient, we check if either left or right is the
4075 // literal 'null'. If so, we optimize the code by inlining a null check
4076 // instead of calling the (very) general runtime routine for checking
4077 // equality.
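  // For example, for 'x == null' only 'x' is loaded; the inlined check below
  // accepts null, undefined and undetectable objects without a runtime call.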
4078 if (op == Token::EQ || op == Token::EQ_STRICT) {
4079 bool left_is_null =
4080 left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
4081 bool right_is_null =
4082 right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
4083 // The 'null' value can only be equal to 'null' or 'undefined'.
4084 if (left_is_null || right_is_null) {
4085 LoadAndSpill(left_is_null ? right : left);
4086 frame_->EmitPop(r0);
4087 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4088 __ cmp(r0, ip);
4089
4090 // The 'null' value is only equal to 'undefined' if using non-strict
4091 // comparisons.
4092 if (op != Token::EQ_STRICT) {
4093 true_target()->Branch(eq);
4094
4095 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4096 __ cmp(r0, Operand(ip));
4097 true_target()->Branch(eq);
4098
4099 __ tst(r0, Operand(kSmiTagMask));
4100 false_target()->Branch(eq);
4101
4102 // It can be an undetectable object.
4103 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
4104 __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
4105 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
4106 __ cmp(r0, Operand(1 << Map::kIsUndetectable));
4107 }
4108
4109 cc_reg_ = eq;
4110 ASSERT(has_cc() && frame_->height() == original_height);
4111 return;
4112 }
4113 }
4114
4115 // To make typeof testing for natives implemented in JavaScript really
4116 // efficient, we generate special code for expressions of the form:
4117 // 'typeof <expression> == <string>'.
4118 UnaryOperation* operation = left->AsUnaryOperation();
4119 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
4120 (operation != NULL && operation->op() == Token::TYPEOF) &&
4121 (right->AsLiteral() != NULL &&
4122 right->AsLiteral()->handle()->IsString())) {
4123 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
4124
4125 // Load the operand, move it to register r1.
4126 LoadTypeofExpression(operation->expression());
4127 frame_->EmitPop(r1);
4128
4129 if (check->Equals(Heap::number_symbol())) {
4130 __ tst(r1, Operand(kSmiTagMask));
4131 true_target()->Branch(eq);
4132 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4133 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4134 __ cmp(r1, ip);
4135 cc_reg_ = eq;
4136
4137 } else if (check->Equals(Heap::string_symbol())) {
4138 __ tst(r1, Operand(kSmiTagMask));
4139 false_target()->Branch(eq);
4140
4141 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4142
4143 // It can be an undetectable string object.
4144 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
4145 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
4146 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
4147 false_target()->Branch(eq);
4148
4149 __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
4150 __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
4151 cc_reg_ = lt;
4152
4153 } else if (check->Equals(Heap::boolean_symbol())) {
4154 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4155 __ cmp(r1, ip);
4156 true_target()->Branch(eq);
4157 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4158 __ cmp(r1, ip);
4159 cc_reg_ = eq;
4160
4161 } else if (check->Equals(Heap::undefined_symbol())) {
4162 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4163 __ cmp(r1, ip);
4164 true_target()->Branch(eq);
4165
4166 __ tst(r1, Operand(kSmiTagMask));
4167 false_target()->Branch(eq);
4168
4169 // It can be an undetectable object.
4170 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
4171 __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
4172 __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
4173 __ cmp(r2, Operand(1 << Map::kIsUndetectable));
4174
4175 cc_reg_ = eq;
4176
4177 } else if (check->Equals(Heap::function_symbol())) {
4178 __ tst(r1, Operand(kSmiTagMask));
4179 false_target()->Branch(eq);
Steve Blockd0582a62009-12-15 09:54:21 +00004180 Register map_reg = r2;
4181 __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
4182 true_target()->Branch(eq);
4183 // Regular expressions are callable so typeof == 'function'.
4184 __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004185 cc_reg_ = eq;
4186
4187 } else if (check->Equals(Heap::object_symbol())) {
4188 __ tst(r1, Operand(kSmiTagMask));
4189 false_target()->Branch(eq);
4190
Steve Blocka7e24c12009-10-30 11:49:00 +00004191 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4192 __ cmp(r1, ip);
4193 true_target()->Branch(eq);
4194
Steve Blockd0582a62009-12-15 09:54:21 +00004195 Register map_reg = r2;
4196 __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
4197 false_target()->Branch(eq);
4198
Steve Blocka7e24c12009-10-30 11:49:00 +00004199 // It can be an undetectable object.
Steve Blockd0582a62009-12-15 09:54:21 +00004200 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00004201 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
4202 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
4203 false_target()->Branch(eq);
4204
Steve Blockd0582a62009-12-15 09:54:21 +00004205 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
4206 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00004207 false_target()->Branch(lt);
Steve Blockd0582a62009-12-15 09:54:21 +00004208 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00004209 cc_reg_ = le;
4210
4211 } else {
4212 // Uncommon case: typeof testing against a string literal that is
4213 // never returned from the typeof operator.
4214 false_target()->Jump();
4215 }
4216 ASSERT(!has_valid_frame() ||
4217 (has_cc() && frame_->height() == original_height));
4218 return;
4219 }
4220
4221 switch (op) {
4222 case Token::EQ:
4223 Comparison(eq, left, right, false);
4224 break;
4225
4226 case Token::LT:
4227 Comparison(lt, left, right);
4228 break;
4229
4230 case Token::GT:
4231 Comparison(gt, left, right);
4232 break;
4233
4234 case Token::LTE:
4235 Comparison(le, left, right);
4236 break;
4237
4238 case Token::GTE:
4239 Comparison(ge, left, right);
4240 break;
4241
4242 case Token::EQ_STRICT:
4243 Comparison(eq, left, right, true);
4244 break;
4245
4246 case Token::IN: {
4247 LoadAndSpill(left);
4248 LoadAndSpill(right);
Steve Blockd0582a62009-12-15 09:54:21 +00004249 frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004250 frame_->EmitPush(r0);
4251 break;
4252 }
4253
4254 case Token::INSTANCEOF: {
4255 LoadAndSpill(left);
4256 LoadAndSpill(right);
4257 InstanceofStub stub;
4258 frame_->CallStub(&stub, 2);
4259 // At this point if instanceof succeeded then r0 == 0.
4260 __ tst(r0, Operand(r0));
4261 cc_reg_ = eq;
4262 break;
4263 }
4264
4265 default:
4266 UNREACHABLE();
4267 }
4268 ASSERT((has_cc() && frame_->height() == original_height) ||
4269 (!has_cc() && frame_->height() == original_height + 1));
4270}
4271
4272
Leon Clarked91b9f72010-01-27 17:25:45 +00004273void CodeGenerator::EmitKeyedLoad(bool is_global) {
4274 Comment cmnt(masm_, "[ Load from keyed Property");
4275 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
4276 RelocInfo::Mode rmode = is_global
4277 ? RelocInfo::CODE_TARGET_CONTEXT
4278 : RelocInfo::CODE_TARGET;
4279 frame_->CallCodeObject(ic, rmode, 0);
4280}
4281
4282
Steve Blocka7e24c12009-10-30 11:49:00 +00004283#ifdef DEBUG
4284bool CodeGenerator::HasValidEntryRegisters() { return true; }
4285#endif
4286
4287
4288#undef __
4289#define __ ACCESS_MASM(masm)
4290
4291
4292Handle<String> Reference::GetName() {
4293 ASSERT(type_ == NAMED);
4294 Property* property = expression_->AsProperty();
4295 if (property == NULL) {
4296 // Global variable reference treated as a named property reference.
4297 VariableProxy* proxy = expression_->AsVariableProxy();
4298 ASSERT(proxy->AsVariable() != NULL);
4299 ASSERT(proxy->AsVariable()->is_global());
4300 return proxy->name();
4301 } else {
4302 Literal* raw_name = property->key()->AsLiteral();
4303 ASSERT(raw_name != NULL);
4304 return Handle<String>(String::cast(*raw_name->handle()));
4305 }
4306}
4307
4308
Steve Blockd0582a62009-12-15 09:54:21 +00004309void Reference::GetValue() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004310 ASSERT(cgen_->HasValidEntryRegisters());
4311 ASSERT(!is_illegal());
4312 ASSERT(!cgen_->has_cc());
4313 MacroAssembler* masm = cgen_->masm();
4314 Property* property = expression_->AsProperty();
4315 if (property != NULL) {
4316 cgen_->CodeForSourcePosition(property->position());
4317 }
4318
4319 switch (type_) {
4320 case SLOT: {
4321 Comment cmnt(masm, "[ Load from Slot");
4322 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
4323 ASSERT(slot != NULL);
Steve Blockd0582a62009-12-15 09:54:21 +00004324 cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
Steve Blocka7e24c12009-10-30 11:49:00 +00004325 break;
4326 }
4327
4328 case NAMED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00004329 VirtualFrame* frame = cgen_->frame();
4330 Comment cmnt(masm, "[ Load from named Property");
4331 Handle<String> name(GetName());
4332 Variable* var = expression_->AsVariableProxy()->AsVariable();
4333 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
4334 // Setup the name register.
Steve Blocka7e24c12009-10-30 11:49:00 +00004335 __ mov(r2, Operand(name));
4336 ASSERT(var == NULL || var->is_global());
4337 RelocInfo::Mode rmode = (var == NULL)
4338 ? RelocInfo::CODE_TARGET
4339 : RelocInfo::CODE_TARGET_CONTEXT;
Leon Clarke4515c472010-02-03 11:58:03 +00004340 frame->CallCodeObject(ic, rmode, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00004341 frame->EmitPush(r0);
4342 break;
4343 }
4344
4345 case KEYED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00004346 // TODO(181): Implement inlined version of array indexing once
4347 // loop nesting is properly tracked on ARM.
Steve Blocka7e24c12009-10-30 11:49:00 +00004348 ASSERT(property != NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00004349 Variable* var = expression_->AsVariableProxy()->AsVariable();
4350 ASSERT(var == NULL || var->is_global());
Leon Clarked91b9f72010-01-27 17:25:45 +00004351 cgen_->EmitKeyedLoad(var != NULL);
4352 cgen_->frame()->EmitPush(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00004353 break;
4354 }
4355
4356 default:
4357 UNREACHABLE();
4358 }
Leon Clarked91b9f72010-01-27 17:25:45 +00004359
4360 if (!persist_after_get_) {
4361 cgen_->UnloadReference(this);
4362 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004363}
4364
4365
4366void Reference::SetValue(InitState init_state) {
4367 ASSERT(!is_illegal());
4368 ASSERT(!cgen_->has_cc());
4369 MacroAssembler* masm = cgen_->masm();
4370 VirtualFrame* frame = cgen_->frame();
4371 Property* property = expression_->AsProperty();
4372 if (property != NULL) {
4373 cgen_->CodeForSourcePosition(property->position());
4374 }
4375
4376 switch (type_) {
4377 case SLOT: {
4378 Comment cmnt(masm, "[ Store to Slot");
4379 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
Leon Clarkee46be812010-01-19 14:06:41 +00004380 cgen_->StoreToSlot(slot, init_state);
Leon Clarke4515c472010-02-03 11:58:03 +00004381 cgen_->UnloadReference(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00004382 break;
4383 }
4384
4385 case NAMED: {
4386 Comment cmnt(masm, "[ Store to named Property");
4387 // Call the appropriate IC code.
4388 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
4389 Handle<String> name(GetName());
4390
Steve Blocka7e24c12009-10-30 11:49:00 +00004391 frame->EmitPop(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00004392 // Setup the name register.
Steve Blocka7e24c12009-10-30 11:49:00 +00004393 __ mov(r2, Operand(name));
Leon Clarke4515c472010-02-03 11:58:03 +00004394 frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00004395 frame->EmitPush(r0);
Leon Clarke4515c472010-02-03 11:58:03 +00004396 cgen_->UnloadReference(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00004397 break;
4398 }
4399
4400 case KEYED: {
4401 Comment cmnt(masm, "[ Store to keyed Property");
4402 Property* property = expression_->AsProperty();
4403 ASSERT(property != NULL);
4404 cgen_->CodeForSourcePosition(property->position());
4405
4406 // Call IC code.
4407 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
4408 // TODO(1222589): Make the IC grab the values from the stack.
Steve Blocka7e24c12009-10-30 11:49:00 +00004409 frame->EmitPop(r0); // value
Leon Clarke4515c472010-02-03 11:58:03 +00004410 frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00004411 frame->EmitPush(r0);
Leon Clarke4515c472010-02-03 11:58:03 +00004412 cgen_->UnloadReference(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00004413 break;
4414 }
4415
4416 default:
4417 UNREACHABLE();
4418 }
4419}
4420
4421
Leon Clarkee46be812010-01-19 14:06:41 +00004422void FastNewClosureStub::Generate(MacroAssembler* masm) {
4423 // Clone the boilerplate in new space. Set the context to the
4424 // current context in cp.
4425 Label gc;
4426
4427 // Pop the boilerplate function from the stack.
4428 __ pop(r3);
4429
4430 // Attempt to allocate new JSFunction in new space.
4431 __ AllocateInNewSpace(JSFunction::kSize / kPointerSize,
4432 r0,
4433 r1,
4434 r2,
4435 &gc,
4436 TAG_OBJECT);
4437
4438 // Compute the function map in the current global context and set that
4439 // as the map of the allocated object.
4440 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4441 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
4442 __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
4443 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4444
4445 // Clone the rest of the boilerplate fields. We don't have to update
4446 // the write barrier because the allocated object is in new space.
4447 for (int offset = kPointerSize;
4448 offset < JSFunction::kSize;
4449 offset += kPointerSize) {
4450 if (offset == JSFunction::kContextOffset) {
4451 __ str(cp, FieldMemOperand(r0, offset));
4452 } else {
4453 __ ldr(r1, FieldMemOperand(r3, offset));
4454 __ str(r1, FieldMemOperand(r0, offset));
4455 }
4456 }
4457
4458 // Return result. The argument boilerplate has been popped already.
4459 __ Ret();
4460
4461 // Create a new closure through the slower runtime call.
4462 __ bind(&gc);
4463 __ push(cp);
4464 __ push(r3);
4465 __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
4466}
4467
4468
4469void FastNewContextStub::Generate(MacroAssembler* masm) {
4470 // Try to allocate the context in new space.
4471 Label gc;
4472 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
4473
4474 // Attempt to allocate the context in new space.
4475 __ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize),
4476 r0,
4477 r1,
4478 r2,
4479 &gc,
4480 TAG_OBJECT);
4481
4482 // Load the function from the stack.
4483 __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
4484
4485 // Setup the object header.
4486 __ LoadRoot(r2, Heap::kContextMapRootIndex);
4487 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4488 __ mov(r2, Operand(length));
4489 __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
4490
4491 // Setup the fixed slots.
4492 __ mov(r1, Operand(Smi::FromInt(0)));
4493 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
4494 __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
4495 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4496 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
4497
4498 // Copy the global object from the surrounding context.
4499 __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4500 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
4501
4502 // Initialize the rest of the slots to undefined.
4503 __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
4504 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
4505 __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
4506 }
4507
4508 // Remove the on-stack argument and return.
4509 __ mov(cp, r0);
4510 __ pop();
4511 __ Ret();
4512
4513 // Need to collect. Call into runtime system.
4514 __ bind(&gc);
4515 __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
4516}
4517
4518
Steve Blocka7e24c12009-10-30 11:49:00 +00004519// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
4520// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
4521// (31 instead of 32).
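// For example, CountLeadingZeros of 0x00010000 is 15 and of 0x80000000 is 0.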
4522static void CountLeadingZeros(
4523 MacroAssembler* masm,
4524 Register source,
4525 Register scratch,
4526 Register zeros) {
4527#ifdef CAN_USE_ARMV5_INSTRUCTIONS
4528 __ clz(zeros, source); // This instruction is only supported after ARM5.
4529#else
4530 __ mov(zeros, Operand(0));
4531 __ mov(scratch, source);
4532 // Top 16.
4533 __ tst(scratch, Operand(0xffff0000));
4534 __ add(zeros, zeros, Operand(16), LeaveCC, eq);
4535 __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
4536 // Top 8.
4537 __ tst(scratch, Operand(0xff000000));
4538 __ add(zeros, zeros, Operand(8), LeaveCC, eq);
4539 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
4540 // Top 4.
4541 __ tst(scratch, Operand(0xf0000000));
4542 __ add(zeros, zeros, Operand(4), LeaveCC, eq);
4543 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
4544 // Top 2.
4545 __ tst(scratch, Operand(0xc0000000));
4546 __ add(zeros, zeros, Operand(2), LeaveCC, eq);
4547 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
4548 // Top bit.
4549 __ tst(scratch, Operand(0x80000000u));
4550 __ add(zeros, zeros, Operand(1), LeaveCC, eq);
4551#endif
4552}
4553
4554
4555// Takes a Smi and converts to an IEEE 64 bit floating point value in two
4556// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
4557// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
4558// scratch register. Destroys the source register. No GC occurs during this
4559// stub so you don't have to set up the frame.
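// For example, the Smi 1 becomes the exponent word 0x3ff00000 (sign 0, biased
// exponent 1023) and the mantissa word 0x00000000.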
4560class ConvertToDoubleStub : public CodeStub {
4561 public:
4562 ConvertToDoubleStub(Register result_reg_1,
4563 Register result_reg_2,
4564 Register source_reg,
4565 Register scratch_reg)
4566 : result1_(result_reg_1),
4567 result2_(result_reg_2),
4568 source_(source_reg),
4569 zeros_(scratch_reg) { }
4570
4571 private:
4572 Register result1_;
4573 Register result2_;
4574 Register source_;
4575 Register zeros_;
4576
4577 // Minor key encoding in 16 bits.
4578 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4579 class OpBits: public BitField<Token::Value, 2, 14> {};
4580
4581 Major MajorKey() { return ConvertToDouble; }
4582 int MinorKey() {
4583 // Encode the parameters in a unique 16 bit value.
4584 return result1_.code() +
4585 (result2_.code() << 4) +
4586 (source_.code() << 8) +
4587 (zeros_.code() << 12);
4588 }
4589
4590 void Generate(MacroAssembler* masm);
4591
4592 const char* GetName() { return "ConvertToDoubleStub"; }
4593
4594#ifdef DEBUG
4595 void Print() { PrintF("ConvertToDoubleStub\n"); }
4596#endif
4597};
4598
4599
4600void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
4601#ifndef BIG_ENDIAN_FLOATING_POINT
4602 Register exponent = result1_;
4603 Register mantissa = result2_;
4604#else
4605 Register exponent = result2_;
4606 Register mantissa = result1_;
4607#endif
4608 Label not_special;
4609 // Convert from Smi to integer.
4610 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
4611 // Move sign bit from source to destination. This works because the sign bit
4612 // in the exponent word of the double has the same position and polarity as
4613 // the 2's complement sign bit in a Smi.
4614 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4615 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
4616 // Subtract from 0 if source was negative.
4617 __ rsb(source_, source_, Operand(0), LeaveCC, ne);
4618 __ cmp(source_, Operand(1));
4619 __ b(gt, &not_special);
4620
4621 // We have -1, 0 or 1, which we treat specially.
4622 __ cmp(source_, Operand(0));
4623 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
4624 static const uint32_t exponent_word_for_1 =
4625 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
4626 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
4627 // 1, 0 and -1 all have 0 for the second word.
4628 __ mov(mantissa, Operand(0));
4629 __ Ret();
4630
4631 __ bind(&not_special);
4632 // Count leading zeros. Uses result2 for a scratch register on pre-ARM5.
4633 // Gets the wrong answer for 0, but we already checked for that case above.
4634 CountLeadingZeros(masm, source_, mantissa, zeros_);
4635 // Compute exponent and or it into the exponent register.
4636 // We use result2 as a scratch register here.
4637 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
4638 __ orr(exponent,
4639 exponent,
4640 Operand(mantissa, LSL, HeapNumber::kExponentShift));
4641 // Shift up the source chopping the top bit off.
4642 __ add(zeros_, zeros_, Operand(1));
4643 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
4644 __ mov(source_, Operand(source_, LSL, zeros_));
4645 // Compute lower part of fraction (last 12 bits).
4646 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
4647 // And the top (top 20 bits).
4648 __ orr(exponent,
4649 exponent,
4650 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
4651 __ Ret();
4652}
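// Worked example (hand-computed, not generated code): converting the Smi 5,
// i.e. the tagged value 10 with a 1-bit smi tag.
//   ASR 1                -> source_ = 5, sign bit 0, so exponent starts as 0.
//   CountLeadingZeros(5) -> zeros_ = 29, so the biased exponent is
//                           31 + 1023 - 29 = 1025 (5 = 1.25 * 2^2).
//   source_ << (29 + 1)  -> 0x40000000 (the implicit leading 1 is chopped off).
//   mantissa word        -> 0x40000000 << 20 = 0x00000000.
//   exponent word        -> (1025 << 20) | (0x40000000 >> 12) = 0x40140000.
// High word 0x40140000, low word 0x00000000 is the IEEE 754 encoding of 5.0.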
4653
4654
4655// This stub can convert a signed int32 to a heap number (double). It does
4656// not work for int32s that are in Smi range! No GC occurs during this stub
4657// so you don't have to set up the frame.
4658class WriteInt32ToHeapNumberStub : public CodeStub {
4659 public:
4660 WriteInt32ToHeapNumberStub(Register the_int,
4661 Register the_heap_number,
4662 Register scratch)
4663 : the_int_(the_int),
4664 the_heap_number_(the_heap_number),
4665 scratch_(scratch) { }
4666
4667 private:
4668 Register the_int_;
4669 Register the_heap_number_;
4670 Register scratch_;
4671
4672 // Minor key encoding in 16 bits.
4673 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
4674 class OpBits: public BitField<Token::Value, 2, 14> {};
4675
4676 Major MajorKey() { return WriteInt32ToHeapNumber; }
4677 int MinorKey() {
4678 // Encode the parameters in a unique 16 bit value.
4679 return the_int_.code() +
4680 (the_heap_number_.code() << 4) +
4681 (scratch_.code() << 8);
4682 }
4683
4684 void Generate(MacroAssembler* masm);
4685
4686 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
4687
4688#ifdef DEBUG
4689 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
4690#endif
4691};
4692
4693
4694// See comment for class.
Steve Blockd0582a62009-12-15 09:54:21 +00004695void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004696 Label max_negative_int;
4697 // the_int_ has the answer which is a signed int32 but not a Smi.
4698 // We test for the special value that has a different exponent. This test
4699 // has the neat side effect of setting the flags according to the sign.
4700 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4701 __ cmp(the_int_, Operand(0x80000000u));
4702 __ b(eq, &max_negative_int);
4703 // Set up the correct exponent in scratch_. All non-Smi int32s have the same
4704 // exponent: a non-Smi integer is 1.xxx * 2^30, so the exponent is 30 (biased).
4705 uint32_t non_smi_exponent =
4706 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
4707 __ mov(scratch_, Operand(non_smi_exponent));
4708 // Set the sign bit in scratch_ if the value was negative.
4709 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
4710 // Subtract from 0 if the value was negative.
4711 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
4712 // We should be masking the implicit first digit of the mantissa away here,
4713 // but it just ends up combining harmlessly with the last digit of the
4714 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
4715 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
4716 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
4717 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
4718 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
4719 __ str(scratch_, FieldMemOperand(the_heap_number_,
4720 HeapNumber::kExponentOffset));
4721 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
4722 __ str(scratch_, FieldMemOperand(the_heap_number_,
4723 HeapNumber::kMantissaOffset));
4724 __ Ret();
4725
4726 __ bind(&max_negative_int);
4727 // The max negative int32 is stored as a positive number in the mantissa of
4728 // a double because it uses a sign bit instead of using two's complement.
4729 // The actual mantissa bits stored are all 0 because the implicit most
4730 // significant 1 bit is not stored.
4731 non_smi_exponent += 1 << HeapNumber::kExponentShift;
4732 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
4733 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
4734 __ mov(ip, Operand(0));
4735 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
4736 __ Ret();
4737}
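// Worked example (hand-computed): the_int_ = 0x40000000 (2^30, the smallest
// positive int32 that is not a Smi).
//   scratch_ = (1023 + 30) << 20 = 0x41d00000, and the sign bit is not set.
//   the_int_ >> 10 = 0x00100000; or-ing this in only re-sets the lowest
//   exponent bit, which is the harmless combination described above, so the
//   exponent word stays 0x41d00000 and the mantissa word is
//   0x40000000 << 22 = 0, giving the encoding of 1073741824.0.
// For the special value 0x80000000 the code instead stores exponent word
// 0xc1e00000 and mantissa word 0, which is -2147483648.0.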
4738
4739
4740// Handle the case where the lhs and rhs are the same object.
4741// Equality is almost reflexive (everything but NaN), so this is a test
4742// for "identity and not NaN".
4743static void EmitIdenticalObjectComparison(MacroAssembler* masm,
4744 Label* slow,
Leon Clarkee46be812010-01-19 14:06:41 +00004745 Condition cc,
4746 bool never_nan_nan) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004747 Label not_identical;
Leon Clarkee46be812010-01-19 14:06:41 +00004748 Label heap_number, return_equal;
4749 Register exp_mask_reg = r5;
Steve Blocka7e24c12009-10-30 11:49:00 +00004750 __ cmp(r0, Operand(r1));
4751 __ b(ne, &not_identical);
4752
Leon Clarkee46be812010-01-19 14:06:41 +00004753 // The two objects are identical. If we know that one of them isn't NaN then
4754 // we now know they test equal.
4755 if (cc != eq || !never_nan_nan) {
4756 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004757
Leon Clarkee46be812010-01-19 14:06:41 +00004758 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
4759 // so we do the second best thing - test it ourselves.
4760 // They are identical and not both Smis, so neither of them is a Smi. If
4761 // it's not a heap number, then return equal.
4762 if (cc == lt || cc == gt) {
4763 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004764 __ b(ge, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00004765 } else {
4766 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4767 __ b(eq, &heap_number);
4768 // Comparing JS objects with <=, >= is complicated.
4769 if (cc != eq) {
4770 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
4771 __ b(ge, slow);
4772 // Normally here we fall through to return_equal, but undefined is
4773 // special: (undefined == undefined) == true, but
4774 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
4775 if (cc == le || cc == ge) {
4776 __ cmp(r4, Operand(ODDBALL_TYPE));
4777 __ b(ne, &return_equal);
4778 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4779 __ cmp(r0, Operand(r2));
4780 __ b(ne, &return_equal);
4781 if (cc == le) {
4782 // undefined <= undefined should fail.
4783 __ mov(r0, Operand(GREATER));
4784 } else {
4785 // undefined >= undefined should fail.
4786 __ mov(r0, Operand(LESS));
4787 }
4788 __ mov(pc, Operand(lr)); // Return.
Steve Blockd0582a62009-12-15 09:54:21 +00004789 }
Steve Blockd0582a62009-12-15 09:54:21 +00004790 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004791 }
4792 }
Leon Clarkee46be812010-01-19 14:06:41 +00004793
Steve Blocka7e24c12009-10-30 11:49:00 +00004794 __ bind(&return_equal);
4795 if (cc == lt) {
4796 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
4797 } else if (cc == gt) {
4798 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
4799 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00004800 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
Steve Blocka7e24c12009-10-30 11:49:00 +00004801 }
4802 __ mov(pc, Operand(lr)); // Return.
4803
Leon Clarkee46be812010-01-19 14:06:41 +00004804 if (cc != eq || !never_nan_nan) {
4805 // For less and greater we don't have to check for NaN since the result of
4806 // x < x is false regardless. For the others here is some code to check
4807 // for NaN.
4808 if (cc != lt && cc != gt) {
4809 __ bind(&heap_number);
4810 // It is a heap number, so return non-equal if it's NaN and equal if it's
4811 // not NaN.
Steve Blocka7e24c12009-10-30 11:49:00 +00004812
Leon Clarkee46be812010-01-19 14:06:41 +00004813 // The representation of NaN values has all exponent bits (52..62) set,
4814 // and not all mantissa bits (0..51) clear.
4815 // Read top bits of double representation (second word of value).
4816 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
4817 // Test that exponent bits are all set.
4818 __ and_(r3, r2, Operand(exp_mask_reg));
4819 __ cmp(r3, Operand(exp_mask_reg));
4820 __ b(ne, &return_equal);
4821
4822 // Shift out flag and all exponent bits, retaining only mantissa.
4823 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
4824 // Or with all low-bits of mantissa.
4825 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
4826 __ orr(r0, r3, Operand(r2), SetCC);
4827 // For equal we already have the right value in r0: Return zero (equal)
4828 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
4829 // not (it's a NaN). For <= and >= we need to load r0 with the failing
4830 // value if it's a NaN.
4831 if (cc != eq) {
4832 // All-zero means Infinity means equal.
4833 __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
4834 if (cc == le) {
4835 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
4836 } else {
4837 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
4838 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004839 }
Leon Clarkee46be812010-01-19 14:06:41 +00004840 __ mov(pc, Operand(lr)); // Return.
Steve Blocka7e24c12009-10-30 11:49:00 +00004841 }
Leon Clarkee46be812010-01-19 14:06:41 +00004842 // No fall through here.
Steve Blocka7e24c12009-10-30 11:49:00 +00004843 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004844
4845 __ bind(&not_identical);
4846}
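// Illustrative bit patterns for the NaN test above (hand-computed):
//   quiet NaN, top word 0x7ff80000, low word 0: the exponent bits are all
//     set, and shifting the top word left by 12 leaves 0x80000000, so the
//     or with the low word is non-zero and the comparison is made to fail.
//   +Infinity, top word 0x7ff00000, low word 0: the exponent bits are all
//     set, but both the shifted top word and the low word are zero, so the
//     value compares equal to itself as required.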
4847
4848
4849// See comment at call site.
4850static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Leon Clarkee46be812010-01-19 14:06:41 +00004851 Label* lhs_not_nan,
Steve Blocka7e24c12009-10-30 11:49:00 +00004852 Label* slow,
4853 bool strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004854 Label rhs_is_smi;
Steve Blocka7e24c12009-10-30 11:49:00 +00004855 __ tst(r0, Operand(kSmiTagMask));
Leon Clarked91b9f72010-01-27 17:25:45 +00004856 __ b(eq, &rhs_is_smi);
Steve Blocka7e24c12009-10-30 11:49:00 +00004857
Leon Clarked91b9f72010-01-27 17:25:45 +00004858 // Lhs is a Smi. Check whether the rhs is a heap number.
Steve Blocka7e24c12009-10-30 11:49:00 +00004859 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4860 if (strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004861 // If rhs is not a number and lhs is a Smi then strict equality cannot
Steve Blocka7e24c12009-10-30 11:49:00 +00004862 // succeed. Return non-equal (r0 is already not zero).
4863 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4864 } else {
4865 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4866 // the runtime.
4867 __ b(ne, slow);
4868 }
4869
Leon Clarked91b9f72010-01-27 17:25:45 +00004870 // Lhs (r1) is a smi, rhs (r0) is a number.
Steve Blockd0582a62009-12-15 09:54:21 +00004871 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004872 // Convert lhs to a double in d7.
Steve Blockd0582a62009-12-15 09:54:21 +00004873 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00004874 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
4875 __ vmov(s15, r7);
4876 __ vcvt(d7, s15);
4877 // Load the double from rhs, tagged HeapNumber r0, to d6.
4878 __ sub(r7, r0, Operand(kHeapObjectTag));
4879 __ vldr(d6, r7, HeapNumber::kValueOffset);
Steve Blockd0582a62009-12-15 09:54:21 +00004880 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00004881 __ push(lr);
4882 // Convert lhs to a double in r2, r3.
Steve Blockd0582a62009-12-15 09:54:21 +00004883 __ mov(r7, Operand(r1));
4884 ConvertToDoubleStub stub1(r3, r2, r7, r6);
4885 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
Leon Clarked91b9f72010-01-27 17:25:45 +00004886 // Load rhs to a double in r0, r1.
4887 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4888 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
4889 __ pop(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00004890 }
4891
Steve Blocka7e24c12009-10-30 11:49:00 +00004892 // We now have both loaded as doubles but we can skip the lhs nan check
Leon Clarked91b9f72010-01-27 17:25:45 +00004893 // since it's a smi.
Leon Clarkee46be812010-01-19 14:06:41 +00004894 __ jmp(lhs_not_nan);
Steve Blocka7e24c12009-10-30 11:49:00 +00004895
Leon Clarked91b9f72010-01-27 17:25:45 +00004896 __ bind(&rhs_is_smi);
4897 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
Steve Blocka7e24c12009-10-30 11:49:00 +00004898 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
4899 if (strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004900 // If lhs is not a number and rhs is a smi then strict equality cannot
Steve Blocka7e24c12009-10-30 11:49:00 +00004901 // succeed. Return non-equal.
4902 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
4903 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4904 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00004905 // Smi compared non-strictly with a non-smi non-heap-number. Call
Steve Blocka7e24c12009-10-30 11:49:00 +00004906 // the runtime.
4907 __ b(ne, slow);
4908 }
4909
Leon Clarked91b9f72010-01-27 17:25:45 +00004910 // Rhs (r0) is a smi, lhs (r1) is a heap number.
Steve Blockd0582a62009-12-15 09:54:21 +00004911 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarked91b9f72010-01-27 17:25:45 +00004912 // Convert rhs to a double in d6.
Steve Blockd0582a62009-12-15 09:54:21 +00004913 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00004914 // Load the double from lhs, tagged HeapNumber r1, to d7.
4915 __ sub(r7, r1, Operand(kHeapObjectTag));
4916 __ vldr(d7, r7, HeapNumber::kValueOffset);
4917 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
4918 __ vmov(s13, r7);
4919 __ vcvt(d6, s13);
Steve Blockd0582a62009-12-15 09:54:21 +00004920 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00004921 __ push(lr);
4922 // Load lhs to a double in r2, r3.
4923 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4924 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4925 // Convert rhs to a double in r0, r1.
Steve Blockd0582a62009-12-15 09:54:21 +00004926 __ mov(r7, Operand(r0));
4927 ConvertToDoubleStub stub2(r1, r0, r7, r6);
4928 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
Leon Clarked91b9f72010-01-27 17:25:45 +00004929 __ pop(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00004930 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004931 // Fall through to both_loaded_as_doubles.
4932}
4933
4934
Leon Clarkee46be812010-01-19 14:06:41 +00004935void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004936 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00004937 Register rhs_exponent = exp_first ? r0 : r1;
4938 Register lhs_exponent = exp_first ? r2 : r3;
4939 Register rhs_mantissa = exp_first ? r1 : r0;
4940 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00004941 Label one_is_nan, neither_is_nan;
Leon Clarkee46be812010-01-19 14:06:41 +00004942 Label lhs_not_nan_exp_mask_is_loaded;
Steve Blocka7e24c12009-10-30 11:49:00 +00004943
4944 Register exp_mask_reg = r5;
4945
4946 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004947 __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
4948 __ cmp(r4, Operand(exp_mask_reg));
Leon Clarkee46be812010-01-19 14:06:41 +00004949 __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
Steve Blocka7e24c12009-10-30 11:49:00 +00004950 __ mov(r4,
4951 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4952 SetCC);
4953 __ b(ne, &one_is_nan);
4954 __ cmp(lhs_mantissa, Operand(0));
Leon Clarkee46be812010-01-19 14:06:41 +00004955 __ b(ne, &one_is_nan);
4956
4957 __ bind(lhs_not_nan);
4958 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
4959 __ bind(&lhs_not_nan_exp_mask_is_loaded);
4960 __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
4961 __ cmp(r4, Operand(exp_mask_reg));
4962 __ b(ne, &neither_is_nan);
4963 __ mov(r4,
4964 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
4965 SetCC);
4966 __ b(ne, &one_is_nan);
4967 __ cmp(rhs_mantissa, Operand(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00004968 __ b(eq, &neither_is_nan);
4969
4970 __ bind(&one_is_nan);
4971 // NaN comparisons always fail.
4972 // Load whatever we need in r0 to make the comparison fail.
4973 if (cc == lt || cc == le) {
4974 __ mov(r0, Operand(GREATER));
4975 } else {
4976 __ mov(r0, Operand(LESS));
4977 }
4978 __ mov(pc, Operand(lr)); // Return.
4979
4980 __ bind(&neither_is_nan);
4981}
4982
4983
4984// See comment at call site.
4985static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
4986 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00004987 Register rhs_exponent = exp_first ? r0 : r1;
4988 Register lhs_exponent = exp_first ? r2 : r3;
4989 Register rhs_mantissa = exp_first ? r1 : r0;
4990 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00004991
4992 // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
4993 if (cc == eq) {
4994 // Doubles are not equal unless they have the same bit pattern.
4995 // Exception: 0 and -0.
Leon Clarkee46be812010-01-19 14:06:41 +00004996 __ cmp(rhs_mantissa, Operand(lhs_mantissa));
4997 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
Steve Blocka7e24c12009-10-30 11:49:00 +00004998 // Return non-zero if the numbers are unequal.
4999 __ mov(pc, Operand(lr), LeaveCC, ne);
5000
Leon Clarkee46be812010-01-19 14:06:41 +00005001 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00005002 // If exponents are equal then return 0.
5003 __ mov(pc, Operand(lr), LeaveCC, eq);
5004
5005 // Exponents are unequal. The only way we can return that the numbers
5006 // are equal is if one is -0 and the other is 0. We already dealt
5007 // with the case where both are -0 or both are 0.
5008 // We start by seeing if the mantissas (that are equal) or the bottom
5009 // 31 bits of the lhs exponent are non-zero. If so we return not
5010 // equal.
Leon Clarkee46be812010-01-19 14:06:41 +00005011 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00005012 __ mov(r0, Operand(r4), LeaveCC, ne);
5013 __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
5014 // Now they are equal if and only if the rhs exponent is zero in its
5015 // low 31 bits.
Leon Clarkee46be812010-01-19 14:06:41 +00005016 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00005017 __ mov(pc, Operand(lr));
5018 } else {
5019 // Call a native function to do a comparison between two non-NaNs.
5020 // Call C routine that may not cause GC or other trouble.
5021 __ mov(r5, Operand(ExternalReference::compare_doubles()));
5022 __ Jump(r5); // Tail call.
5023 }
5024}
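// Illustrative trace of the equality path above for +0 and -0
// (hand-computed): +0 has words 0x00000000:0x00000000 and -0 has
// 0x80000000:0x00000000. The mantissa words are equal, the exponent words
// differ, and shifting the lhs exponent word left by one drops the sign bit,
// so r4 is 0 and we do not bail out; the final result, the rhs exponent word
// shifted left by one, is also 0, i.e. EQUAL, which is what ECMAScript
// requires for 0 == -0.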
5025
5026
5027// See comment at call site.
5028static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
5029 // If either operand is a JSObject or an oddball value, then they are
5030 // not equal since their pointers are different.
5031 // There is no test for undetectability in strict equality.
5032 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
5033 Label first_non_object;
5034 // Get the type of the first operand into r2 and compare it with
5035 // FIRST_JS_OBJECT_TYPE.
5036 __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
5037 __ b(lt, &first_non_object);
5038
5039 // Return non-zero (r0 is not zero)
5040 Label return_not_equal;
5041 __ bind(&return_not_equal);
5042 __ mov(pc, Operand(lr)); // Return.
5043
5044 __ bind(&first_non_object);
5045 // Check for oddballs: true, false, null, undefined.
5046 __ cmp(r2, Operand(ODDBALL_TYPE));
5047 __ b(eq, &return_not_equal);
5048
5049 __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
5050 __ b(ge, &return_not_equal);
5051
5052 // Check for oddballs: true, false, null, undefined.
5053 __ cmp(r3, Operand(ODDBALL_TYPE));
5054 __ b(eq, &return_not_equal);
Leon Clarkee46be812010-01-19 14:06:41 +00005055
5056 // Now that we have the types we might as well check for symbol-symbol.
5057 // Ensure that no non-strings have the symbol bit set.
5058 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
5059 ASSERT(kSymbolTag != 0);
5060 __ and_(r2, r2, Operand(r3));
5061 __ tst(r2, Operand(kIsSymbolMask));
5062 __ b(ne, &return_not_equal);
Steve Blocka7e24c12009-10-30 11:49:00 +00005063}
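// Note on the symbol check above: the ASSERTs guarantee that no non-string
// instance type can carry kIsSymbolMask, so the bit survives the and_ of the
// two instance types exactly when both operands are symbols. Symbols are
// unique, and the identical-pointer case was handled earlier, so two distinct
// symbols can safely be reported as not equal.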
5064
5065
5066// See comment at call site.
5067static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
5068 Label* both_loaded_as_doubles,
5069 Label* not_heap_numbers,
5070 Label* slow) {
Leon Clarkee46be812010-01-19 14:06:41 +00005071 __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005072 __ b(ne, not_heap_numbers);
Leon Clarkee46be812010-01-19 14:06:41 +00005073 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
5074 __ cmp(r2, r3);
Steve Blocka7e24c12009-10-30 11:49:00 +00005075 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
5076
5077 // Both are heap numbers. Load them up then jump to the code we have
5078 // for that.
Leon Clarked91b9f72010-01-27 17:25:45 +00005079 if (CpuFeatures::IsSupported(VFP3)) {
5080 CpuFeatures::Scope scope(VFP3);
5081 __ sub(r7, r0, Operand(kHeapObjectTag));
5082 __ vldr(d6, r7, HeapNumber::kValueOffset);
5083 __ sub(r7, r1, Operand(kHeapObjectTag));
5084 __ vldr(d7, r7, HeapNumber::kValueOffset);
5085 } else {
5086 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
5087 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
5088 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
5089 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
5090 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005091 __ jmp(both_loaded_as_doubles);
5092}
5093
5094
5095// Fast negative check for symbol-to-symbol equality.
5096static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
5097 // r2 is object type of r0.
Leon Clarkee46be812010-01-19 14:06:41 +00005098 // Ensure that no non-strings have the symbol bit set.
5099 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
5100 ASSERT(kSymbolTag != 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00005101 __ tst(r2, Operand(kIsSymbolMask));
5102 __ b(eq, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00005103 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
5104 __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00005105 __ tst(r3, Operand(kIsSymbolMask));
5106 __ b(eq, slow);
5107
5108 // Both are symbols. We already checked they weren't the same pointer
5109 // so they are not equal.
5110 __ mov(r0, Operand(1)); // Non-zero indicates not equal.
5111 __ mov(pc, Operand(lr)); // Return.
5112}
5113
5114
Leon Clarked91b9f72010-01-27 17:25:45 +00005115// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
5116// On exit r0 is 0, positive or negative to indicate the result of
5117// the comparison.
Steve Blocka7e24c12009-10-30 11:49:00 +00005118void CompareStub::Generate(MacroAssembler* masm) {
5119 Label slow; // Call builtin.
Leon Clarkee46be812010-01-19 14:06:41 +00005120 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
Steve Blocka7e24c12009-10-30 11:49:00 +00005121
5122 // NOTICE! This code is only reached after a smi-fast-case check, so
5123 // it is certain that at least one operand isn't a smi.
5124
5125 // Handle the case where the objects are identical. Either returns the answer
5126 // or goes to slow. Only falls through if the objects were not identical.
Leon Clarkee46be812010-01-19 14:06:41 +00005127 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005128
5129 // If either is a Smi (we know that not both are), then they can only
5130 // be strictly equal if the other is a HeapNumber.
5131 ASSERT_EQ(0, kSmiTag);
5132 ASSERT_EQ(0, Smi::FromInt(0));
5133 __ and_(r2, r0, Operand(r1));
5134 __ tst(r2, Operand(kSmiTagMask));
5135 __ b(ne, &not_smis);
5136 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
5137 // 1) Return the answer.
5138 // 2) Go to slow.
5139 // 3) Fall through to both_loaded_as_doubles.
Leon Clarkee46be812010-01-19 14:06:41 +00005140 // 4) Jump to lhs_not_nan.
Steve Blocka7e24c12009-10-30 11:49:00 +00005141 // In cases 3 and 4 we have found out we were dealing with a number-number
Leon Clarked91b9f72010-01-27 17:25:45 +00005142 // comparison. If VFP3 is supported the double values of the numbers have
5143 // been loaded into d7 and d6. Otherwise, the double values have been loaded
5144 // into r0, r1, r2, and r3.
Leon Clarkee46be812010-01-19 14:06:41 +00005145 EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005146
5147 __ bind(&both_loaded_as_doubles);
Leon Clarked91b9f72010-01-27 17:25:45 +00005148 // The arguments have been converted to doubles and stored in d6 and d7, if
5149 // VFP3 is supported, or in r0, r1, r2, and r3.
Steve Blockd0582a62009-12-15 09:54:21 +00005150 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarkee46be812010-01-19 14:06:41 +00005151 __ bind(&lhs_not_nan);
Steve Blockd0582a62009-12-15 09:54:21 +00005152 CpuFeatures::Scope scope(VFP3);
Leon Clarkee46be812010-01-19 14:06:41 +00005153 Label no_nan;
Steve Blockd0582a62009-12-15 09:54:21 +00005154 // ARMv7 VFP3 instructions to implement double precision comparison.
Leon Clarkee46be812010-01-19 14:06:41 +00005155 __ vcmp(d7, d6);
5156 __ vmrs(pc); // Move vector status bits to normal status bits.
5157 Label nan;
5158 __ b(vs, &nan);
5159 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
5160 __ mov(r0, Operand(LESS), LeaveCC, lt);
5161 __ mov(r0, Operand(GREATER), LeaveCC, gt);
5162 __ mov(pc, Operand(lr));
5163
5164 __ bind(&nan);
5165 // If one of the sides was a NaN then the v flag is set. Load r0 with
5166 // whatever it takes to make the comparison fail, since comparisons with NaN
5167 // always fail.
5168 if (cc_ == lt || cc_ == le) {
5169 __ mov(r0, Operand(GREATER));
5170 } else {
5171 __ mov(r0, Operand(LESS));
5172 }
Steve Blockd0582a62009-12-15 09:54:21 +00005173 __ mov(pc, Operand(lr));
5174 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00005175 // Checks for NaN in the doubles we have loaded. Can return the answer or
5176 // fall through if neither is a NaN. Also binds lhs_not_nan.
5177 EmitNanCheck(masm, &lhs_not_nan, cc_);
Steve Blockd0582a62009-12-15 09:54:21 +00005178 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
5179 // answer. Never falls through.
5180 EmitTwoNonNanDoubleComparison(masm, cc_);
5181 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005182
5183 __ bind(&not_smis);
5184 // At this point we know we are dealing with two different objects,
5185 // and neither of them is a Smi. The objects are in r0 and r1.
5186 if (strict_) {
5187 // This returns non-equal for some object types, or falls through if it
5188 // was not lucky.
5189 EmitStrictTwoHeapObjectCompare(masm);
5190 }
5191
5192 Label check_for_symbols;
Leon Clarked91b9f72010-01-27 17:25:45 +00005193 Label flat_string_check;
Steve Blocka7e24c12009-10-30 11:49:00 +00005194 // Check for heap-number-heap-number comparison. Can jump to slow case,
5195 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
5196 // that case. If the inputs are not doubles then jumps to check_for_symbols.
Leon Clarkee46be812010-01-19 14:06:41 +00005197 // In this case r2 will contain the type of r0. Never falls through.
Steve Blocka7e24c12009-10-30 11:49:00 +00005198 EmitCheckForTwoHeapNumbers(masm,
5199 &both_loaded_as_doubles,
5200 &check_for_symbols,
Leon Clarked91b9f72010-01-27 17:25:45 +00005201 &flat_string_check);
Steve Blocka7e24c12009-10-30 11:49:00 +00005202
5203 __ bind(&check_for_symbols);
Leon Clarkee46be812010-01-19 14:06:41 +00005204 // In the strict case, EmitStrictTwoHeapObjectCompare already took care of
5205 // symbols.
5206 if (cc_ == eq && !strict_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005207 // Either jumps to slow or returns the answer. Assumes that r2 is the type
5208 // of r0 on entry.
Leon Clarked91b9f72010-01-27 17:25:45 +00005209 EmitCheckForSymbols(masm, &flat_string_check);
Steve Blocka7e24c12009-10-30 11:49:00 +00005210 }
5211
Leon Clarked91b9f72010-01-27 17:25:45 +00005212 // Check for both being sequential ASCII strings, and inline if that is the
5213 // case.
5214 __ bind(&flat_string_check);
5215
5216 __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
5217
5218 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
5219 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
5220 r1,
5221 r0,
5222 r2,
5223 r3,
5224 r4,
5225 r5);
5226 // Never falls through to here.
5227
Steve Blocka7e24c12009-10-30 11:49:00 +00005228 __ bind(&slow);
Leon Clarked91b9f72010-01-27 17:25:45 +00005229
Steve Blocka7e24c12009-10-30 11:49:00 +00005230 __ push(r1);
5231 __ push(r0);
5232 // Figure out which native to call and setup the arguments.
5233 Builtins::JavaScript native;
Steve Blocka7e24c12009-10-30 11:49:00 +00005234 if (cc_ == eq) {
5235 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
5236 } else {
5237 native = Builtins::COMPARE;
5238 int ncr; // NaN compare result
5239 if (cc_ == lt || cc_ == le) {
5240 ncr = GREATER;
5241 } else {
5242 ASSERT(cc_ == gt || cc_ == ge); // remaining cases
5243 ncr = LESS;
5244 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005245 __ mov(r0, Operand(Smi::FromInt(ncr)));
5246 __ push(r0);
5247 }
5248
5249 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
5250 // tagged as a small integer.
Leon Clarkee46be812010-01-19 14:06:41 +00005251 __ InvokeBuiltin(native, JUMP_JS);
Steve Blocka7e24c12009-10-30 11:49:00 +00005252}
5253
5254
5255// Allocates a heap number or jumps to the label if the young space is full and
5256// a scavenge is needed.
5257static void AllocateHeapNumber(
5258 MacroAssembler* masm,
5259 Label* need_gc, // Jump here if young space is full.
5260 Register result, // The tagged address of the new heap number.
5261 Register scratch1, // A scratch register.
5262 Register scratch2) { // Another scratch register.
5263 // Allocate an object in the heap for the heap number and tag it as a heap
5264 // object.
5265 __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
5266 result,
5267 scratch1,
5268 scratch2,
5269 need_gc,
5270 TAG_OBJECT);
5271
5272 // Get heap number map and store it in the allocated object.
5273 __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
5274 __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
5275}
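// Layout sketch of the object this allocates on this 32-bit target: one word
// for the map pointer followed by the 8-byte IEEE 754 value, so
// HeapNumber::kSize / kPointerSize is 3 words. The returned pointer is
// tagged, which is why the stubs above subtract kHeapObjectTag (or use
// FieldMemOperand) before storing into kExponentOffset / kMantissaOffset.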
5276
5277
5278// We fall into this code if the operands were Smis, but the result was
5279 // not (e.g. overflow). We branch into this code (to the not_smi label) if
5280// the operands were not both Smi. The operands are in r0 and r1. In order
5281// to call the C-implemented binary fp operation routines we need to end up
5282// with the double precision floating point operands in r0 and r1 (for the
5283// value in r1) and r2 and r3 (for the value in r0).
5284static void HandleBinaryOpSlowCases(MacroAssembler* masm,
5285 Label* not_smi,
5286 const Builtins::JavaScript& builtin,
5287 Token::Value operation,
5288 OverwriteMode mode) {
5289 Label slow, slow_pop_2_first, do_the_call;
5290 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
5291 // Smi-smi case (overflow).
5292 // Since both are Smis there is no heap number to overwrite, so allocate.
5293 // The new heap number is in r5. r6 and r7 are scratch.
5294 AllocateHeapNumber(masm, &slow, r5, r6, r7);
Steve Blockd0582a62009-12-15 09:54:21 +00005295
Leon Clarked91b9f72010-01-27 17:25:45 +00005296 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
5297 // using registers d7 and d6 for the double values.
5298 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
5299 Token::MOD != operation;
5300 if (use_fp_registers) {
Steve Blockd0582a62009-12-15 09:54:21 +00005301 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00005302 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
5303 __ vmov(s15, r7);
5304 __ vcvt(d7, s15);
5305 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
5306 __ vmov(s13, r7);
5307 __ vcvt(d6, s13);
Steve Blockd0582a62009-12-15 09:54:21 +00005308 } else {
5309 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
5310 __ mov(r7, Operand(r0));
5311 ConvertToDoubleStub stub1(r3, r2, r7, r6);
5312 __ push(lr);
5313 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
5314 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
5315 __ mov(r7, Operand(r1));
5316 ConvertToDoubleStub stub2(r1, r0, r7, r6);
5317 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
5318 __ pop(lr);
5319 }
5320
Steve Blocka7e24c12009-10-30 11:49:00 +00005321 __ jmp(&do_the_call); // Tail call. No return.
5322
5323 // We jump to here if something goes wrong (one param is not a number of any
5324 // sort or new-space allocation fails).
5325 __ bind(&slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005326
5327 // Push arguments to the stack
Steve Blocka7e24c12009-10-30 11:49:00 +00005328 __ push(r1);
5329 __ push(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00005330
5331 if (Token::ADD == operation) {
5332 // Test for string arguments before calling runtime.
5333 // r1 : first argument
5334 // r0 : second argument
5335 // sp[0] : second argument
Andrei Popescu31002712010-02-23 13:46:05 +00005336 // sp[4] : first argument
Steve Blockd0582a62009-12-15 09:54:21 +00005337
5338 Label not_strings, not_string1, string1;
5339 __ tst(r1, Operand(kSmiTagMask));
5340 __ b(eq, &not_string1);
5341 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
5342 __ b(ge, &not_string1);
5343
5344 // First argument is a string, test second.
5345 __ tst(r0, Operand(kSmiTagMask));
5346 __ b(eq, &string1);
5347 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
5348 __ b(ge, &string1);
5349
5350 // First and second argument are strings.
Andrei Popescu31002712010-02-23 13:46:05 +00005351 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
5352 __ TailCallStub(&stub);
Steve Blockd0582a62009-12-15 09:54:21 +00005353
5354 // Only first argument is a string.
5355 __ bind(&string1);
5356 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
5357
5358 // First argument was not a string, test second.
5359 __ bind(&not_string1);
5360 __ tst(r0, Operand(kSmiTagMask));
5361 __ b(eq, &not_strings);
5362 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
5363 __ b(ge, &not_strings);
5364
5365 // Only second argument is a string.
Steve Blockd0582a62009-12-15 09:54:21 +00005366 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
5367
5368 __ bind(&not_strings);
5369 }
5370
Steve Blocka7e24c12009-10-30 11:49:00 +00005371 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
5372
5373 // We branch here if at least one of r0 and r1 is not a Smi.
5374 __ bind(not_smi);
5375 if (mode == NO_OVERWRITE) {
5376 // In the case where there is no chance of an overwritable float we may as
5377 // well do the allocation immediately while r0 and r1 are untouched.
5378 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5379 }
5380
5381 // Move r0 to a double in r2-r3.
5382 __ tst(r0, Operand(kSmiTagMask));
5383 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5384 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5385 __ b(ne, &slow);
5386 if (mode == OVERWRITE_RIGHT) {
5387 __ mov(r5, Operand(r0)); // Overwrite this heap number.
5388 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005389 if (use_fp_registers) {
5390 CpuFeatures::Scope scope(VFP3);
5391 // Load the double from tagged HeapNumber r0 to d7.
5392 __ sub(r7, r0, Operand(kHeapObjectTag));
5393 __ vldr(d7, r7, HeapNumber::kValueOffset);
5394 } else {
5395 // Calling convention says that second double is in r2 and r3.
5396 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
5397 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
5398 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005399 __ jmp(&finished_loading_r0);
5400 __ bind(&r0_is_smi);
5401 if (mode == OVERWRITE_RIGHT) {
5402 // We can't overwrite a Smi so get address of new heap number into r5.
5403 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5404 }
Steve Blockd0582a62009-12-15 09:54:21 +00005405
Leon Clarked91b9f72010-01-27 17:25:45 +00005406 if (use_fp_registers) {
Steve Blockd0582a62009-12-15 09:54:21 +00005407 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00005408 // Convert smi in r0 to double in d7.
5409 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
5410 __ vmov(s15, r7);
5411 __ vcvt(d7, s15);
Steve Blockd0582a62009-12-15 09:54:21 +00005412 } else {
5413 // Write Smi from r0 to r3 and r2 in double format.
5414 __ mov(r7, Operand(r0));
5415 ConvertToDoubleStub stub3(r3, r2, r7, r6);
5416 __ push(lr);
5417 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
5418 __ pop(lr);
5419 }
5420
Steve Blocka7e24c12009-10-30 11:49:00 +00005421 __ bind(&finished_loading_r0);
5422
5423 // Move r1 to a double in r0-r1.
5424 __ tst(r1, Operand(kSmiTagMask));
5425 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5426 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5427 __ b(ne, &slow);
5428 if (mode == OVERWRITE_LEFT) {
5429 __ mov(r5, Operand(r1)); // Overwrite this heap number.
5430 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005431 if (use_fp_registers) {
5432 CpuFeatures::Scope scope(VFP3);
5433 // Load the double from tagged HeapNumber r1 to d6.
5434 __ sub(r7, r1, Operand(kHeapObjectTag));
5435 __ vldr(d6, r7, HeapNumber::kValueOffset);
5436 } else {
5437 // Calling convention says that first double is in r0 and r1.
5438 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
5439 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
5440 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005441 __ jmp(&finished_loading_r1);
5442 __ bind(&r1_is_smi);
5443 if (mode == OVERWRITE_LEFT) {
5444 // We can't overwrite a Smi so get address of new heap number into r5.
5445 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5446 }
Steve Blockd0582a62009-12-15 09:54:21 +00005447
Leon Clarked91b9f72010-01-27 17:25:45 +00005448 if (use_fp_registers) {
Steve Blockd0582a62009-12-15 09:54:21 +00005449 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00005450 // Convert smi in r1 to double in d6.
5451 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
5452 __ vmov(s13, r7);
5453 __ vcvt(d6, s13);
Steve Blockd0582a62009-12-15 09:54:21 +00005454 } else {
5455 // Write Smi from r1 to r1 and r0 in double format.
5456 __ mov(r7, Operand(r1));
5457 ConvertToDoubleStub stub4(r1, r0, r7, r6);
5458 __ push(lr);
5459 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
5460 __ pop(lr);
5461 }
5462
Steve Blocka7e24c12009-10-30 11:49:00 +00005463 __ bind(&finished_loading_r1);
5464
5465 __ bind(&do_the_call);
Leon Clarked91b9f72010-01-27 17:25:45 +00005466 // If we are inlining the operation using VFP3 instructions for
5467 // add, subtract, multiply, or divide, the arguments are in d6 and d7.
5468 if (use_fp_registers) {
Steve Blockd0582a62009-12-15 09:54:21 +00005469 CpuFeatures::Scope scope(VFP3);
5470 // ARMv7 VFP3 instructions to implement
5471 // double precision, add, subtract, multiply, divide.
Steve Blockd0582a62009-12-15 09:54:21 +00005472
5473 if (Token::MUL == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005474 __ vmul(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005475 } else if (Token::DIV == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005476 __ vdiv(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005477 } else if (Token::ADD == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005478 __ vadd(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005479 } else if (Token::SUB == operation) {
Leon Clarkee46be812010-01-19 14:06:41 +00005480 __ vsub(d5, d6, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00005481 } else {
5482 UNREACHABLE();
5483 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005484 __ sub(r0, r5, Operand(kHeapObjectTag));
5485 __ vstr(d5, r0, HeapNumber::kValueOffset);
5486 __ add(r0, r0, Operand(kHeapObjectTag));
Steve Blockd0582a62009-12-15 09:54:21 +00005487 __ mov(pc, lr);
5488 return;
5489 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005490
5491 // If we did not inline the operation, then the arguments are in:
5492 // r0: Left value (least significant part of mantissa).
5493 // r1: Left value (sign, exponent, top of mantissa).
5494 // r2: Right value (least significant part of mantissa).
5495 // r3: Right value (sign, exponent, top of mantissa).
5496 // r5: Address of heap number for result.
5497
Steve Blocka7e24c12009-10-30 11:49:00 +00005498 __ push(lr); // For later.
5499 __ push(r5); // Address of heap number that is answer.
5500 __ AlignStack(0);
5501 // Call C routine that may not cause GC or other trouble.
5502 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
5503 __ Call(r5);
5504 __ pop(r4); // Address of heap number.
5505 __ cmp(r4, Operand(Smi::FromInt(0)));
5506 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
5507 // Store answer in the overwritable heap number.
5508#if !defined(USE_ARM_EABI)
5509 // Double returned in fp coprocessor register 0 and 1, encoded as register
5510 // cr8. Offsets must be divisible by 4 for coprocessor so we need to
5511 // substract the tag from r4.
5512 // subtract the tag from r4.
5513 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
5514#else
5515 // Double returned in registers 0 and 1.
5516 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
5517 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
5518#endif
5519 __ mov(r0, Operand(r4));
5520 // And we are done.
5521 __ pop(pc);
5522}
5523
5524
5525// Tries to get a signed int32 out of a double precision floating point heap
5526// number. Rounds towards 0. Fastest for doubles that are in the ranges
5527// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
5528// almost to the range of signed int32 values that are not Smis. Jumps to the
5529// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
5530// (excluding the endpoints).
5531static void GetInt32(MacroAssembler* masm,
5532 Register source,
5533 Register dest,
5534 Register scratch,
5535 Register scratch2,
5536 Label* slow) {
5537 Label right_exponent, done;
5538 // Get exponent word.
5539 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
5540 // Get exponent alone in scratch2.
5541 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
5542 // Load dest with zero. We use this either for the final shift or
5543 // for the answer.
5544 __ mov(dest, Operand(0));
5545 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
5546 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
5547 // the exponent that we are fastest at and also the highest exponent we can
5548 // handle here.
5549 const uint32_t non_smi_exponent =
5550 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
5551 __ cmp(scratch2, Operand(non_smi_exponent));
5552 // If we have a match of the int32-but-not-Smi exponent then skip some logic.
5553 __ b(eq, &right_exponent);
5554 // If the exponent is higher than that then go to slow case. This catches
5555 // numbers that don't fit in a signed int32, infinities and NaNs.
5556 __ b(gt, slow);
5557
5558 // We know the exponent is smaller than 30 (biased). If it is less than
5559 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
5560 // it rounds to zero.
5561 const uint32_t zero_exponent =
5562 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
5563 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
5564 // Dest already has a Smi zero.
5565 __ b(lt, &done);
Steve Blockd0582a62009-12-15 09:54:21 +00005566 if (!CpuFeatures::IsSupported(VFP3)) {
5567 // We have a shifted exponent between 0 and 30 in scratch2.
5568 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
5569 // We now have the exponent in dest. Subtract from 30 to get
5570 // how much to shift down.
5571 __ rsb(dest, dest, Operand(30));
5572 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005573 __ bind(&right_exponent);
Steve Blockd0582a62009-12-15 09:54:21 +00005574 if (CpuFeatures::IsSupported(VFP3)) {
5575 CpuFeatures::Scope scope(VFP3);
5576 // ARMv7 VFP3 instructions implementing double precision to integer
5577 // conversion using round to zero.
5578 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00005579 __ vmov(d7, scratch2, scratch);
5580 __ vcvt(s15, d7);
5581 __ vmov(dest, s15);
Steve Blockd0582a62009-12-15 09:54:21 +00005582 } else {
5583 // Get the top bits of the mantissa.
5584 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
5585 // Put back the implicit 1.
5586 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
5587 // Shift up the mantissa bits to take up the space the exponent used to
5588 // take. We just orred in the implicit bit so that took care of one and
5589 // we want to leave the sign bit 0 so we subtract 2 bits from the shift
5590 // distance.
5591 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
5592 __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
5593 // Put sign in zero flag.
5594 __ tst(scratch, Operand(HeapNumber::kSignMask));
5595 // Get the second half of the double. For some exponents we don't
5596 // actually need this because the bits get shifted out again, but
5597 // it's probably slower to test than just to do it.
5598 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
5599 // Shift down 22 bits to get the last 10 bits.
5600 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
5601 // Move down according to the exponent.
5602 __ mov(dest, Operand(scratch, LSR, dest));
5603 // Fix sign if sign bit was set.
5604 __ rsb(dest, dest, Operand(0), LeaveCC, ne);
5605 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005606 __ bind(&done);
5607}
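// Worked example for the non-VFP path above (hand-computed): 8.5 has top
// word 0x40210000 and low word 0, i.e. biased exponent 1026.
//   1026 is below the int32-but-not-Smi exponent, and 1026 - 1023 = 3, so
//   dest becomes 30 - 3 = 27.
//   The top mantissa bits 0x00010000, or-ed with the implicit 1 and shifted
//   up by 10, give 0x44000000; the low word contributes nothing here.
//   0x44000000 >> 27 = 8, and for -8.5 the final rsb produces -8,
// i.e. truncation towards zero as the ES spec requires.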
5608
Steve Blocka7e24c12009-10-30 11:49:00 +00005609// For bitwise ops where the inputs are not both Smis we here try to determine
5610// whether both inputs are either Smis or at least heap numbers that can be
5611// represented by a 32 bit signed value. We truncate towards zero as required
5612// by the ES spec. If this is the case we do the bitwise op and see if the
5613// result is a Smi. If so, great, otherwise we try to find a heap number to
5614// write the answer into (either by allocating or by overwriting).
5615// On entry the operands are in r0 and r1. On exit the answer is in r0.
5616void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
5617 Label slow, result_not_a_smi;
5618 Label r0_is_smi, r1_is_smi;
5619 Label done_checking_r0, done_checking_r1;
5620
5621 __ tst(r1, Operand(kSmiTagMask));
5622 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5623 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5624 __ b(ne, &slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005625 GetInt32(masm, r1, r3, r5, r4, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00005626 __ jmp(&done_checking_r1);
5627 __ bind(&r1_is_smi);
5628 __ mov(r3, Operand(r1, ASR, 1));
5629 __ bind(&done_checking_r1);
5630
5631 __ tst(r0, Operand(kSmiTagMask));
5632 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5633 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5634 __ b(ne, &slow);
Steve Blockd0582a62009-12-15 09:54:21 +00005635 GetInt32(masm, r0, r2, r5, r4, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00005636 __ jmp(&done_checking_r0);
5637 __ bind(&r0_is_smi);
5638 __ mov(r2, Operand(r0, ASR, 1));
5639 __ bind(&done_checking_r0);
5640
5641 // r0 and r1: Original operands (Smi or heap numbers).
5642 // r2 and r3: Signed int32 operands.
5643 switch (op_) {
5644 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
5645 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
5646 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
5647 case Token::SAR:
5648 // Use only the 5 least significant bits of the shift count.
5649 __ and_(r2, r2, Operand(0x1f));
5650 __ mov(r2, Operand(r3, ASR, r2));
5651 break;
5652 case Token::SHR:
5653 // Use only the 5 least significant bits of the shift count.
5654 __ and_(r2, r2, Operand(0x1f));
5655 __ mov(r2, Operand(r3, LSR, r2), SetCC);
5656 // SHR is special because it is required to produce a positive answer.
5657 // The code below for writing into heap numbers isn't capable of writing
5658 // the register as an unsigned int so we go to slow case if we hit this
5659 // case.
5660 __ b(mi, &slow);
5661 break;
5662 case Token::SHL:
5663 // Use only the 5 least significant bits of the shift count.
5664 __ and_(r2, r2, Operand(0x1f));
5665 __ mov(r2, Operand(r3, LSL, r2));
5666 break;
5667 default: UNREACHABLE();
5668 }
5669 // Check that the *signed* result fits in a Smi.
5670 __ add(r3, r2, Operand(0x40000000), SetCC);
5671 __ b(mi, &result_not_a_smi);
5672 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5673 __ Ret();
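  // The check above works because signed values in [-2^30, 2^30) are mapped
  // onto [0, 2^31) by adding 0x40000000, so the sign flag is clear exactly
  // when the result fits in a Smi. For example, 0x3fffffff + 0x40000000 is
  // still positive, while 0x40000000 + 0x40000000 is 0x80000000 and sets mi.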
5674
5675 Label have_to_allocate, got_a_heap_number;
5676 __ bind(&result_not_a_smi);
5677 switch (mode_) {
5678 case OVERWRITE_RIGHT: {
5679 __ tst(r0, Operand(kSmiTagMask));
5680 __ b(eq, &have_to_allocate);
5681 __ mov(r5, Operand(r0));
5682 break;
5683 }
5684 case OVERWRITE_LEFT: {
5685 __ tst(r1, Operand(kSmiTagMask));
5686 __ b(eq, &have_to_allocate);
5687 __ mov(r5, Operand(r1));
5688 break;
5689 }
5690 case NO_OVERWRITE: {
5691 // Get a new heap number in r5. r6 and r7 are scratch.
5692 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5693 }
5694 default: break;
5695 }
5696 __ bind(&got_a_heap_number);
5697 // r2: Answer as signed int32.
5698 // r5: Heap number to write answer into.
5699
5700 // Nothing can go wrong now, so move the heap number to r0, which is the
5701 // result.
5702 __ mov(r0, Operand(r5));
5703
5704 // Tail call that writes the int32 in r2 to the heap number in r0, using
5705 // r3 as scratch. r0 is preserved and returned.
5706 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
5707 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
5708
5709 if (mode_ != NO_OVERWRITE) {
5710 __ bind(&have_to_allocate);
5711 // Get a new heap number in r5. r6 and r7 are scratch.
5712 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5713 __ jmp(&got_a_heap_number);
5714 }
5715
5716 // If all else failed then we go to the runtime system.
5717 __ bind(&slow);
5718 __ push(r1); // restore stack
5719 __ push(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00005720 switch (op_) {
5721 case Token::BIT_OR:
5722 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
5723 break;
5724 case Token::BIT_AND:
5725 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
5726 break;
5727 case Token::BIT_XOR:
5728 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
5729 break;
5730 case Token::SAR:
5731 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
5732 break;
5733 case Token::SHR:
5734 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
5735 break;
5736 case Token::SHL:
5737 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
5738 break;
5739 default:
5740 UNREACHABLE();
5741 }
5742}
5743
5744
5745 // Can we multiply by x with at most two shifts and an add?
5746// This answers yes to all integers from 2 to 10.
5747static bool IsEasyToMultiplyBy(int x) {
5748 if (x < 2) return false; // Avoid special cases.
5749 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
5750 if (IsPowerOf2(x)) return true; // Simple shift.
5751 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
5752 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
5753 return false;
5754}
5755
5756
5757// Can multiply by anything that IsEasyToMultiplyBy returns true for.
5758// Source and destination may be the same register. This routine does
5759// not set carry and overflow the way a mul instruction would.
5760static void MultiplyByKnownInt(MacroAssembler* masm,
5761 Register source,
5762 Register destination,
5763 int known_int) {
5764 if (IsPowerOf2(known_int)) {
5765 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
5766 } else if (PopCountLessThanEqual2(known_int)) {
5767 int first_bit = BitPosition(known_int);
5768 int second_bit = BitPosition(known_int ^ (1 << first_bit));
5769 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
5770 if (first_bit != 0) {
5771 __ mov(destination, Operand(destination, LSL, first_bit));
5772 }
5773 } else {
5774 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
5775 int the_bit = BitPosition(known_int + 1);
5776 __ rsb(destination, source, Operand(source, LSL, the_bit));
5777 }
5778}
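// Examples of the three cases above (hand-computed):
//   known_int = 8  -> destination = source << 3
//   known_int = 10 -> destination = source + (source << 2)   (5 * source),
//                     then destination <<= 1                 (10 * source)
//   known_int = 7  -> destination = (source << 3) - source   (via rsb)
// all of which stay within the "two shifts and an add" budget that
// IsEasyToMultiplyBy promises.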
5779
5780
5781 // This function (as opposed to MultiplyByKnownInt) takes the known int in
5782// a register for the cases where it doesn't know a good trick, and may deliver
5783// a result that needs shifting.
5784static void MultiplyByKnownInt2(
5785 MacroAssembler* masm,
5786 Register result,
5787 Register source,
5788 Register known_int_register, // Smi tagged.
5789 int known_int,
5790 int* required_shift) { // Including Smi tag shift
5791 switch (known_int) {
5792 case 3:
5793 __ add(result, source, Operand(source, LSL, 1));
5794 *required_shift = 1;
5795 break;
5796 case 5:
5797 __ add(result, source, Operand(source, LSL, 2));
5798 *required_shift = 1;
5799 break;
5800 case 6:
5801 __ add(result, source, Operand(source, LSL, 1));
5802 *required_shift = 2;
5803 break;
5804 case 7:
5805 __ rsb(result, source, Operand(source, LSL, 3));
5806 *required_shift = 1;
5807 break;
5808 case 9:
5809 __ add(result, source, Operand(source, LSL, 3));
5810 *required_shift = 1;
5811 break;
5812 case 10:
5813 __ add(result, source, Operand(source, LSL, 2));
5814 *required_shift = 2;
5815 break;
5816 default:
5817 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
5818 __ mul(result, source, known_int_register);
5819 *required_shift = 0;
5820 }
5821}
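// For example, with known_int = 6 the code above produces
// result = source + (source << 1) (3 * source) and *required_shift = 2, so
// the caller's later shift by 2 yields 12 * source, i.e. the Smi-tagged
// representation of 6 * source, with the tag shift already folded in.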
5822
5823
Leon Clarkee46be812010-01-19 14:06:41 +00005824const char* GenericBinaryOpStub::GetName() {
5825 if (name_ != NULL) return name_;
5826 const int len = 100;
5827 name_ = Bootstrapper::AllocateAutoDeletedArray(len);
5828 if (name_ == NULL) return "OOM";
5829 const char* op_name = Token::Name(op_);
5830 const char* overwrite_name;
5831 switch (mode_) {
5832 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
5833 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
5834 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
5835 default: overwrite_name = "UnknownOverwrite"; break;
5836 }
5837
5838 OS::SNPrintF(Vector<char>(name_, len),
5839 "GenericBinaryOpStub_%s_%s%s",
5840 op_name,
5841 overwrite_name,
5842 specialized_on_rhs_ ? "_ConstantRhs" : "");
5843 return name_;
5844}
5845
5846
Steve Blocka7e24c12009-10-30 11:49:00 +00005848void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
5849 // r1 : x
5850 // r0 : y
5851 // result : r0
5852
5853 // All ops need to know whether we are dealing with two Smis. Set up r2 to
5854 // tell us that.
5855 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
5856
5857 switch (op_) {
5858 case Token::ADD: {
5859 Label not_smi;
5860 // Fast path.
5861 ASSERT(kSmiTag == 0); // Adjust code below.
5862 __ tst(r2, Operand(kSmiTagMask));
5863 __ b(ne, &not_smi);
5864 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
5865 // Return if no overflow.
5866 __ Ret(vc);
5867 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
5868
5869 HandleBinaryOpSlowCases(masm,
5870 &not_smi,
5871 Builtins::ADD,
5872 Token::ADD,
5873 mode_);
5874 break;
5875 }
5876
5877 case Token::SUB: {
5878 Label not_smi;
5879 // Fast path.
5880 ASSERT(kSmiTag == 0); // Adjust code below.
5881 __ tst(r2, Operand(kSmiTagMask));
5882 __ b(ne, &not_smi);
5883 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
5884 // Return if no overflow.
5885 __ Ret(vc);
5886 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
5887
5888 HandleBinaryOpSlowCases(masm,
5889 &not_smi,
5890 Builtins::SUB,
5891 Token::SUB,
5892 mode_);
5893 break;
5894 }
5895
5896 case Token::MUL: {
5897 Label not_smi, slow;
5898 ASSERT(kSmiTag == 0); // adjust code below
5899 __ tst(r2, Operand(kSmiTagMask));
5900 __ b(ne, &not_smi);
5901 // Remove tag from one operand (but keep sign), so that result is Smi.
5902 __ mov(ip, Operand(r0, ASR, kSmiTagSize));
5903 // Do multiplication
5904 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
5905 // Go slow on overflows (overflow bit is not set).
5906 __ mov(ip, Operand(r3, ASR, 31));
5907 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
5908 __ b(ne, &slow);
5909 // Go slow on zero result to handle -0.
5910 __ tst(r3, Operand(r3));
5911 __ mov(r0, Operand(r3), LeaveCC, ne);
5912 __ Ret(ne);
5913 // We need to return -0 if a negative number was multiplied by 0 to get 0.
5914 // We know one of them was zero.
5915 __ add(r2, r0, Operand(r1), SetCC);
5916 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
5917 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
5918 // Slow case. We fall through here if we multiplied a negative number
5919 // with 0, because that would mean we should produce -0.
5920 __ bind(&slow);
5921
5922 HandleBinaryOpSlowCases(masm,
5923 &not_smi,
5924 Builtins::MUL,
5925 Token::MUL,
5926 mode_);
5927 break;
5928 }
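
// Illustration only: the overflow test used above, as plain C++. A signed
// 32x32 multiplication fits in 32 bits exactly when the high word of the
// 64-bit product equals the sign extension of the low word, which is what the
// smull / ASR 31 / cmp sequence checks. Sketch on untagged ints; the stub
// applies it with one operand still Smi tagged so the product stays tagged.
static inline bool IllustrateMulOverflows32(int32_t a, int32_t b) {
  int64_t product = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  return hi != (lo >> 31);  // High word must be copies of lo's sign bit.
}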
5929
5930 case Token::DIV:
5931 case Token::MOD: {
5932 Label not_smi;
5933 if (specialized_on_rhs_) {
5934 Label smi_is_unsuitable;
5935 __ BranchOnNotSmi(r1, &not_smi);
5936 if (IsPowerOf2(constant_rhs_)) {
5937 if (op_ == Token::MOD) {
5938 __ and_(r0,
5939 r1,
5940 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
5941 SetCC);
5942 // We now have the answer, but if the input was negative we also
5943 // have the sign bit. Our work is done if the result is
5944 // positive or zero:
5945 __ Ret(pl);
5946 // A mod of a negative left hand side must return a negative number.
5947 // Unfortunately if the answer is 0 then we must return -0. And we
5948 // already optimistically trashed r0 so we may need to restore it.
5949 __ eor(r0, r0, Operand(0x80000000u), SetCC);
5950 // Next two instructions are conditional on the answer being -0.
5951 __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
5952 __ b(eq, &smi_is_unsuitable);
5953 // We need to subtract the divisor. E.g. -3 % 4 == -3.
5954 __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
5955 } else {
5956 ASSERT(op_ == Token::DIV);
5957 __ tst(r1,
5958 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
5959 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
5960 int shift = 0;
5961 int d = constant_rhs_;
5962 while ((d & 1) == 0) {
5963 d >>= 1;
5964 shift++;
5965 }
5966 __ mov(r0, Operand(r1, LSR, shift));
5967 __ bic(r0, r0, Operand(kSmiTagMask));
5968 }
5969 } else {
5970 // Not a power of 2.
5971 __ tst(r1, Operand(0x80000000u));
5972 __ b(ne, &smi_is_unsuitable);
5973 // Find a fixed point reciprocal of the divisor so we can divide by
5974 // multiplying.
5975 double divisor = 1.0 / constant_rhs_;
5976 int shift = 32;
5977 double scale = 4294967296.0; // 1 << 32.
5978 uint32_t mul;
5979 // Maximise the precision of the fixed point reciprocal.
5980 while (true) {
5981 mul = static_cast<uint32_t>(scale * divisor);
5982 if (mul >= 0x7fffffff) break;
5983 scale *= 2.0;
5984 shift++;
5985 }
5986 mul++;
5987 __ mov(r2, Operand(mul));
5988 __ umull(r3, r2, r2, r1);
5989 __ mov(r2, Operand(r2, LSR, shift - 31));
5990 // r2 is r1 / rhs. r2 is not Smi tagged.
5991 // r0 is still the known rhs. r0 is Smi tagged.
5992 // r1 is still the unknown lhs. r1 is Smi tagged.
5993 int required_r4_shift = 0; // Including the Smi tag shift of 1.
5994 // r4 = r2 * r0.
5995 MultiplyByKnownInt2(masm,
5996 r4,
5997 r2,
5998 r0,
5999 constant_rhs_,
6000 &required_r4_shift);
6001 // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
6002 if (op_ == Token::DIV) {
6003 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
6004 __ b(ne, &smi_is_unsuitable); // There was a remainder.
6005 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
6006 } else {
6007 ASSERT(op_ == Token::MOD);
6008 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
6009 }
6010 }
6011 __ Ret();
6012 __ bind(&smi_is_unsuitable);
6013 } else {
6014 __ jmp(&not_smi);
6015 }
6016 HandleBinaryOpSlowCases(masm,
6017 &not_smi,
6018 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
6019 op_,
6020 mode_);
6021 break;
6022 }
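
// Illustration only: the fixed point reciprocal division used above, as a
// self-contained C++ sketch (hypothetical helper, not part of the stub).
// mul is chosen so that it approximates 2^shift / divisor with at least 31
// significant bits; the quotient is then the high part of a widening
// multiply. This matches the stub for the small, non-power-of-2 constant
// divisors it specializes on and for Smi-range dividends.
static uint32_t IllustrateDivideByConstant(uint32_t dividend,
                                           uint32_t divisor) {
  double scale = 4294967296.0;  // 2^32.
  int shift = 32;
  uint32_t mul;
  // Maximise the precision of the fixed point reciprocal, as the stub does.
  while (true) {
    mul = static_cast<uint32_t>(scale / divisor);
    if (mul >= 0x7fffffff) break;
    scale *= 2.0;
    shift++;
  }
  mul++;
  uint64_t product = static_cast<uint64_t>(mul) * dividend;
  return static_cast<uint32_t>(product >> shift);  // dividend / divisor.
}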
6023
6024 case Token::BIT_OR:
6025 case Token::BIT_AND:
6026 case Token::BIT_XOR:
6027 case Token::SAR:
6028 case Token::SHR:
6029 case Token::SHL: {
6030 Label slow;
6031 ASSERT(kSmiTag == 0); // adjust code below
6032 __ tst(r2, Operand(kSmiTagMask));
6033 __ b(ne, &slow);
6034 switch (op_) {
6035 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
6036 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
6037 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
6038 case Token::SAR:
6039 // Remove tags from right operand.
Andrei Popescu31002712010-02-23 13:46:05 +00006040 __ GetLeastBitsFromSmi(r2, r0, 5);
Steve Blocka7e24c12009-10-30 11:49:00 +00006041 __ mov(r0, Operand(r1, ASR, r2));
6042 // Smi tag result.
6043 __ bic(r0, r0, Operand(kSmiTagMask));
6044 break;
6045 case Token::SHR:
6046 // Remove tags from operands. We can't do this on a 31 bit number
6047 // because then the 0s get shifted into bit 30 instead of bit 31.
6048 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
Andrei Popescu31002712010-02-23 13:46:05 +00006049 __ GetLeastBitsFromSmi(r2, r0, 5);
Steve Blocka7e24c12009-10-30 11:49:00 +00006050 __ mov(r3, Operand(r3, LSR, r2));
6051 // Unsigned shift is not allowed to produce a negative number, so
6052 // check the sign bit and the sign bit after Smi tagging.
6053 __ tst(r3, Operand(0xc0000000));
6054 __ b(ne, &slow);
6055 // Smi tag result.
6056 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
6057 break;
6058 case Token::SHL:
6059 // Remove tags from operands.
6060 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
Andrei Popescu31002712010-02-23 13:46:05 +00006061 __ GetLeastBitsFromSmi(r2, r0, 5);
Steve Blocka7e24c12009-10-30 11:49:00 +00006062 __ mov(r3, Operand(r3, LSL, r2));
6063 // Check that the signed result fits in a Smi.
6064 __ add(r2, r3, Operand(0x40000000), SetCC);
6065 __ b(mi, &slow);
6066 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
6067 break;
6068 default: UNREACHABLE();
6069 }
6070 __ Ret();
6071 __ bind(&slow);
6072 HandleNonSmiBitwiseOp(masm);
6073 break;
6074 }
6075
6076 default: UNREACHABLE();
6077 }
6078 // This code should be unreachable.
6079 __ stop("Unreachable");
6080}
6081
6082
6083void StackCheckStub::Generate(MacroAssembler* masm) {
6084 // Do tail-call to runtime routine. Runtime routines expect at least one
6085 // argument, so give it a Smi.
6086 __ mov(r0, Operand(Smi::FromInt(0)));
6087 __ push(r0);
6088 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
6089
6090 __ StubReturn(1);
6091}
6092
6093
Leon Clarkee46be812010-01-19 14:06:41 +00006094void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Leon Clarke4515c472010-02-03 11:58:03 +00006095 Label slow, done;
Leon Clarkee46be812010-01-19 14:06:41 +00006096
Leon Clarke4515c472010-02-03 11:58:03 +00006097 if (op_ == Token::SUB) {
6098 // Check whether the value is a smi.
6099 Label try_float;
6100 __ tst(r0, Operand(kSmiTagMask));
6101 __ b(ne, &try_float);
Steve Blocka7e24c12009-10-30 11:49:00 +00006102
Leon Clarke4515c472010-02-03 11:58:03 +00006103 // Go to the slow case if the value of the expression is zero, so that the
6104 // distinction between 0 and -0 (which is not a smi) is handled correctly.
6105 __ cmp(r0, Operand(0));
6106 __ b(eq, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00006107
Leon Clarke4515c472010-02-03 11:58:03 +00006108 // The value of the expression is a smi that is not zero. Try
6109 // optimistic subtraction '0 - value'.
6110 __ rsb(r1, r0, Operand(0), SetCC);
6111 __ b(vs, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00006112
Leon Clarke4515c472010-02-03 11:58:03 +00006113 __ mov(r0, Operand(r1)); // Set r0 to result.
6114 __ b(&done);
Steve Blocka7e24c12009-10-30 11:49:00 +00006115
Leon Clarke4515c472010-02-03 11:58:03 +00006116 __ bind(&try_float);
6117 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
6118 __ b(ne, &slow);
6119 // r0 is a heap number. Get a new heap number in r1.
6120 if (overwrite_) {
6121 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6122 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
6123 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6124 } else {
6125 AllocateHeapNumber(masm, &slow, r1, r2, r3);
6126 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
6127 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6128 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
6129 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
6130 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
6131 __ mov(r0, Operand(r1));
6132 }
6133 } else if (op_ == Token::BIT_NOT) {
6134 // Check if the operand is a heap number.
6135 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
6136 __ b(ne, &slow);
6137
6138 // Convert the heap number in r0 to an untagged integer in r1.
6139 GetInt32(masm, r0, r1, r2, r3, &slow);
6140
6141 // Do the bitwise operation (move negated) and check if the result
6142 // fits in a smi.
6143 Label try_float;
6144 __ mvn(r1, Operand(r1));
6145 __ add(r2, r1, Operand(0x40000000), SetCC);
6146 __ b(mi, &try_float);
6147 __ mov(r0, Operand(r1, LSL, kSmiTagSize));
6148 __ b(&done);
6149
6150 __ bind(&try_float);
6151 if (!overwrite_) {
6152 // Allocate a fresh heap number, but don't overwrite r0 until
6153 // we're sure we can do it without going through the slow case
6154 // that needs the value in r0.
6155 AllocateHeapNumber(masm, &slow, r2, r3, r4);
6156 __ mov(r0, Operand(r2));
6157 }
6158
6159 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
6160 // have to set up a frame.
6161 WriteInt32ToHeapNumberStub stub(r1, r0, r2);
6162 __ push(lr);
6163 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
6164 __ pop(lr);
6165 } else {
6166 UNIMPLEMENTED();
6167 }
6168
6169 __ bind(&done);
Steve Blocka7e24c12009-10-30 11:49:00 +00006170 __ StubReturn(1);
6171
Leon Clarke4515c472010-02-03 11:58:03 +00006172 // Handle the slow case by jumping to the JavaScript builtin.
Steve Blocka7e24c12009-10-30 11:49:00 +00006173 __ bind(&slow);
6174 __ push(r0);
Leon Clarke4515c472010-02-03 11:58:03 +00006175 switch (op_) {
6176 case Token::SUB:
6177 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
6178 break;
6179 case Token::BIT_NOT:
6180 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
6181 break;
6182 default:
6183 UNREACHABLE();
Steve Blocka7e24c12009-10-30 11:49:00 +00006184 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006185}
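
// Illustration only: why the Smi fast path for unary minus above bails out.
// Negating the Smi 0 must produce -0, which only exists as a heap number,
// and negating the most negative tagged value overflows (the rsb sets the V
// flag). Plain C++ sketch on the tagged representation (tag bit == 0, value
// in the upper 31 bits):
static inline bool IllustrateSmiNegationIsSafe(int32_t tagged) {
  if (tagged == 0) return false;  // Would need the -0 heap number.
  if (tagged == static_cast<int32_t>(0x80000000u)) return false;  // Overflow.
  return true;  // 0 - tagged is again a valid Smi.
}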
6186
6187
6188void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
6189 // r0 holds the exception.
6190
6191 // Adjust this code if not the case.
6192 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6193
6194 // Drop the sp to the top of the handler.
6195 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6196 __ ldr(sp, MemOperand(r3));
6197
6198 // Restore the next handler and frame pointer, discard handler state.
6199 ASSERT(StackHandlerConstants::kNextOffset == 0);
6200 __ pop(r2);
6201 __ str(r2, MemOperand(r3));
6202 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6203 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
6204
6205 // Before returning we restore the context from the frame pointer if
6206 // not NULL. The frame pointer is NULL in the exception handler of a
6207 // JS entry frame.
6208 __ cmp(fp, Operand(0));
6209 // Set cp to NULL if fp is NULL.
6210 __ mov(cp, Operand(0), LeaveCC, eq);
6211 // Restore cp otherwise.
6212 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6213#ifdef DEBUG
6214 if (FLAG_debug_code) {
6215 __ mov(lr, Operand(pc));
6216 }
6217#endif
6218 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6219 __ pop(pc);
6220}
6221
6222
6223void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
6224 UncatchableExceptionType type) {
6225 // Adjust this code if not the case.
6226 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
6227
6228 // Drop sp to the top stack handler.
6229 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
6230 __ ldr(sp, MemOperand(r3));
6231
6232 // Unwind the handlers until the ENTRY handler is found.
6233 Label loop, done;
6234 __ bind(&loop);
6235 // Load the type of the current stack handler.
6236 const int kStateOffset = StackHandlerConstants::kStateOffset;
6237 __ ldr(r2, MemOperand(sp, kStateOffset));
6238 __ cmp(r2, Operand(StackHandler::ENTRY));
6239 __ b(eq, &done);
6240 // Fetch the next handler in the list.
6241 const int kNextOffset = StackHandlerConstants::kNextOffset;
6242 __ ldr(sp, MemOperand(sp, kNextOffset));
6243 __ jmp(&loop);
6244 __ bind(&done);
6245
6246 // Set the top handler address to the next handler past the current ENTRY handler.
6247 ASSERT(StackHandlerConstants::kNextOffset == 0);
6248 __ pop(r2);
6249 __ str(r2, MemOperand(r3));
6250
6251 if (type == OUT_OF_MEMORY) {
6252 // Set external caught exception to false.
6253 ExternalReference external_caught(Top::k_external_caught_exception_address);
6254 __ mov(r0, Operand(false));
6255 __ mov(r2, Operand(external_caught));
6256 __ str(r0, MemOperand(r2));
6257
6258 // Set pending exception and r0 to out of memory exception.
6259 Failure* out_of_memory = Failure::OutOfMemoryException();
6260 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
6261 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
6262 __ str(r0, MemOperand(r2));
6263 }
6264
6265 // Stack layout at this point. See also StackHandlerConstants.
6266 // sp -> state (ENTRY)
6267 // fp
6268 // lr
6269
6270 // Discard handler state (r2 is not used) and restore frame pointer.
6271 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
6272 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
6273 // Before returning we restore the context from the frame pointer if
6274 // not NULL. The frame pointer is NULL in the exception handler of a
6275 // JS entry frame.
6276 __ cmp(fp, Operand(0));
6277 // Set cp to NULL if fp is NULL.
6278 __ mov(cp, Operand(0), LeaveCC, eq);
6279 // Restore cp otherwise.
6280 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
6281#ifdef DEBUG
6282 if (FLAG_debug_code) {
6283 __ mov(lr, Operand(pc));
6284 }
6285#endif
6286 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
6287 __ pop(pc);
6288}
6289
6290
6291void CEntryStub::GenerateCore(MacroAssembler* masm,
6292 Label* throw_normal_exception,
6293 Label* throw_termination_exception,
6294 Label* throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00006295 bool do_gc,
6296 bool always_allocate) {
6297 // r0: result parameter for PerformGC, if any
6298 // r4: number of arguments including receiver (C callee-saved)
6299 // r5: pointer to builtin function (C callee-saved)
6300 // r6: pointer to the first argument (C callee-saved)
6301
6302 if (do_gc) {
6303 // Passing r0.
6304 ExternalReference gc_reference = ExternalReference::perform_gc_function();
6305 __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
6306 }
6307
6308 ExternalReference scope_depth =
6309 ExternalReference::heap_always_allocate_scope_depth();
6310 if (always_allocate) {
6311 __ mov(r0, Operand(scope_depth));
6312 __ ldr(r1, MemOperand(r0));
6313 __ add(r1, r1, Operand(1));
6314 __ str(r1, MemOperand(r0));
6315 }
6316
6317 // Call C built-in.
6318 // r0 = argc, r1 = argv
6319 __ mov(r0, Operand(r4));
6320 __ mov(r1, Operand(r6));
6321
6322 // TODO(1242173): To let the GC traverse the return address of the exit
6323 // frames, we need to know where the return address is. Right now,
6324 // we push it on the stack to be able to find it again, but we never
6325 // restore from it in case of changes, which makes it impossible to
6326 // support moving the C entry code stub. This should be fixed, but currently
6327 // this is OK because the CEntryStub gets generated so early in the V8 boot
6328 // sequence that it is not moving ever.
6329 masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
6330 masm->push(lr);
6331 masm->Jump(r5);
6332
6333 if (always_allocate) {
6334 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
6335 // though (contain the result).
6336 __ mov(r2, Operand(scope_depth));
6337 __ ldr(r3, MemOperand(r2));
6338 __ sub(r3, r3, Operand(1));
6339 __ str(r3, MemOperand(r2));
6340 }
6341
6342 // check for failure result
6343 Label failure_returned;
6344 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
6345 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
6346 __ add(r2, r0, Operand(1));
6347 __ tst(r2, Operand(kFailureTagMask));
6348 __ b(eq, &failure_returned);
6349
6350 // Exit C frame and return.
6351 // r0:r1: result
6352 // sp: stack pointer
6353 // fp: frame pointer
Leon Clarke4515c472010-02-03 11:58:03 +00006354 __ LeaveExitFrame(mode_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006355
6356 // check if we should retry or throw exception
6357 Label retry;
6358 __ bind(&failure_returned);
6359 ASSERT(Failure::RETRY_AFTER_GC == 0);
6360 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
6361 __ b(eq, &retry);
6362
6363 // Special handling of out of memory exceptions.
6364 Failure* out_of_memory = Failure::OutOfMemoryException();
6365 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
6366 __ b(eq, throw_out_of_memory_exception);
6367
6368 // Retrieve the pending exception and clear the variable.
6369 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
6370 __ ldr(r3, MemOperand(ip));
6371 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6372 __ ldr(r0, MemOperand(ip));
6373 __ str(r3, MemOperand(ip));
6374
6375 // Special handling of termination exceptions which are uncatchable
6376 // by JavaScript code.
6377 __ cmp(r0, Operand(Factory::termination_exception()));
6378 __ b(eq, throw_termination_exception);
6379
6380 // Handle normal exception.
6381 __ jmp(throw_normal_exception);
6382
6383 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
6384}
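
// Illustration only: the failure check above, as plain C++. Failure objects
// are tagged so that their low tag bits are all ones, so adding one clears
// exactly those bits; the add/tst pair tests for that. Sketch assuming the
// usual two-bit failure tag (kFailureTagMask == 3).
static inline bool IllustrateHasFailureTag(int32_t raw_result) {
  const int32_t kTagMask = 3;  // Stand-in for kFailureTagMask.
  return ((raw_result + 1) & kTagMask) == 0;
}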
6385
6386
Leon Clarke4515c472010-02-03 11:58:03 +00006387void CEntryStub::Generate(MacroAssembler* masm) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006388 // Called from JavaScript; parameters are on stack as if calling JS function
6389 // r0: number of arguments including receiver
6390 // r1: pointer to builtin function
6391 // fp: frame pointer (restored after C call)
6392 // sp: stack pointer (restored as callee's sp after C call)
6393 // cp: current context (C callee-saved)
6394
Leon Clarke4515c472010-02-03 11:58:03 +00006395 // Result returned in r0 or r0+r1 by default.
6396
Steve Blocka7e24c12009-10-30 11:49:00 +00006397 // NOTE: Invocations of builtins may return failure objects
6398 // instead of a proper result. The builtin entry handles
6399 // this by performing a garbage collection and retrying the
6400 // builtin once.
6401
Steve Blocka7e24c12009-10-30 11:49:00 +00006402 // Enter the exit frame that transitions from JavaScript to C++.
Leon Clarke4515c472010-02-03 11:58:03 +00006403 __ EnterExitFrame(mode_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006404
6405 // r4: number of arguments (C callee-saved)
6406 // r5: pointer to builtin function (C callee-saved)
6407 // r6: pointer to first argument (C callee-saved)
6408
6409 Label throw_normal_exception;
6410 Label throw_termination_exception;
6411 Label throw_out_of_memory_exception;
6412
6413 // Call into the runtime system.
6414 GenerateCore(masm,
6415 &throw_normal_exception,
6416 &throw_termination_exception,
6417 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00006418 false,
6419 false);
6420
6421 // Do space-specific GC and retry runtime call.
6422 GenerateCore(masm,
6423 &throw_normal_exception,
6424 &throw_termination_exception,
6425 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00006426 true,
6427 false);
6428
6429 // Do full GC and retry runtime call one final time.
6430 Failure* failure = Failure::InternalError();
6431 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
6432 GenerateCore(masm,
6433 &throw_normal_exception,
6434 &throw_termination_exception,
6435 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00006436 true,
6437 true);
6438
6439 __ bind(&throw_out_of_memory_exception);
6440 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
6441
6442 __ bind(&throw_termination_exception);
6443 GenerateThrowUncatchable(masm, TERMINATION);
6444
6445 __ bind(&throw_normal_exception);
6446 GenerateThrowTOS(masm);
6447}
6448
6449
6450void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
6451 // r0: code entry
6452 // r1: function
6453 // r2: receiver
6454 // r3: argc
6455 // [sp+0]: argv
6456
6457 Label invoke, exit;
6458
6459 // Called from C, so do not pop argc and args on exit (preserve sp)
6460 // No need to save register-passed args
6461 // Save callee-saved registers (incl. cp and fp), sp, and lr
6462 __ stm(db_w, sp, kCalleeSaved | lr.bit());
6463
6464 // Get address of argv, see stm above.
6465 // r0: code entry
6466 // r1: function
6467 // r2: receiver
6468 // r3: argc
Leon Clarke4515c472010-02-03 11:58:03 +00006469 __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
Steve Blocka7e24c12009-10-30 11:49:00 +00006470
6471 // Push a frame with special values setup to mark it as an entry frame.
6472 // r0: code entry
6473 // r1: function
6474 // r2: receiver
6475 // r3: argc
6476 // r4: argv
6477 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
6478 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
6479 __ mov(r7, Operand(Smi::FromInt(marker)));
6480 __ mov(r6, Operand(Smi::FromInt(marker)));
6481 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
6482 __ ldr(r5, MemOperand(r5));
6483 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
6484
6485 // Setup frame pointer for the frame to be pushed.
6486 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
6487
6488 // Call a faked try-block that does the invoke.
6489 __ bl(&invoke);
6490
6491 // Caught exception: Store result (exception) in the pending
6492 // exception field in the JSEnv and return a failure sentinel.
6493 // Coming in here the fp will be invalid because the PushTryHandler below
6494 // sets it to 0 to signal the existence of the JSEntry frame.
6495 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6496 __ str(r0, MemOperand(ip));
6497 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
6498 __ b(&exit);
6499
6500 // Invoke: Link this frame into the handler chain.
6501 __ bind(&invoke);
6502 // Must preserve r0-r4, r5-r7 are available.
6503 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
6504 // If an exception not caught by another handler occurs, this handler
6505 // returns control to the code after the bl(&invoke) above, which
6506 // restores all kCalleeSaved registers (including cp and fp) to their
6507 // saved values before returning a failure to C.
6508
6509 // Clear any pending exceptions.
6510 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
6511 __ ldr(r5, MemOperand(ip));
6512 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
6513 __ str(r5, MemOperand(ip));
6514
6515 // Invoke the function by calling through JS entry trampoline builtin.
6516 // Notice that we cannot store a reference to the trampoline code directly in
6517 // this stub, because runtime stubs are not traversed when doing GC.
6518
6519 // Expected registers by Builtins::JSEntryTrampoline
6520 // r0: code entry
6521 // r1: function
6522 // r2: receiver
6523 // r3: argc
6524 // r4: argv
6525 if (is_construct) {
6526 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
6527 __ mov(ip, Operand(construct_entry));
6528 } else {
6529 ExternalReference entry(Builtins::JSEntryTrampoline);
6530 __ mov(ip, Operand(entry));
6531 }
6532 __ ldr(ip, MemOperand(ip)); // deref address
6533
6534 // Branch and link to JSEntryTrampoline. We don't use the double underscore
6535 // macro for the add instruction because we don't want the coverage tool
6536 // inserting instructions here after we read the pc.
6537 __ mov(lr, Operand(pc));
6538 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
6539
6540 // Unlink this frame from the handler chain. When reading the
6541 // address of the next handler, there is no need to use the address
6542 // displacement since the current stack pointer (sp) points directly
6543 // to the stack handler.
6544 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
6545 __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
6546 __ str(r3, MemOperand(ip));
6547 // No need to restore registers
6548 __ add(sp, sp, Operand(StackHandlerConstants::kSize));
6549
6550
6551 __ bind(&exit); // r0 holds result
6552 // Restore the top frame descriptors from the stack.
6553 __ pop(r3);
6554 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
6555 __ str(r3, MemOperand(ip));
6556
6557 // Reset the stack to the callee saved registers.
6558 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
6559
6560 // Restore callee-saved registers and return.
6561#ifdef DEBUG
6562 if (FLAG_debug_code) {
6563 __ mov(lr, Operand(pc));
6564 }
6565#endif
6566 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
6567}
6568
6569
6570// This stub performs an instanceof, calling the builtin function if
6571// necessary. Uses r1 for the object, r0 for the function that it may
6572// be an instance of (these are fetched from the stack).
6573void InstanceofStub::Generate(MacroAssembler* masm) {
6574 // Get the object - slow case for smis (we may need to throw an exception
6575 // depending on the rhs).
6576 Label slow, loop, is_instance, is_not_instance;
6577 __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6578 __ BranchOnSmi(r0, &slow);
6579
6580 // Check that the left hand is a JS object and put map in r3.
6581 __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
6582 __ b(lt, &slow);
6583 __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
6584 __ b(gt, &slow);
6585
6586 // Get the prototype of the function (r4 is result, r2 is scratch).
6587 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
6588 __ TryGetFunctionPrototype(r1, r4, r2, &slow);
6589
6590 // Check that the function prototype is a JS object.
6591 __ BranchOnSmi(r4, &slow);
6592 __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
6593 __ b(lt, &slow);
6594 __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
6595 __ b(gt, &slow);
6596
6597 // Register mapping: r3 is object map and r4 is function prototype.
6598 // Get prototype of object into r2.
6599 __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
6600
6601 // Loop through the prototype chain looking for the function prototype.
6602 __ bind(&loop);
6603 __ cmp(r2, Operand(r4));
6604 __ b(eq, &is_instance);
6605 __ LoadRoot(ip, Heap::kNullValueRootIndex);
6606 __ cmp(r2, ip);
6607 __ b(eq, &is_not_instance);
6608 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
6609 __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
6610 __ jmp(&loop);
6611
6612 __ bind(&is_instance);
6613 __ mov(r0, Operand(Smi::FromInt(0)));
6614 __ pop();
6615 __ pop();
6616 __ mov(pc, Operand(lr)); // Return.
6617
6618 __ bind(&is_not_instance);
6619 __ mov(r0, Operand(Smi::FromInt(1)));
6620 __ pop();
6621 __ pop();
6622 __ mov(pc, Operand(lr)); // Return.
6623
6624 // Slow-case. Tail call builtin.
6625 __ bind(&slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00006626 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
6627}
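
// Illustration only: the loop above is the usual prototype chain walk behind
// instanceof, sketched here with hypothetical types (not V8's object model).
// The stub returns Smi 0 for "is an instance" and Smi 1 for "is not",
// mirroring true/false below; hitting null ends the walk.
struct IllustrativeProto { const IllustrativeProto* prototype; };
static bool IllustrateInstanceOf(const IllustrativeProto* object_proto,
                                 const IllustrativeProto* function_proto) {
  for (const IllustrativeProto* p = object_proto; p != NULL;
       p = p->prototype) {
    if (p == function_proto) return true;  // Found on the prototype chain.
  }
  return false;  // Fell off the end of the chain.
}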
6628
6629
6630void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
6631 // Check if the calling frame is an arguments adaptor frame.
6632 Label adaptor;
6633 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6634 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6635 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6636 __ b(eq, &adaptor);
6637
6638 // Nothing to do: The formal number of parameters has already been
6639 // passed in register r0 by the calling function. Just return it.
6640 __ Jump(lr);
6641
6642 // Arguments adaptor case: Read the arguments length from the
6643 // adaptor frame and return it.
6644 __ bind(&adaptor);
6645 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6646 __ Jump(lr);
6647}
6648
6649
6650void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6651 // The displacement is the offset of the last parameter (if any)
6652 // relative to the frame pointer.
6653 static const int kDisplacement =
6654 StandardFrameConstants::kCallerSPOffset - kPointerSize;
6655
6656 // Check that the key is a smi.
6657 Label slow;
6658 __ BranchOnNotSmi(r1, &slow);
6659
6660 // Check if the calling frame is an arguments adaptor frame.
6661 Label adaptor;
6662 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6663 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6664 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6665 __ b(eq, &adaptor);
6666
6667 // Check index against formal parameters count limit passed in
Steve Blockd0582a62009-12-15 09:54:21 +00006668 // through register r0. Use unsigned comparison to get negative
Steve Blocka7e24c12009-10-30 11:49:00 +00006669 // check for free.
6670 __ cmp(r1, r0);
6671 __ b(cs, &slow);
6672
6673 // Read the argument from the stack and return it.
6674 __ sub(r3, r0, r1);
6675 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
6676 __ ldr(r0, MemOperand(r3, kDisplacement));
6677 __ Jump(lr);
6678
6679 // Arguments adaptor case: Check index against actual arguments
6680 // limit found in the arguments adaptor frame. Use unsigned
6681 // comparison to get negative check for free.
6682 __ bind(&adaptor);
6683 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6684 __ cmp(r1, r0);
6685 __ b(cs, &slow);
6686
6687 // Read the argument from the adaptor frame and return it.
6688 __ sub(r3, r0, r1);
6689 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
6690 __ ldr(r0, MemOperand(r3, kDisplacement));
6691 __ Jump(lr);
6692
6693 // Slow-case: Handle non-smi or out-of-bounds access to arguments
6694 // by calling the runtime system.
6695 __ bind(&slow);
6696 __ push(r1);
6697 __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
6698}
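
// Illustration only: the bounds checks above compare the Smi tagged index
// against the Smi tagged length as unsigned values, so a negative index
// (which looks like a huge unsigned number) fails the same test. Plain C++
// sketch of the predicate:
static inline bool IllustrateIndexInBounds(int32_t tagged_key,
                                           int32_t tagged_length) {
  return static_cast<uint32_t>(tagged_key) <
         static_cast<uint32_t>(tagged_length);
}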
6699
6700
6701void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
6702 // Check if the calling frame is an arguments adaptor frame.
6703 Label runtime;
6704 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
6705 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
6706 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6707 __ b(ne, &runtime);
6708
6709 // Patch the arguments.length and the parameters pointer.
6710 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
6711 __ str(r0, MemOperand(sp, 0 * kPointerSize));
6712 __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
6713 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
6714 __ str(r3, MemOperand(sp, 1 * kPointerSize));
6715
6716 // Do the runtime call to allocate the arguments object.
6717 __ bind(&runtime);
6718 __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
6719}
6720
6721
6722void CallFunctionStub::Generate(MacroAssembler* masm) {
6723 Label slow;
Leon Clarkee46be812010-01-19 14:06:41 +00006724
6725 // If the receiver might be a value (string, number or boolean) check for this
6726 // and box it if it is.
6727 if (ReceiverMightBeValue()) {
6728 // Get the receiver from the stack.
6729 // function, receiver [, arguments]
6730 Label receiver_is_value, receiver_is_js_object;
6731 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
6732
6733 // Check if receiver is a smi (which is a number value).
6734 __ BranchOnSmi(r1, &receiver_is_value);
6735
6736 // Check if the receiver is a valid JS object.
6737 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
6738 __ b(ge, &receiver_is_js_object);
6739
6740 // Call the runtime to box the value.
6741 __ bind(&receiver_is_value);
6742 __ EnterInternalFrame();
6743 __ push(r1);
6744 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
6745 __ LeaveInternalFrame();
6746 __ str(r0, MemOperand(sp, argc_ * kPointerSize));
6747
6748 __ bind(&receiver_is_js_object);
6749 }
6750
Steve Blocka7e24c12009-10-30 11:49:00 +00006751 // Get the function to call from the stack.
6752 // function, receiver [, arguments]
6753 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
6754
6755 // Check that the function is really a JavaScript function.
6756 // r1: pushed function (to be verified)
6757 __ BranchOnSmi(r1, &slow);
6758 // Get the map of the function object.
6759 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
6760 __ b(ne, &slow);
6761
6762 // Fast-case: Invoke the function now.
6763 // r1: pushed function
6764 ParameterCount actual(argc_);
6765 __ InvokeFunction(r1, actual, JUMP_FUNCTION);
6766
6767 // Slow-case: Non-function called.
6768 __ bind(&slow);
6769 __ mov(r0, Operand(argc_)); // Setup the number of arguments.
6770 __ mov(r2, Operand(0));
6771 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
6772 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
6773 RelocInfo::CODE_TARGET);
6774}
6775
6776
Leon Clarkee46be812010-01-19 14:06:41 +00006777const char* CompareStub::GetName() {
6778 switch (cc_) {
6779 case lt: return "CompareStub_LT";
6780 case gt: return "CompareStub_GT";
6781 case le: return "CompareStub_LE";
6782 case ge: return "CompareStub_GE";
6783 case ne: {
6784 if (strict_) {
6785 if (never_nan_nan_) {
6786 return "CompareStub_NE_STRICT_NO_NAN";
6787 } else {
6788 return "CompareStub_NE_STRICT";
6789 }
6790 } else {
6791 if (never_nan_nan_) {
6792 return "CompareStub_NE_NO_NAN";
6793 } else {
6794 return "CompareStub_NE";
6795 }
6796 }
6797 }
6798 case eq: {
6799 if (strict_) {
6800 if (never_nan_nan_) {
6801 return "CompareStub_EQ_STRICT_NO_NAN";
6802 } else {
6803 return "CompareStub_EQ_STRICT";
6804 }
6805 } else {
6806 if (never_nan_nan_) {
6807 return "CompareStub_EQ_NO_NAN";
6808 } else {
6809 return "CompareStub_EQ";
6810 }
6811 }
6812 }
6813 default: return "CompareStub";
6814 }
6815}
6816
6817
Steve Blocka7e24c12009-10-30 11:49:00 +00006818int CompareStub::MinorKey() {
Leon Clarkee46be812010-01-19 14:06:41 +00006819 // Encode the three parameters in a unique 16 bit value.
6820 ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
6821 int nnn_value = (never_nan_nan_ ? 2 : 0);
6822 if (cc_ != eq) nnn_value = 0; // Avoid duplicate stubs.
6823 return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00006824}
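
// Illustration only: how the minor key above packs its three parameters. On
// ARM the Condition enum values occupy bits 28..31 (the instruction's
// condition field), so shifting right by 26 leaves the two low bits free for
// the flags. Hypothetical helper; the real code also forces the NaN flag to
// zero for non-equality conditions to avoid duplicate stubs.
static inline int IllustrateCompareMinorKey(unsigned condition_bits,
                                            bool strict,
                                            bool never_nan_nan) {
  return static_cast<int>(condition_bits >> 26) |
         (never_nan_nan ? 2 : 0) |
         (strict ? 1 : 0);
}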
6825
6826
Andrei Popescu31002712010-02-23 13:46:05 +00006827void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
6828 Register dest,
6829 Register src,
6830 Register count,
6831 Register scratch,
6832 bool ascii) {
6833 Label loop;
6834 Label done;
6835 // This loop just copies one character at a time, as it is only used for very
6836 // short strings.
6837 if (!ascii) {
6838 __ add(count, count, Operand(count), SetCC);
6839 } else {
6840 __ cmp(count, Operand(0));
6841 }
6842 __ b(eq, &done);
6843
6844 __ bind(&loop);
6845 __ ldrb(scratch, MemOperand(src, 1, PostIndex));
6846 // Perform sub between load and dependent store to get the load time to
6847 // complete.
6848 __ sub(count, count, Operand(1), SetCC);
6849 __ strb(scratch, MemOperand(dest, 1, PostIndex));
6850 // Loop back unless this was the last iteration.
6851 __ b(gt, &loop);
6852
6853 __ bind(&done);
6854}
6855
6856
6857enum CopyCharactersFlags {
6858 COPY_ASCII = 1,
6859 DEST_ALWAYS_ALIGNED = 2
6860};
6861
6862
6863void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
6864 Register dest,
6865 Register src,
6866 Register count,
6867 Register scratch1,
6868 Register scratch2,
6869 Register scratch3,
6870 Register scratch4,
6871 Register scratch5,
6872 int flags) {
6873 bool ascii = (flags & COPY_ASCII) != 0;
6874 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
6875
6876 if (dest_always_aligned && FLAG_debug_code) {
6877 // Check that destination is actually word aligned if the flag says
6878 // that it is.
6879 __ tst(dest, Operand(kPointerAlignmentMask));
6880 __ Check(eq, "Destination of copy not aligned.");
6881 }
6882
6883 const int kReadAlignment = 4;
6884 const int kReadAlignmentMask = kReadAlignment - 1;
6885 // Ensure that reading an entire aligned word containing the last character
6886 // of a string will not read outside the allocated area (because we pad up
6887 // to kObjectAlignment).
6888 ASSERT(kObjectAlignment >= kReadAlignment);
6889 // Assumes word reads and writes are little endian.
6890 // Nothing to do for zero characters.
6891 Label done;
6892 if (!ascii) {
6893 __ add(count, count, Operand(count), SetCC);
6894 } else {
6895 __ cmp(count, Operand(0));
6896 }
6897 __ b(eq, &done);
6898
6899 // Assume that you cannot read (or write) unaligned.
6900 Label byte_loop;
6901 // Must copy at least eight bytes, otherwise just do it one byte at a time.
6902 __ cmp(count, Operand(8));
6903 __ add(count, dest, Operand(count));
6904 Register limit = count; // Read until src equals this.
6905 __ b(lt, &byte_loop);
6906
6907 if (!dest_always_aligned) {
6908 // Align dest by byte copying. Copies between zero and three bytes.
6909 __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
6910 Label dest_aligned;
6911 __ b(eq, &dest_aligned);
6912 __ cmp(scratch4, Operand(2));
6913 __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
6914 __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
6915 __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
6916 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
6917 __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
6918 __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
6919 __ bind(&dest_aligned);
6920 }
6921
6922 Label simple_loop;
6923
6924 __ sub(scratch4, dest, Operand(src));
6925 __ and_(scratch4, scratch4, Operand(0x03), SetCC);
6926 __ b(eq, &simple_loop);
6927 // Shift register is number of bits in a source word that
6928 // must be combined with bits in the next source word in order
6929 // to create a destination word.
6930
6931 // Complex loop for src/dst that are not aligned the same way.
6932 {
6933 Label loop;
6934 __ mov(scratch4, Operand(scratch4, LSL, 3));
6935 Register left_shift = scratch4;
6936 __ and_(src, src, Operand(~3)); // Round down to load previous word.
6937 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
6938 // Store the "shift" most significant bits of scratch in the least
6939 // significant bits (i.e., shift down by (32-shift)).
6940 __ rsb(scratch2, left_shift, Operand(32));
6941 Register right_shift = scratch2;
6942 __ mov(scratch1, Operand(scratch1, LSR, right_shift));
6943
6944 __ bind(&loop);
6945 __ ldr(scratch3, MemOperand(src, 4, PostIndex));
6946 __ sub(scratch5, limit, Operand(dest));
6947 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
6948 __ str(scratch1, MemOperand(dest, 4, PostIndex));
6949 __ mov(scratch1, Operand(scratch3, LSR, right_shift));
6950 // Loop if four or more bytes left to copy.
6951 // Compare to eight, because we did the subtract before increasing dst.
6952 __ sub(scratch5, scratch5, Operand(8), SetCC);
6953 __ b(ge, &loop);
6954 }
6955 // There are now between zero and three bytes left to copy (scratch5 holds
6956 // that number negated), and between one and three bytes already read into
6957 // scratch1 (eight times that number in scratch4). We may have read past
6958 // the end of the string, but because objects are aligned, we have not read
6959 // past the end of the object.
6960 // Find the minimum of remaining characters to move and preloaded characters
6961 // and write those as bytes.
6962 __ add(scratch5, scratch5, Operand(4), SetCC);
6963 __ b(eq, &done);
6964 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
6965 // Move the minimum of bytes read and bytes left to copy to scratch5.
6966 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
6967 // Between one and three (value in scratch5) characters already read into
6968 // scratch ready to write.
6969 __ cmp(scratch5, Operand(2));
6970 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
6971 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
6972 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
6973 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
6974 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
6975 // Copy any remaining bytes.
6976 __ b(&byte_loop);
6977
6978 // Simple loop.
6979 // Copy words from src to dst, until less than four bytes left.
6980 // Both src and dest are word aligned.
6981 __ bind(&simple_loop);
6982 {
6983 Label loop;
6984 __ bind(&loop);
6985 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
6986 __ sub(scratch3, limit, Operand(dest));
6987 __ str(scratch1, MemOperand(dest, 4, PostIndex));
6988 // Compare to 8, not 4, because we do the subtraction before increasing
6989 // dest.
6990 __ cmp(scratch3, Operand(8));
6991 __ b(ge, &loop);
6992 }
6993
6994 // Copy bytes from src to dst until dst hits limit.
6995 __ bind(&byte_loop);
6996 __ cmp(dest, Operand(limit));
6997 __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
6998 __ b(ge, &done);
6999 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
7000 __ b(&byte_loop);
7001
7002 __ bind(&done);
7003}
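
// Illustration only: the shift-and-combine scheme of the misaligned loop
// above, as plain C++. Each destination word is assembled from the tail of
// the previously loaded source word and the head of the next one. Sketch;
// assumes little-endian words, a word aligned dest, a src that is *not* word
// aligned (the aligned case takes the simple loop), and tolerates reading a
// few bytes past the last source byte, which the stub can do because heap
// objects are padded to word size.
static void IllustrateMisalignedWordCopy(uint8_t* dest,
                                         const uint8_t* src,
                                         int word_count) {
  int offset = static_cast<int>(reinterpret_cast<uintptr_t>(src) & 3);  // 1..3
  const uint32_t* aligned_src =
      reinterpret_cast<const uint32_t*>(src - offset);
  int right_shift = offset * 8;       // Bits of the first word to discard.
  int left_shift = 32 - right_shift;  // Where the next word's bytes land.
  uint32_t carry = *aligned_src++ >> right_shift;
  uint32_t* dest_words = reinterpret_cast<uint32_t*>(dest);
  for (int i = 0; i < word_count; i++) {
    uint32_t next = *aligned_src++;
    dest_words[i] = carry | (next << left_shift);
    carry = next >> right_shift;
  }
  // Any remaining 0..3 bytes are copied one at a time, as in the byte loop.
}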
7004
7005
7006void SubStringStub::Generate(MacroAssembler* masm) {
7007 Label runtime;
7008
7009 // Stack frame on entry.
7010 // lr: return address
7011 // sp[0]: to
7012 // sp[4]: from
7013 // sp[8]: string
7014
7015 // This stub is called from the native-call %_SubString(...), so
7016 // nothing can be assumed about the arguments. It is tested that:
7017 // "string" is a sequential string,
7018 // both "from" and "to" are smis, and
7019 // 0 <= from <= to <= string.length.
7020 // If any of these assumptions fail, we call the runtime system.
7021
7022 static const int kToOffset = 0 * kPointerSize;
7023 static const int kFromOffset = 1 * kPointerSize;
7024 static const int kStringOffset = 2 * kPointerSize;
7025
7026
7027 // Check bounds and smi-ness.
7028 __ ldr(r7, MemOperand(sp, kToOffset));
7029 __ ldr(r6, MemOperand(sp, kFromOffset));
7030 ASSERT_EQ(0, kSmiTag);
7031 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
7032 // I.e., arithmetic shift right by one un-smi-tags.
7033 __ mov(r2, Operand(r7, ASR, 1), SetCC);
7034 __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
7035 // If either r7 or r6 had the smi tag bit set, then carry is set now.
7036 __ b(cs, &runtime); // Either "from" or "to" is not a smi.
7037 __ b(mi, &runtime); // From is negative.
7038
7039 __ sub(r2, r2, Operand(r3), SetCC);
7040 __ b(mi, &runtime); // Fail if from > to.
7041 // Handle sub-strings of length 2 and less in the runtime system.
7042 __ cmp(r2, Operand(2));
7043 __ b(le, &runtime);
7044
7045 // r2: length
7046 // r6: from (smi)
7047 // r7: to (smi)
7048
7049 // Make sure first argument is a sequential (or flat) string.
7050 __ ldr(r5, MemOperand(sp, kStringOffset));
7051 ASSERT_EQ(0, kSmiTag);
7052 __ tst(r5, Operand(kSmiTagMask));
7053 __ b(eq, &runtime);
7054 Condition is_string = masm->IsObjectStringType(r5, r1);
7055 __ b(NegateCondition(is_string), &runtime);
7056
7057 // r1: instance type
7058 // r2: length
7059 // r5: string
7060 // r6: from (smi)
7061 // r7: to (smi)
7062 Label seq_string;
7063 __ and_(r4, r1, Operand(kStringRepresentationMask));
7064 ASSERT(kSeqStringTag < kConsStringTag);
7065 ASSERT(kExternalStringTag > kConsStringTag);
7066 __ cmp(r4, Operand(kConsStringTag));
7067 __ b(gt, &runtime); // External strings go to runtime.
7068 __ b(lt, &seq_string); // Sequential strings are handled directly.
7069
7070 // Cons string. Try to recurse (once) on the first substring.
7071 // (This adds a little more generality than necessary to handle flattened
7072 // cons strings, but not much).
7073 __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
7074 __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
7075 __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
7076 __ tst(r1, Operand(kStringRepresentationMask));
7077 ASSERT_EQ(0, kSeqStringTag);
7078 __ b(ne, &runtime); // Cons and External strings go to runtime.
7079
7080 // Definitely a sequential string.
7081 __ bind(&seq_string);
7082
7083 // r1: instance type.
7084 // r2: length
7085 // r5: string
7086 // r6: from (smi)
7087 // r7: to (smi)
7088 __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
7089 __ cmp(r4, Operand(r7, ASR, 1));
7090 __ b(lt, &runtime); // Fail if to > length.
7091
7092 // r1: instance type.
7093 // r2: result string length.
7094 // r5: string.
7095 // r6: from offset (smi)
7096 // Check for flat ascii string.
7097 Label non_ascii_flat;
7098 __ tst(r1, Operand(kStringEncodingMask));
7099 ASSERT_EQ(0, kTwoByteStringTag);
7100 __ b(eq, &non_ascii_flat);
7101
7102 // Allocate the result.
7103 __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
7104
7105 // r0: result string.
7106 // r2: result string length.
7107 // r5: string.
7108 // r6: from offset (smi)
7109 // Locate first character of result.
7110 __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
7111 // Locate 'from' character of string.
7112 __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
7113 __ add(r5, r5, Operand(r6, ASR, 1));
7114
7115 // r0: result string.
7116 // r1: first character of result string.
7117 // r2: result string length.
7118 // r5: first character of sub string to copy.
7119 ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
7120 GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
7121 COPY_ASCII | DEST_ALWAYS_ALIGNED);
7122 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
7123 __ add(sp, sp, Operand(3 * kPointerSize));
7124 __ Ret();
7125
7126 __ bind(&non_ascii_flat);
7127 // r2: result string length.
7128 // r5: string.
7129 // r6: from offset (smi)
7130 // Check for flat two byte string.
7131
7132 // Allocate the result.
7133 __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
7134
7135 // r0: result string.
7136 // r2: result string length.
7137 // r5: string.
7138 // Locate first character of result.
7139 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
7140 // Locate 'from' character of string.
7141 __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
7142 // As "from" is a smi it is 2 times the value which matches the size of a two
7143 // byte character.
7144 __ add(r5, r5, Operand(r6));
7145
7146 // r0: result string.
7147 // r1: first character of result.
7148 // r2: result length.
7149 // r5: first character of string to copy.
7150 ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
7151 GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
7152 DEST_ALWAYS_ALIGNED);
7153 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
7154 __ add(sp, sp, Operand(3 * kPointerSize));
7155 __ Ret();
7156
7157 // Just jump to runtime to create the sub string.
7158 __ bind(&runtime);
7159 __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
7160}
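
// Illustration only: the ASR/SetCC pair above untags "to" and "from" while
// folding the Smi checks into the carry flag: shifting right by one moves
// the tag bit into C, and the second shift only executes if C is clear, so
// afterwards C is set exactly when either input was not a Smi. The plain
// predicate, without the flag tricks (assuming the tag is the low bit):
static inline bool IllustrateBothSmis(int32_t to_tagged, int32_t from_tagged) {
  return ((to_tagged | from_tagged) & 1) == 0;  // Low (tag) bit clear in both.
}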
Leon Clarked91b9f72010-01-27 17:25:45 +00007161
7162
7163void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
7164 Register left,
7165 Register right,
7166 Register scratch1,
7167 Register scratch2,
7168 Register scratch3,
7169 Register scratch4) {
7170 Label compare_lengths;
7171 // Find minimum length and length difference.
7172 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
7173 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
7174 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
7175 Register length_delta = scratch3;
7176 __ mov(scratch1, scratch2, LeaveCC, gt);
7177 Register min_length = scratch1;
7178 __ tst(min_length, Operand(min_length));
7179 __ b(eq, &compare_lengths);
7180
7181 // Setup registers so that we only need to increment one register
7182 // in the loop.
7183 __ add(scratch2, min_length,
7184 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
7185 __ add(left, left, Operand(scratch2));
7186 __ add(right, right, Operand(scratch2));
7187 // Registers left and right point to the min_length character of the strings.
7188 __ rsb(min_length, min_length, Operand(-1));
7189 Register index = min_length;
7190 // Index starts at -min_length.
7191
7192 {
7193 // Compare loop.
7194 Label loop;
7195 __ bind(&loop);
7196 // Compare characters.
7197 __ add(index, index, Operand(1), SetCC);
7198 __ ldrb(scratch2, MemOperand(left, index), ne);
7199 __ ldrb(scratch4, MemOperand(right, index), ne);
7200 // Skip to compare lengths with eq condition true.
7201 __ b(eq, &compare_lengths);
7202 __ cmp(scratch2, scratch4);
7203 __ b(eq, &loop);
7204 // Fallthrough with eq condition false.
7205 }
7206 // Compare lengths - strings up to min-length are equal.
7207 __ bind(&compare_lengths);
7208 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
7209 // Use zero length_delta as result.
7210 __ mov(r0, Operand(length_delta), SetCC, eq);
7211 // Fall through to here if characters compare not-equal.
7212 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
7213 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
7214 __ Ret();
7215}
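
// Illustration only: the compare loop above walks both strings with a single
// negative index that counts up towards zero, so only one register needs to
// be updated per iteration. Equivalent C++ sketch returning <0, 0 or >0,
// like the stub's LESS/EQUAL/GREATER Smis:
static int IllustrateFlatAsciiCompare(const unsigned char* left,
                                      int left_length,
                                      const unsigned char* right,
                                      int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  const unsigned char* left_end = left + min_length;    // Past common part.
  const unsigned char* right_end = right + min_length;
  for (int index = -min_length; index != 0; index++) {
    if (left_end[index] != right_end[index]) {
      return left_end[index] < right_end[index] ? -1 : 1;
    }
  }
  return left_length - right_length;  // Equal prefixes: compare lengths.
}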
7216
7217
7218void StringCompareStub::Generate(MacroAssembler* masm) {
7219 Label runtime;
7220
7221 // Stack frame on entry.
Andrei Popescu31002712010-02-23 13:46:05 +00007222 // sp[0]: right string
7223 // sp[4]: left string
7224 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
7225 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
Leon Clarked91b9f72010-01-27 17:25:45 +00007226
7227 Label not_same;
7228 __ cmp(r0, r1);
7229 __ b(ne, &not_same);
7230 ASSERT_EQ(0, EQUAL);
7231 ASSERT_EQ(0, kSmiTag);
7232 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
7233 __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
7234 __ add(sp, sp, Operand(2 * kPointerSize));
7235 __ Ret();
7236
7237 __ bind(&not_same);
7238
7239 // Check that both objects are sequential ascii strings.
7240 __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
7241
7242 // Compare flat ascii strings natively. Remove arguments from stack first.
7243 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
7244 __ add(sp, sp, Operand(2 * kPointerSize));
7245 GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
7246
7247 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
7248 // tagged as a small integer.
7249 __ bind(&runtime);
7250 __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
7251}
7252
7253
Andrei Popescu31002712010-02-23 13:46:05 +00007254void StringAddStub::Generate(MacroAssembler* masm) {
7255 Label string_add_runtime;
7256 // Stack on entry:
7257 // sp[0]: second argument.
7258 // sp[4]: first argument.
7259
7260 // Load the two arguments.
7261 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
7262 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
7263
7264 // Make sure that both arguments are strings if not known in advance.
7265 if (string_check_) {
7266 ASSERT_EQ(0, kSmiTag);
7267 __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
7268 // Load instance types.
7269 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
7270 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
7271 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
7272 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
7273 ASSERT_EQ(0, kStringTag);
7274 // If either is not a string, go to runtime.
7275 __ tst(r4, Operand(kIsNotStringMask));
7276 __ tst(r5, Operand(kIsNotStringMask), eq);
7277 __ b(ne, &string_add_runtime);
7278 }
7279
7280 // Both arguments are strings.
7281 // r0: first string
7282 // r1: second string
7283 // r4: first string instance type (if string_check_)
7284 // r5: second string instance type (if string_check_)
7285 {
7286 Label strings_not_empty;
7287 // Check if either of the strings is empty. In that case return the other.
7288 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
7289 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
7290 __ cmp(r2, Operand(0)); // Test if first string is empty.
7291 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
7292 __ cmp(r3, Operand(0), ne); // Else test if second string is empty.
7293 __ b(ne, &strings_not_empty); // If either string was empty, return r0.
7294
7295 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
7296 __ add(sp, sp, Operand(2 * kPointerSize));
7297 __ Ret();
7298
7299 __ bind(&strings_not_empty);
7300 }
7301
7302 // Both strings are non-empty.
7303 // r0: first string
7304 // r1: second string
7305 // r2: length of first string
7306 // r3: length of second string
7307 // r4: first string instance type (if string_check_)
7308 // r5: second string instance type (if string_check_)
7309 // Look at the length of the result of adding the two strings.
7310 Label string_add_flat_result;
7311 // Adding two lengths can't overflow.
7312 ASSERT(String::kMaxLength * 2 > String::kMaxLength);
7313 __ add(r6, r2, Operand(r3));
7314 // Use the runtime system when adding two one-character strings, as it
7315 // contains optimizations for this specific case using the symbol table.
7316 __ cmp(r6, Operand(2));
7317 __ b(eq, &string_add_runtime);
7318 // Check if resulting string will be flat.
7319 __ cmp(r6, Operand(String::kMinNonFlatLength));
7320 __ b(lt, &string_add_flat_result);
7321 // Handle exceptionally long strings in the runtime system.
7322 ASSERT((String::kMaxLength & 0x80000000) == 0);
7323 ASSERT(IsPowerOf2(String::kMaxLength + 1));
7324 // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
7325 __ cmp(r6, Operand(String::kMaxLength + 1));
7326 __ b(hs, &string_add_runtime);
7327
7328 // If result is not supposed to be flat, allocate a cons string object.
7329 // If both strings are ascii the result is an ascii cons string.
7330 if (!string_check_) {
7331 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
7332 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
7333 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
7334 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
7335 }
7336 Label non_ascii, allocated;
7337 ASSERT_EQ(0, kTwoByteStringTag);
7338 __ tst(r4, Operand(kStringEncodingMask));
7339 __ tst(r5, Operand(kStringEncodingMask), ne);
7340 __ b(eq, &non_ascii);
7341
7342 // Allocate an ASCII cons string.
7343 __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
7344 __ bind(&allocated);
7345 // Fill the fields of the cons string.
7346 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
7347 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
7348 __ mov(r0, Operand(r7));
7349 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
7350 __ add(sp, sp, Operand(2 * kPointerSize));
7351 __ Ret();
7352
7353 __ bind(&non_ascii);
7354 // Allocate a two byte cons string.
7355 __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
7356 __ jmp(&allocated);
7357
7358 // Handle creating a flat result. First check that both strings are
7359 // sequential and that they have the same encoding.
7360 // r0: first string
7361 // r1: second string
7362 // r2: length of first string
7363 // r3: length of second string
7364 // r4: first string instance type (if string_check_)
7365 // r5: second string instance type (if string_check_)
7366 // r6: sum of lengths.
7367 __ bind(&string_add_flat_result);
7368 if (!string_check_) {
7369 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
7370 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
7371 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
7372 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
7373 }
7374 // Check that both strings are sequential.
7375 ASSERT_EQ(0, kSeqStringTag);
7376 __ tst(r4, Operand(kStringRepresentationMask));
7377 __ tst(r5, Operand(kStringRepresentationMask), eq);
7378 __ b(ne, &string_add_runtime);
7379 // Now check if both strings have the same encoding (ASCII/Two-byte).
7380 // r0: first string.
7381 // r1: second string.
7382 // r2: length of first string.
7383 // r3: length of second string.
7384 // r6: sum of lengths.
7385 Label non_ascii_string_add_flat_result;
7386 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
7387 __ eor(r7, r4, Operand(r5));
7388 __ tst(r7, Operand(kStringEncodingMask));
7389 __ b(ne, &string_add_runtime);
7390 // And see if it's ASCII or two-byte.
7391 __ tst(r4, Operand(kStringEncodingMask));
7392 __ b(eq, &non_ascii_string_add_flat_result);
7393
7394 // Both strings are sequential ASCII strings. We also know that they are
7395 // short (since the sum of the lengths is less than kMinNonFlatLength).
7396 __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
7397 // Locate first character of result.
7398 __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
7399 // Locate first character of first argument.
7400 __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
7401 // r0: first character of first string.
7402 // r1: second string.
7403 // r2: length of first string.
7404 // r3: length of second string.
7405 // r6: first character of result.
7406 // r7: result string.
7407 GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
7408
7409 // Load second argument and locate first character.
7410 __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
7411 // r1: first character of second string.
7412 // r3: length of second string.
7413 // r6: next character of result.
7414 // r7: result string.
7415 GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
7416 __ mov(r0, Operand(r7));
7417 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
7418 __ add(sp, sp, Operand(2 * kPointerSize));
7419 __ Ret();
7420
7421 __ bind(&non_ascii_string_add_flat_result);
7422 // Both strings are sequential two byte strings.
7423 // r0: first string.
7424 // r1: second string.
7425 // r2: length of first string.
7426 // r3: length of second string.
7427 // r6: sum of length of strings.
7428 __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
7429 // r0: first string.
7430 // r1: second string.
7431 // r2: length of first string.
7432 // r3: length of second string.
7433 // r7: result string.
7434
7435 // Locate first character of result.
7436 __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
7437 // Locate first character of first argument.
7438 __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
7439
7440 // r0: first character of first string.
7441 // r1: second string.
7442 // r2: length of first string.
7443 // r3: length of second string.
7444 // r6: first character of result.
7445 // r7: result string.
7446 GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
7447
7448 // Locate first character of second argument.
7449 __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
7450
7451 // r1: first character of second string.
7452 // r3: length of second string.
7453 // r6: next character of result (after copy of first string).
7454 // r7: result string.
7455 GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
7456
7457 __ mov(r0, Operand(r7));
7458 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
7459 __ add(sp, sp, Operand(2 * kPointerSize));
7460 __ Ret();
7461
7462 // Just jump to runtime to add the two strings.
7463 __ bind(&string_add_runtime);
7464 __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
7465}
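
// Illustration only: the encoding checks in the flat-result path above. The
// two instance types are XORed so that a single bit test tells whether both
// strings use the same encoding; a second test on either type then selects
// the ascii or two-byte path. Plain C++ sketch with the mask passed in:
static inline bool IllustrateSameEncoding(int type1, int type2,
                                          int encoding_mask) {
  return ((type1 ^ type2) & encoding_mask) == 0;
}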
7466
7467
Steve Blocka7e24c12009-10-30 11:49:00 +00007468#undef __
7469
7470} } // namespace v8::internal