// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "jump-target-light-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
#include "virtual-frame-arm-inl.h"

namespace v8 {
namespace internal {

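// Shorthand used throughout this file: __ emits instructions through the
// code generator's MacroAssembler (see ACCESS_MASM).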
#define __ ACCESS_MASM(masm_)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);


// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

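// Save the registers the deferred code needs preserved: an entry of kPush is
// pushed onto the stack, while any other non-kIgnore entry is treated as a
// frame offset and the register is stored there unless it is already synced.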
void DeferredCode::SaveRegisters() {
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order due to the stack.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      true_target_(NULL),
      false_target_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           JumpTarget* true_target,
                           JumpTarget* false_target)
    : owner_(owner),
      true_target_(true_target),
      false_target_(false_target),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// CodeGenerator implementation

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      loop_nesting_(0),
      function_return_is_shadowed_(false) {
}


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());
  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");

  // Initialize state.
  info_ = info;
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;

  // Adjust for function-level loop nesting.
  ASSERT_EQ(0, loop_nesting_);
  loop_nesting_ = info->loop_nesting();

  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

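    // In the primary compilation mode we emit the full prologue: frame
    // entry, stack slots, the local context, copied context parameters, the
    // arguments object, and the ThisFunction reference. Otherwise we only
    // adjust the virtual frame and bind the bailout labels left by the
    // primary compiler.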
    if (info->mode() == CompilationInfo::PRIMARY) {
      frame_->Enter();
      // tos: code slot

      // Allocate space for locals and initialize them.  This also checks
      // for stack overflow.
      frame_->AllocateStackSlots();

      VirtualFrame::SpilledScope spilled_scope(frame_);
      int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
      if (heap_slots > 0) {
        // Allocate local context.
        // Get outer context and create a new context based on it.
        __ ldr(r0, frame_->Function());
        frame_->EmitPush(r0);
        if (heap_slots <= FastNewContextStub::kMaximumSlots) {
          FastNewContextStub stub(heap_slots);
          frame_->CallStub(&stub, 1);
        } else {
          frame_->CallRuntime(Runtime::kNewContext, 1);
        }

#ifdef DEBUG
        JumpTarget verified_true;
        __ cmp(r0, cp);
        verified_true.Branch(eq);
        __ stop("NewContext: r0 is expected to be the same as cp");
        verified_true.Bind();
#endif
        // Update context local.
        __ str(cp, frame_->Context());
      }

      // TODO(1241774): Improve this code:
      // 1) only needed if we have a context
      // 2) no need to recompute context ptr every single time
      // 3) don't copy parameter operand code from SlotOperand!
      {
        Comment cmnt2(masm_, "[ copy context parameters into .context");
        // Note that iteration order is relevant here! If we have the same
        // parameter twice (e.g., function (x, y, x)), and that parameter
        // needs to be copied into the context, it must be the last argument
        // passed to the parameter that needs to be copied. This is a rare
        // case so we don't check for it, instead we rely on the copying
        // order: such a parameter is copied repeatedly into the same
        // context location and thus the last value is what is seen inside
        // the function.
        for (int i = 0; i < scope()->num_parameters(); i++) {
          Variable* par = scope()->parameter(i);
          Slot* slot = par->slot();
          if (slot != NULL && slot->type() == Slot::CONTEXT) {
            ASSERT(!scope()->is_global_scope());  // No params in global scope.
            __ ldr(r1, frame_->ParameterAt(i));
            // Loads r2 with context; used below in RecordWrite.
            __ str(r1, SlotOperand(slot, r2));
            // Load the offset into r3.
            int slot_offset =
                FixedArray::kHeaderSize + slot->index() * kPointerSize;
            __ mov(r3, Operand(slot_offset));
            __ RecordWrite(r2, r3, r1);
          }
        }
      }

      // Store the arguments object.  This must happen after context
      // initialization because the arguments object may be stored in
      // the context.
      if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
        StoreArgumentsObject(true);
      }

      // Initialize ThisFunction reference if present.
      if (scope()->is_function_scope() && scope()->function() != NULL) {
        __ mov(ip, Operand(Factory::the_hole_value()));
        frame_->EmitPush(ip);
        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
      }
    } else {
      // When used as the secondary compiler for splitting, r1, cp,
      // fp, and lr have been pushed on the stack.  Adjust the virtual
      // frame to match this state.
      frame_->Adjust(4);

      // Bind all the bailout labels to the beginning of the function.
      List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
      for (int i = 0; i < bailouts->length(); i++) {
        __ bind(bailouts->at(i)->label());
      }
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.SetExpectedHeight();
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatementsAndSpill(info->function()->body());
    }
  }

  // Generate the return sequence if necessary.
  if (has_valid_frame() || function_return_.is_linked()) {
    if (!function_return_.is_linked()) {
      CodeForReturnPosition(info->function());
    }
    // exit
    // r0: result
    // sp: stack pointer
    // fp: frame pointer
    // cp: callee's context
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);

    function_return_.Bind();
    if (FLAG_trace) {
      // Push the return value on the stack as the parameter.
      // Runtime::TraceExit returns the parameter as it is.
      frame_->EmitPush(r0);
      frame_->CallRuntime(Runtime::kTraceExit, 1);
    }

#ifdef DEBUG
    // Add a label for checking the size of the code used for returning.
    Label check_exit_codesize;
    masm_->bind(&check_exit_codesize);
#endif
    // Make sure that the constant pool is not emitted inside of the return
    // sequence.
    { Assembler::BlockConstPoolScope block_const_pool(masm_);
      // Tear down the frame which will restore the caller's frame pointer and
      // the link register.
      frame_->Exit();

      // Here we use masm_-> instead of the __ macro to prevent the code
      // coverage tool from instrumenting this code, as we rely on the code
      // size here.
      int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
      masm_->add(sp, sp, Operand(sp_delta));
      masm_->Jump(lr);

#ifdef DEBUG
      // Check that the size of the code used for returning matches what is
      // expected by the debugger. If the sp_delta above cannot be encoded in
      // the add instruction the add will generate two instructions.
      int return_sequence_length =
          masm_->InstructionsGeneratedSince(&check_exit_codesize);
      CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
            return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
#endif
    }
  }

  // Adjust for function-level loop nesting.
  ASSERT(loop_nesting_ == info->loop_nesting());
  loop_nesting_ = 0;

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context. However it is safe to dereference be-
      // cause the function context of a function context is itself. Before
      // deleting this mov we should try to create a counter-example first,
      // though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}

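// Loads the operand for a context slot, walking up the context chain from
// the current scope to the slot's scope. Jumps to 'slow' if any context on
// the way (or the final one) has an extension object, since the slot could
// then be shadowed by dynamically introduced bindings.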
MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { CodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (eg, a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      frame_->SpillAll();
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    VirtualFrame::SpilledScope scope(frame_);
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Jump();
    materialize_true.Bind();
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    VirtualFrame::SpilledScope scope(frame_);
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
      frame_->EmitPush(r0);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
      frame_->EmitPush(r0);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::LoadGlobal() {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, GlobalObject());
  frame_->EmitPush(reg);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(scratch,
         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(scratch);
}


ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
  ASSERT(scope()->arguments_shadow() != NULL);
  // We don't want to do lazy arguments allocation for functions that
  // have heap-allocated contexts, because it interferes with the
  // uninitialized const tracking in the context objects.
  return (scope()->num_heap_slots() > 0)
      ? EAGER_ARGUMENTS_ALLOCATION
      : LAZY_ARGUMENTS_ALLOCATION;
}


void CodeGenerator::StoreArgumentsObject(bool initial) {
  VirtualFrame::SpilledScope spilled_scope(frame_);

  ArgumentsAllocationMode mode = ArgumentsMode();
  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);

  Comment cmnt(masm_, "[ store arguments object");
  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
    // When using lazy arguments allocation, we store the hole value
    // as a sentinel indicating that the arguments object hasn't been
    // allocated yet.
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    frame_->EmitPush(ip);
  } else {
    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
    __ ldr(r2, frame_->Function());
    // The receiver is below the arguments, the return address, and the
    // frame pointer on the stack.
    const int kReceiverDisplacement = 2 + scope()->num_parameters();
    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
    frame_->Adjust(3);
    __ Push(r2, r1, r0);
    frame_->CallStub(&stub, 3);
    frame_->EmitPush(r0);
  }

  Variable* arguments = scope()->arguments()->var();
  Variable* shadow = scope()->arguments_shadow()->var();
  ASSERT(arguments != NULL && arguments->slot() != NULL);
  ASSERT(shadow != NULL && shadow->slot() != NULL);
  JumpTarget done;
  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
    // We have to skip storing into the arguments slot if it has
    // already been written to. This can happen if a function
    // has a local variable named 'arguments'.
    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
    frame_->EmitPop(r0);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(r0, ip);
    done.Branch(ne);
  }
  StoreToSlot(arguments->slot(), NOT_CONST_INIT);
  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValue();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
    frame_->SpillAll();
  } else {
    // Anything else can be handled normally.
    LoadAndSpill(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    Load(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    VirtualFrame::SpilledScope spilled_scope(frame_);
    LoadAndSpill(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  int size = ref->size();
  ref->set_unloaded();
  if (size == 0) return;

  // Pop a reference from the stack while preserving TOS.
  VirtualFrame::RegisterAllocationScope scope(this);
  Comment cmnt(masm_, "[ UnloadReference");
  if (size > 0) {
    Register tos = frame_->PopToRegister();
    frame_->Drop(size);
    frame_->EmitPush(tos);
  }
}


// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  // Note: The generated code snippet does not change stack variables.
  //       Only the condition code should be set.
  frame_->EmitPop(r0);

  // Fast case checks

  // Check if the value is 'false'.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is 'true'.
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r0, ip);
  true_target->Branch(eq);

  // Check if the value is 'undefined'.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is a smi.
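  // (A smi equal to zero converts to 'false'; any other smi converts to
  // 'true'.)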
  __ cmp(r0, Operand(Smi::FromInt(0)));
  false_target->Branch(eq);
  __ tst(r0, Operand(kSmiTagMask));
  true_target->Branch(eq);

  // Slow case: call the runtime.
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kToBool, 1);
  // Convert the result (r0) to a condition code.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           int constant_rhs) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  // sp[0] : y
  // sp[1] : x
  // result : r0

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      frame_->EmitPop(r0);  // r0 : y
      frame_->EmitPop(r1);  // r1 : x
      GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs);
      frame_->CallStub(&stub, 0);
      break;
    }

    case Token::COMMA:
      frame_->EmitPop(r0);
      // Simply discard left value.
      frame_->Drop();
      break;

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}

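// Like GenericBinaryOperation above, but pops its operands through the
// virtual frame's TOS registers instead of requiring a fully spilled frame
// first.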
void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
                                                OverwriteMode overwrite_mode,
                                                int constant_rhs) {
  // top of virtual frame: y
  // 2nd elt. on virtual frame : x
  // result : top of virtual frame

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:  // fall through.
    case Token::SUB:  // fall through.
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      Register rhs = frame_->PopToRegister();
      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
      {
        VirtualFrame::SpilledScope spilled_scope(frame_);
        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
        frame_->CallStub(&stub, 0);
      }
      frame_->EmitPush(r0);
      break;
    }

    case Token::COMMA: {
      Register scratch = frame_->PopToRegister();
      // Simply discard left value.
      frame_->Drop();
      frame_->EmitPush(scratch);
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode,
                             Register tos)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode),
        tos_register_(tos) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
  Register tos_register_;
};

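// Slow path for inlined smi operations: undo any optimistic arithmetic, put
// the constant and the remaining operand into r0/r1 as expected by
// GenericBinaryOpStub, call the stub, and move the result back into the
// register the inlined code expects.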
void DeferredInlineSmiOperation::Generate() {
  Register lhs = r1;
  Register rhs = r0;
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
      if (reversed_) {
        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (reversed_) {
        if (tos_register_.is(r0)) {
          __ mov(r1, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r1));
          __ mov(r0, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      } else {
        if (tos_register_.is(r1)) {
          __ mov(r0, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r0));
          __ mov(r1, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      }
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (!reversed_) {
        if (tos_register_.is(r1)) {
          __ mov(r0, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r0));
          __ mov(r1, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      } else {
        UNREACHABLE();  // Should have been handled in SmiOperation.
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
  __ CallStub(&stub);
  // The generic stub returns its value in r0, but that's not
  // necessarily what we want.  We want whatever the inlined code
  // expected, which is that the answer is in the same register as
  // the operand was.
  __ Move(tos_register_, r0);
}

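// Returns true if x has at most two bits set, i.e. its population count is
// less than or equal to 2.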
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

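// Emits code for a binary operation where one operand is a known smi
// constant. The common cases are inlined with a deferred slow path; anything
// that is not worth inlining falls back to VirtualFrameBinaryOperation.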
void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  int int_value = Smi::cast(*value)->value();

  bool something_to_inline;
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::BIT_AND:
    case Token::BIT_OR:
    case Token::BIT_XOR: {
      something_to_inline = true;
      break;
    }
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    default: {
      something_to_inline = false;
      break;
    }
  }

  if (!something_to_inline) {
    if (!reversed) {
      // Push the rhs onto the virtual frame by putting it in a TOS register.
      Register rhs = frame_->GetTOSRegister();
      __ mov(rhs, Operand(value));
      frame_->EmitPush(rhs);
      VirtualFrameBinaryOperation(op, mode, int_value);
    } else {
      // Pop the rhs, then push lhs and rhs in the right order.  Only performs
      // at most one pop, the rest takes place in TOS registers.
      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
      __ mov(lhs, Operand(value));
      frame_->EmitPush(lhs);
      frame_->EmitPush(rhs);
      VirtualFrameBinaryOperation(op, mode, kUnknownIntValue);
    }
    return;
  }

  // We move the top of stack to a register (normally no move is involved).
  Register tos = frame_->PopToRegister();
  // All other registers are spilled.  The deferred code expects one argument
  // in a register and all other values are flushed to the stack.  The
  // answer is returned in the same register that the top of stack argument was
  // in.
  frame_->SpillAll();

  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      __ add(tos, tos, Operand(value), SetCC);
      deferred->Branch(vs);
      __ tst(tos, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      if (reversed) {
        __ rsb(tos, tos, Operand(value), SetCC);
      } else {
        __ sub(tos, tos, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      __ tst(tos, Operand(kSmiTagMask));
      deferred->Branch(ne);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }


    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      __ tst(tos, Operand(kSmiTagMask));
      deferred->Branch(ne);
      switch (op) {
        case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
        case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
        case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
        default: UNREACHABLE();
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      ASSERT(!reversed);
      Register scratch = VirtualFrame::scratch0();
      Register scratch2 = VirtualFrame::scratch1();
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
      uint32_t problematic_mask = kSmiTagMask;
      // For unsigned shift by zero all negative smis are problematic.
      if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
      __ tst(tos, Operand(problematic_mask));
      deferred->Branch(ne);  // Go slow for problematic input.
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            int adjusted_shift = shift_value - kSmiTagSize;
            ASSERT(adjusted_shift >= 0);
            if (adjusted_shift != 0) {
              __ mov(scratch, Operand(tos, LSL, adjusted_shift));
              // Check that the *signed* result fits in a smi.
              __ add(scratch2, scratch, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
            } else {
              // Check that the *signed* result fits in a smi.
              __ add(scratch2, tos, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(tos, LSL, kSmiTagSize));
            }
          }
          break;
        }
        case Token::SHR: {
          if (shift_value != 0) {
            __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Remove tag.
            // LSR by immediate 0 means shifting 32 bits.
            __ mov(scratch, Operand(scratch, LSR, shift_value));
            if (shift_value == 1) {
              // Check that the *unsigned* result fits in a smi.
              // Neither of the two high-order bits can be set:
              // - 0x80000000: high bit would be lost when smi tagging
              // - 0x40000000: this number would convert to negative when
              //   smi tagging.
              // These two cases can only happen with shifts by 0 or 1 when
              // handed a valid smi.
              __ tst(scratch, Operand(0xc0000000));
              deferred->Branch(ne);
            }
            __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
          }
          break;
        }
        case Token::SAR: {
          // In the ARM instruction set, ASR by immediate 0 means shifting 32
          // bits.
          if (shift_value != 0) {
            // Do the shift and the tag removal in one operation.  If the shift
            // is 31 bits (the highest possible value) then we emit the
            // instruction as a shift by 0 which means shift arithmetically by
            // 32.
            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
            // Put tag back.
            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
          }
          break;
        }
        default: UNREACHABLE();
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::MOD: {
      ASSERT(!reversed);
      ASSERT(int_value >= 2);
      ASSERT(IsPowerOf2(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
      mask = (int_value << kSmiTagSize) - 1;
      __ and_(tos, tos, Operand(mask));
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::MUL: {
      ASSERT(IsEasyToMultiplyBy(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi.  It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);
      MultiplyByKnownInt(masm_, tos, tos, int_value);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    default:
      UNREACHABLE();
      break;
  }
}


void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  VirtualFrame::RegisterAllocationScope scope(this);

  if (left != NULL) Load(left);
  if (right != NULL) Load(right);

  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  Register lhs;
  Register rhs;

  // We load the top two stack positions into registers chosen by the virtual
  // frame.  This should keep the register shuffling to a minimum.
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    lhs = frame_->PopToRegister();
    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
  } else {
    rhs = frame_->PopToRegister();
    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
  }

  ASSERT(rhs.is(r0) || rhs.is(r1));
  ASSERT(lhs.is(r0) || lhs.is(r1));

  // Now we have the two sides in r0 and r1.  We flush any other registers
  // because the stub doesn't know about register allocation.
  frame_->SpillAll();
  Register scratch = VirtualFrame::scratch0();
  __ orr(scratch, lhs, Operand(rhs));
  __ tst(scratch, Operand(kSmiTagMask));
  JumpTarget smi;
  smi.Branch(eq);

  // Perform non-smi comparison by stub.
  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
  // We call with 0 args because there are 0 on the stack.
  if (!rhs.is(r0)) {
    __ Swap(rhs, lhs, ip);
  }

  CompareStub stub(cc, strict);
  frame_->CallStub(&stub, 0);
  __ cmp(r0, Operand(0));
  JumpTarget exit;
  exit.Jump();

  // Do smi comparisons by pointer comparison.
  smi.Bind();
  __ cmp(lhs, Operand(rhs));

  exit.Bind();
  cc_reg_ = cc;
}


// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      CallFunctionFlags flags,
                                      int position) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    LoadAndSpill(args->at(i));
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.
  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
  CallFunctionStub call_function(arg_count, in_loop, flags);
  frame_->CallStub(&call_function, arg_count + 1);

  // Restore context and pop function from the stack.
  __ ldr(cp, frame_->Context());
  frame_->Drop();  // discard the TOS
}


void CodeGenerator::CallApplyLazy(Expression* applicand,
                                  Expression* receiver,
                                  VariableProxy* arguments,
                                  int position) {
  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).
  // If the arguments object of the scope has not been allocated,
  // and x.apply is Function.prototype.apply, this optimization
  // just copies y and the arguments of the current function on the
  // stack, as receiver and arguments, and calls x.
  // In the implementation comments, we call x the applicand
  // and y the receiver.
  VirtualFrame::SpilledScope spilled_scope(frame_);

  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
  ASSERT(arguments->IsArguments());

  // Load applicand.apply onto the stack. This will usually
  // give us a megamorphic load site. Not super, but it works.
  LoadAndSpill(applicand);
  Handle<String> name = Factory::LookupAsciiSymbol("apply");
  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
  frame_->EmitPush(r0);

  // Load the receiver and the existing arguments object onto the
  // expression stack. Avoid allocating the arguments object here.
  LoadAndSpill(receiver);
  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);

  // Emit the source position information after having loaded the
  // receiver and the arguments.
  CodeForSourcePosition(position);
  // Contents of the stack at this point:
  //   sp[0]: arguments object of the current function or the hole.
  //   sp[1]: receiver
  //   sp[2]: applicand.apply
  //   sp[3]: applicand.

  // Check if the arguments object has been lazily allocated
  // already. If so, just use that instead of copying the arguments
  // from the stack. This also deals with cases where a local variable
  // named 'arguments' has been introduced.
  __ ldr(r0, MemOperand(sp, 0));

  Label slow, done;
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(ip, r0);
  __ b(ne, &slow);

  Label build_args;
  // Get rid of the arguments object probe.
  frame_->Drop();
  // Stack now has 3 elements on it.
  // Contents of stack at this point:
  //   sp[0]: receiver
  //   sp[1]: applicand.apply
  //   sp[2]: applicand.

  // Check that the receiver really is a JavaScript object.
  __ ldr(r0, MemOperand(sp, 0));
  __ BranchOnSmi(r0, &build_args);
  // We allow all JSObjects including JSFunctions.  As long as
  // JS_FUNCTION_TYPE is the last instance type and it is right
  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
  // bound.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
  __ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &build_args);

  // Check that applicand.apply is Function.prototype.apply.
  __ ldr(r0, MemOperand(sp, kPointerSize));
  __ BranchOnSmi(r0, &build_args);
  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
  __ b(ne, &build_args);
  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
  __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
  __ cmp(r1, Operand(apply_code));
  __ b(ne, &build_args);

  // Check that applicand is a function.
  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
  __ BranchOnSmi(r1, &build_args);
  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
  __ b(ne, &build_args);

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  Label invoke, adapted;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adapted);

  // No arguments adaptor frame. Copy fixed number of arguments.
  __ mov(r0, Operand(scope()->num_parameters()));
  for (int i = 0; i < scope()->num_parameters(); i++) {
    __ ldr(r2, frame_->ParameterAt(i));
    __ push(r2);
  }
  __ jmp(&invoke);

  // Arguments adaptor frame present. Copy arguments from there, but
  // avoid copying too many arguments to avoid stack overflows.
  __ bind(&adapted);
  static const uint32_t kArgumentsLimit = 1 * KB;
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
  __ mov(r3, r0);
  __ cmp(r0, Operand(kArgumentsLimit));
  __ b(gt, &build_args);

  // Loop through the arguments pushing them onto the execution
  // stack. We don't inform the virtual frame of the push, so we don't
  // have to worry about getting rid of the elements from the virtual
  // frame.
1467 Label loop;
1468 // r3 is a small non-negative integer, due to the test above.
1469 __ cmp(r3, Operand(0));
1470 __ b(eq, &invoke);
1471 // Compute the address of the first argument.
1472 __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
1473 __ add(r2, r2, Operand(kPointerSize));
1474 __ bind(&loop);
1475 // Post-decrement argument address by kPointerSize on each iteration.
1476 __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
1477 __ push(r4);
1478 __ sub(r3, r3, Operand(1), SetCC);
1479 __ b(gt, &loop);
1480
1481 // Invoke the function.
1482 __ bind(&invoke);
1483 ParameterCount actual(r0);
1484 __ InvokeFunction(r1, actual, CALL_FUNCTION);
1485 // Drop applicand.apply and applicand from the stack, and push
1486 // the result of the function call, but leave the spilled frame
1487 // unchanged, with 3 elements, so it is correct when we compile the
1488 // slow-case code.
1489 __ add(sp, sp, Operand(2 * kPointerSize));
1490 __ push(r0);
1491 // Stack now has 1 element:
1492 // sp[0]: result
1493 __ jmp(&done);
1494
1495 // Slow-case: Allocate the arguments object since we know it isn't
1496 // there, and fall through to the slow case where we call
1497 // applicand.apply.
1498 __ bind(&build_args);
1499 // Stack now has 3 elements, because every jump to build_args was taken with:
1500 // sp[0]: receiver
1501 // sp[1]: applicand.apply
1502 // sp[2]: applicand.
1503 StoreArgumentsObject(false);
1504
1505 // Stack and frame now have 4 elements.
1506 __ bind(&slow);
1507
1508 // Generic computation of x.apply(y, args) with no special optimization.
1509 // Flip applicand.apply and applicand on the stack, so
1510 // applicand looks like the receiver of the applicand.apply call.
1511 // Then process it as a normal function call.
1512 __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
1513 __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
Kristian Monsen25f61362010-05-21 11:50:48 +01001514 __ strd(r0, MemOperand(sp, 2 * kPointerSize));
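  // strd stores the register pair: r0 goes to sp[2 * kPointerSize] and r1 to
  // the following word, so the two stack slots are swapped in a single store.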
Steve Block6ded16b2010-05-10 14:33:55 +01001515
1516 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
1517 frame_->CallStub(&call_function, 3);
1518 // The function and its two arguments have been dropped.
1519 frame_->Drop(); // Drop the receiver as well.
1520 frame_->EmitPush(r0);
1521 // Stack now has 1 element:
1522 // sp[0]: result
1523 __ bind(&done);
1524
1525 // Restore the context register after a call.
1526 __ ldr(cp, frame_->Context());
1527}
1528
1529
Steve Blocka7e24c12009-10-30 11:49:00 +00001530void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
Steve Block6ded16b2010-05-10 14:33:55 +01001531 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001532 ASSERT(has_cc());
1533 Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
1534 target->Branch(cc);
1535 cc_reg_ = al;
1536}
1537
1538
1539void CodeGenerator::CheckStack() {
Steve Block6ded16b2010-05-10 14:33:55 +01001540 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blockd0582a62009-12-15 09:54:21 +00001541 Comment cmnt(masm_, "[ check stack");
1542 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
1543 // Set up the return address in lr ahead of the conditional jump to the
1544 // stub. kInstrSize is added to the implicit 8 byte offset that always
1545 // applies to operations with pc and gives a return address 12 bytes down.
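  // Emitted sequence, roughly (each instruction is kInstrSize == 4 bytes):
  //   add lr, pc, #4        ; lr <- address of the slot just past the mov
  //   cmp sp, ip            ; compare sp against the stack limit
  //   movlo pc, #stub_entry ; jump to the stub only when sp is below the limit
  //   ...                   ; the stub returns here via lr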
1546 masm_->add(lr, pc, Operand(Assembler::kInstrSize));
1547 masm_->cmp(sp, Operand(ip));
1548 StackCheckStub stub;
1549 // Call the stub if lower.
1550 masm_->mov(pc,
1551 Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
1552 RelocInfo::CODE_TARGET),
1553 LeaveCC,
1554 lo);
Steve Blocka7e24c12009-10-30 11:49:00 +00001555}
1556
1557
1558void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
1559#ifdef DEBUG
1560 int original_height = frame_->height();
1561#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001562 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001563 for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
1564 VisitAndSpill(statements->at(i));
1565 }
1566 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1567}
1568
1569
1570void CodeGenerator::VisitBlock(Block* node) {
1571#ifdef DEBUG
1572 int original_height = frame_->height();
1573#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001574 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001575 Comment cmnt(masm_, "[ Block");
1576 CodeForStatementPosition(node);
Kristian Monsen25f61362010-05-21 11:50:48 +01001577 node->break_target()->SetExpectedHeight();
Steve Blocka7e24c12009-10-30 11:49:00 +00001578 VisitStatementsAndSpill(node->statements());
1579 if (node->break_target()->is_linked()) {
1580 node->break_target()->Bind();
1581 }
1582 node->break_target()->Unuse();
1583 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1584}
1585
1586
1587void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
Steve Block3ce2e202009-11-05 08:53:23 +00001588 frame_->EmitPush(cp);
Steve Block6ded16b2010-05-10 14:33:55 +01001589 frame_->EmitPush(Operand(pairs));
1590 frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
1591
1592 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001593 frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
1594 // The result is discarded.
1595}
1596
1597
1598void CodeGenerator::VisitDeclaration(Declaration* node) {
1599#ifdef DEBUG
1600 int original_height = frame_->height();
1601#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00001602 Comment cmnt(masm_, "[ Declaration");
1603 Variable* var = node->proxy()->var();
1604 ASSERT(var != NULL); // must have been resolved
1605 Slot* slot = var->slot();
1606
1607 // If it was not possible to allocate the variable at compile time,
1608 // we need to "declare" it at runtime to make sure it actually
1609 // exists in the local context.
1610 if (slot != NULL && slot->type() == Slot::LOOKUP) {
1611 // Variables with a "LOOKUP" slot were introduced as non-locals
1612 // during variable resolution and must have mode DYNAMIC.
1613 ASSERT(var->is_dynamic());
1614 // For now, just do a runtime call.
1615 frame_->EmitPush(cp);
Steve Block6ded16b2010-05-10 14:33:55 +01001616 frame_->EmitPush(Operand(var->name()));
Steve Blocka7e24c12009-10-30 11:49:00 +00001617 // Declaration nodes are always introduced in one of two modes.
1618 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1619 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
Steve Block6ded16b2010-05-10 14:33:55 +01001620 frame_->EmitPush(Operand(Smi::FromInt(attr)));
Steve Blocka7e24c12009-10-30 11:49:00 +00001621 // Push initial value, if any.
1622 // Note: For variables we must not push an initial value (such as
1623 // 'undefined') because we may have a (legal) redeclaration and we
1624 // must not destroy the current value.
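    // For example, in 'var x = 1; var x;' the second declaration must leave
    // the existing value of x untouched.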
1625 if (node->mode() == Variable::CONST) {
Steve Block6ded16b2010-05-10 14:33:55 +01001626 frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
Steve Blocka7e24c12009-10-30 11:49:00 +00001627 } else if (node->fun() != NULL) {
Steve Block6ded16b2010-05-10 14:33:55 +01001628 Load(node->fun());
Steve Blocka7e24c12009-10-30 11:49:00 +00001629 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01001630 frame_->EmitPush(Operand(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00001631 }
Steve Block6ded16b2010-05-10 14:33:55 +01001632
1633 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001634 frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1635 // Ignore the return value (declarations are statements).
Steve Block6ded16b2010-05-10 14:33:55 +01001636
Steve Blocka7e24c12009-10-30 11:49:00 +00001637 ASSERT(frame_->height() == original_height);
1638 return;
1639 }
1640
1641 ASSERT(!var->is_global());
1642
1643 // If we have a function or a constant, we need to initialize the variable.
1644 Expression* val = NULL;
1645 if (node->mode() == Variable::CONST) {
1646 val = new Literal(Factory::the_hole_value());
1647 } else {
1648 val = node->fun(); // NULL if we don't have a function
1649 }
1650
1651 if (val != NULL) {
Steve Block6ded16b2010-05-10 14:33:55 +01001652 // Set initial value.
1653 Reference target(this, node->proxy());
1654 Load(val);
1655 target.SetValue(NOT_CONST_INIT);
1656
Steve Blocka7e24c12009-10-30 11:49:00 +00001657 // Get rid of the assigned value (declarations are statements).
1658 frame_->Drop();
1659 }
1660 ASSERT(frame_->height() == original_height);
1661}
1662
1663
1664void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
1665#ifdef DEBUG
1666 int original_height = frame_->height();
1667#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001668 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001669 Comment cmnt(masm_, "[ ExpressionStatement");
1670 CodeForStatementPosition(node);
1671 Expression* expression = node->expression();
1672 expression->MarkAsStatement();
1673 LoadAndSpill(expression);
1674 frame_->Drop();
1675 ASSERT(frame_->height() == original_height);
1676}
1677
1678
1679void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
1680#ifdef DEBUG
1681 int original_height = frame_->height();
1682#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001683 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001684 Comment cmnt(masm_, "// EmptyStatement");
1685 CodeForStatementPosition(node);
1686 // nothing to do
1687 ASSERT(frame_->height() == original_height);
1688}
1689
1690
1691void CodeGenerator::VisitIfStatement(IfStatement* node) {
1692#ifdef DEBUG
1693 int original_height = frame_->height();
1694#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001695 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001696 Comment cmnt(masm_, "[ IfStatement");
1697 // Generate different code depending on which parts of the if statement
1698 // are present or not.
1699 bool has_then_stm = node->HasThenStatement();
1700 bool has_else_stm = node->HasElseStatement();
1701
1702 CodeForStatementPosition(node);
1703
1704 JumpTarget exit;
1705 if (has_then_stm && has_else_stm) {
1706 Comment cmnt(masm_, "[ IfThenElse");
1707 JumpTarget then;
1708 JumpTarget else_;
1709 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001710 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001711 if (frame_ != NULL) {
1712 Branch(false, &else_);
1713 }
1714 // then
1715 if (frame_ != NULL || then.is_linked()) {
1716 then.Bind();
1717 VisitAndSpill(node->then_statement());
1718 }
1719 if (frame_ != NULL) {
1720 exit.Jump();
1721 }
1722 // else
1723 if (else_.is_linked()) {
1724 else_.Bind();
1725 VisitAndSpill(node->else_statement());
1726 }
1727
1728 } else if (has_then_stm) {
1729 Comment cmnt(masm_, "[ IfThen");
1730 ASSERT(!has_else_stm);
1731 JumpTarget then;
1732 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001733 LoadConditionAndSpill(node->condition(), &then, &exit, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001734 if (frame_ != NULL) {
1735 Branch(false, &exit);
1736 }
1737 // then
1738 if (frame_ != NULL || then.is_linked()) {
1739 then.Bind();
1740 VisitAndSpill(node->then_statement());
1741 }
1742
1743 } else if (has_else_stm) {
1744 Comment cmnt(masm_, "[ IfElse");
1745 ASSERT(!has_then_stm);
1746 JumpTarget else_;
1747 // if (!cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001748 LoadConditionAndSpill(node->condition(), &exit, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001749 if (frame_ != NULL) {
1750 Branch(true, &exit);
1751 }
1752 // else
1753 if (frame_ != NULL || else_.is_linked()) {
1754 else_.Bind();
1755 VisitAndSpill(node->else_statement());
1756 }
1757
1758 } else {
1759 Comment cmnt(masm_, "[ If");
1760 ASSERT(!has_then_stm && !has_else_stm);
1761 // if (cond)
Steve Blockd0582a62009-12-15 09:54:21 +00001762 LoadConditionAndSpill(node->condition(), &exit, &exit, false);
Steve Blocka7e24c12009-10-30 11:49:00 +00001763 if (frame_ != NULL) {
1764 if (has_cc()) {
1765 cc_reg_ = al;
1766 } else {
1767 frame_->Drop();
1768 }
1769 }
1770 }
1771
1772 // end
1773 if (exit.is_linked()) {
1774 exit.Bind();
1775 }
1776 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1777}
1778
1779
1780void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
Steve Block6ded16b2010-05-10 14:33:55 +01001781 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001782 Comment cmnt(masm_, "[ ContinueStatement");
1783 CodeForStatementPosition(node);
1784 node->target()->continue_target()->Jump();
1785}
1786
1787
1788void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
Steve Block6ded16b2010-05-10 14:33:55 +01001789 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001790 Comment cmnt(masm_, "[ BreakStatement");
1791 CodeForStatementPosition(node);
1792 node->target()->break_target()->Jump();
1793}
1794
1795
1796void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
Steve Block6ded16b2010-05-10 14:33:55 +01001797 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001798 Comment cmnt(masm_, "[ ReturnStatement");
1799
1800 CodeForStatementPosition(node);
1801 LoadAndSpill(node->expression());
1802 if (function_return_is_shadowed_) {
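    // The return target is currently shadowed (e.g. by an enclosing
    // try/finally), so this jump is routed through the shadow target, which
    // unlinks the handler and runs any pending finally code before the
    // actual return.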
1803 frame_->EmitPop(r0);
1804 function_return_.Jump();
1805 } else {
1806 // Pop the result from the frame and prepare the frame for
1807 // returning thus making it easier to merge.
1808 frame_->EmitPop(r0);
1809 frame_->PrepareForReturn();
1810
1811 function_return_.Jump();
1812 }
1813}
1814
1815
1816void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1817#ifdef DEBUG
1818 int original_height = frame_->height();
1819#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001820 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001821 Comment cmnt(masm_, "[ WithEnterStatement");
1822 CodeForStatementPosition(node);
1823 LoadAndSpill(node->expression());
1824 if (node->is_catch_block()) {
1825 frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1826 } else {
1827 frame_->CallRuntime(Runtime::kPushContext, 1);
1828 }
1829#ifdef DEBUG
1830 JumpTarget verified_true;
Steve Block6ded16b2010-05-10 14:33:55 +01001831 __ cmp(r0, cp);
Steve Blocka7e24c12009-10-30 11:49:00 +00001832 verified_true.Branch(eq);
1833 __ stop("PushContext: r0 is expected to be the same as cp");
1834 verified_true.Bind();
1835#endif
1836 // Update context local.
1837 __ str(cp, frame_->Context());
1838 ASSERT(frame_->height() == original_height);
1839}
1840
1841
1842void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1843#ifdef DEBUG
1844 int original_height = frame_->height();
1845#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001846 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 Comment cmnt(masm_, "[ WithExitStatement");
1848 CodeForStatementPosition(node);
1849 // Pop context.
1850 __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
1851 // Update context local.
1852 __ str(cp, frame_->Context());
1853 ASSERT(frame_->height() == original_height);
1854}
1855
1856
1857void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
1858#ifdef DEBUG
1859 int original_height = frame_->height();
1860#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001861 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001862 Comment cmnt(masm_, "[ SwitchStatement");
1863 CodeForStatementPosition(node);
Kristian Monsen25f61362010-05-21 11:50:48 +01001864 node->break_target()->SetExpectedHeight();
Steve Blocka7e24c12009-10-30 11:49:00 +00001865
1866 LoadAndSpill(node->tag());
1867
1868 JumpTarget next_test;
1869 JumpTarget fall_through;
1870 JumpTarget default_entry;
1871 JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
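  // Roles of the jump targets above:
  //   next_test:     taken when a case comparison fails; chains to the next
  //                  case test (or to the final drop of the switch value).
  //   fall_through:  entry into a case body reached by falling off the end of
  //                  the preceding body.
  //   default_entry: entry into the default body when the clause written just
  //                  before it falls through.
  //   default_exit:  entry into the clause written just after the default,
  //                  used when the default body itself falls through.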
1872 ZoneList<CaseClause*>* cases = node->cases();
1873 int length = cases->length();
1874 CaseClause* default_clause = NULL;
1875
1876 for (int i = 0; i < length; i++) {
1877 CaseClause* clause = cases->at(i);
1878 if (clause->is_default()) {
1879 // Remember the default clause and compile it at the end.
1880 default_clause = clause;
1881 continue;
1882 }
1883
1884 Comment cmnt(masm_, "[ Case clause");
1885 // Compile the test.
1886 next_test.Bind();
1887 next_test.Unuse();
1888 // Duplicate TOS.
1889 __ ldr(r0, frame_->Top());
1890 frame_->EmitPush(r0);
1891 Comparison(eq, NULL, clause->label(), true);
1892 Branch(false, &next_test);
1893
1894 // Before entering the body from the test, remove the switch value from
1895 // the stack.
1896 frame_->Drop();
1897
1898 // Label the body so that fall through is enabled.
1899 if (i > 0 && cases->at(i - 1)->is_default()) {
1900 default_exit.Bind();
1901 } else {
1902 fall_through.Bind();
1903 fall_through.Unuse();
1904 }
1905 VisitStatementsAndSpill(clause->statements());
1906
1907 // If control flow can fall through from the body, jump to the next body
1908 // or the end of the statement.
1909 if (frame_ != NULL) {
1910 if (i < length - 1 && cases->at(i + 1)->is_default()) {
1911 default_entry.Jump();
1912 } else {
1913 fall_through.Jump();
1914 }
1915 }
1916 }
1917
1918 // The final "test" removes the switch value.
1919 next_test.Bind();
1920 frame_->Drop();
1921
1922 // If there is a default clause, compile it.
1923 if (default_clause != NULL) {
1924 Comment cmnt(masm_, "[ Default clause");
1925 default_entry.Bind();
1926 VisitStatementsAndSpill(default_clause->statements());
1927 // If control flow can fall out of the default and there is a case after
1928 // it, jump to that case's body.
1929 if (frame_ != NULL && default_exit.is_bound()) {
1930 default_exit.Jump();
1931 }
1932 }
1933
1934 if (fall_through.is_linked()) {
1935 fall_through.Bind();
1936 }
1937
1938 if (node->break_target()->is_linked()) {
1939 node->break_target()->Bind();
1940 }
1941 node->break_target()->Unuse();
1942 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1943}
1944
1945
Steve Block3ce2e202009-11-05 08:53:23 +00001946void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001947#ifdef DEBUG
1948 int original_height = frame_->height();
1949#endif
Steve Block6ded16b2010-05-10 14:33:55 +01001950 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00001951 Comment cmnt(masm_, "[ DoWhileStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00001952 CodeForStatementPosition(node);
Kristian Monsen25f61362010-05-21 11:50:48 +01001953 node->break_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00001954 JumpTarget body(JumpTarget::BIDIRECTIONAL);
Steve Block6ded16b2010-05-10 14:33:55 +01001955 IncrementLoopNesting();
Steve Blocka7e24c12009-10-30 11:49:00 +00001956
Steve Block3ce2e202009-11-05 08:53:23 +00001957 // Label the top of the loop for the backward CFG edge. If the test
1958 // is always true we can use the continue target, and if the test is
1959 // always false there is no need.
1960 ConditionAnalysis info = AnalyzeCondition(node->cond());
1961 switch (info) {
1962 case ALWAYS_TRUE:
Kristian Monsen25f61362010-05-21 11:50:48 +01001963 node->continue_target()->SetExpectedHeight();
Steve Blocka7e24c12009-10-30 11:49:00 +00001964 node->continue_target()->Bind();
Steve Block3ce2e202009-11-05 08:53:23 +00001965 break;
1966 case ALWAYS_FALSE:
Kristian Monsen25f61362010-05-21 11:50:48 +01001967 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00001968 break;
1969 case DONT_KNOW:
Kristian Monsen25f61362010-05-21 11:50:48 +01001970 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00001971 body.Bind();
1972 break;
1973 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001974
Steve Block3ce2e202009-11-05 08:53:23 +00001975 CheckStack(); // TODO(1222600): ignore if body contains calls.
1976 VisitAndSpill(node->body());
Steve Blocka7e24c12009-10-30 11:49:00 +00001977
Steve Blockd0582a62009-12-15 09:54:21 +00001978 // Compile the test.
Steve Block3ce2e202009-11-05 08:53:23 +00001979 switch (info) {
1980 case ALWAYS_TRUE:
1981 // If control can fall off the end of the body, jump back to the
1982 // top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001983 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00001984 node->continue_target()->Jump();
Steve Blocka7e24c12009-10-30 11:49:00 +00001985 }
1986 break;
Steve Block3ce2e202009-11-05 08:53:23 +00001987 case ALWAYS_FALSE:
1988 // If we have a continue in the body, we only have to bind its
1989 // jump target.
1990 if (node->continue_target()->is_linked()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001991 node->continue_target()->Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00001992 }
Steve Block3ce2e202009-11-05 08:53:23 +00001993 break;
1994 case DONT_KNOW:
1995 // We have to compile the test expression if it can be reached by
1996 // control flow falling out of the body or via continue.
1997 if (node->continue_target()->is_linked()) {
1998 node->continue_target()->Bind();
1999 }
2000 if (has_valid_frame()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002001 Comment cmnt(masm_, "[ DoWhileCondition");
2002 CodeForDoWhileConditionPosition(node);
2003 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002004 if (has_valid_frame()) {
Steve Block3ce2e202009-11-05 08:53:23 +00002005 // An invalid frame here indicates that control did not
2006 // fall out of the test expression.
2007 Branch(true, &body);
Steve Blocka7e24c12009-10-30 11:49:00 +00002008 }
2009 }
2010 break;
Steve Blocka7e24c12009-10-30 11:49:00 +00002011 }
2012
2013 if (node->break_target()->is_linked()) {
2014 node->break_target()->Bind();
2015 }
Steve Block6ded16b2010-05-10 14:33:55 +01002016 DecrementLoopNesting();
Steve Block3ce2e202009-11-05 08:53:23 +00002017 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2018}
2019
2020
2021void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
2022#ifdef DEBUG
2023 int original_height = frame_->height();
2024#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002025 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00002026 Comment cmnt(masm_, "[ WhileStatement");
2027 CodeForStatementPosition(node);
2028
2029 // If the test is never true and has no side effects there is no need
2030 // to compile the test or body.
2031 ConditionAnalysis info = AnalyzeCondition(node->cond());
2032 if (info == ALWAYS_FALSE) return;
2033
Kristian Monsen25f61362010-05-21 11:50:48 +01002034 node->break_target()->SetExpectedHeight();
Steve Block6ded16b2010-05-10 14:33:55 +01002035 IncrementLoopNesting();
Steve Block3ce2e202009-11-05 08:53:23 +00002036
2037 // Label the top of the loop with the continue target for the backward
2038 // CFG edge.
Kristian Monsen25f61362010-05-21 11:50:48 +01002039 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00002040 node->continue_target()->Bind();
2041
2042 if (info == DONT_KNOW) {
2043 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00002044 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00002045 if (has_valid_frame()) {
2046 // A NULL frame indicates that control did not fall out of the
2047 // test expression.
2048 Branch(false, node->break_target());
2049 }
2050 if (has_valid_frame() || body.is_linked()) {
2051 body.Bind();
2052 }
2053 }
2054
2055 if (has_valid_frame()) {
2056 CheckStack(); // TODO(1222600): ignore if body contains calls.
2057 VisitAndSpill(node->body());
2058
2059 // If control flow can fall out of the body, jump back to the top.
2060 if (has_valid_frame()) {
2061 node->continue_target()->Jump();
2062 }
2063 }
2064 if (node->break_target()->is_linked()) {
2065 node->break_target()->Bind();
2066 }
Steve Block6ded16b2010-05-10 14:33:55 +01002067 DecrementLoopNesting();
Steve Block3ce2e202009-11-05 08:53:23 +00002068 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2069}
2070
2071
2072void CodeGenerator::VisitForStatement(ForStatement* node) {
2073#ifdef DEBUG
2074 int original_height = frame_->height();
2075#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002076 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00002077 Comment cmnt(masm_, "[ ForStatement");
2078 CodeForStatementPosition(node);
2079 if (node->init() != NULL) {
2080 VisitAndSpill(node->init());
2081 }
2082
2083 // If the test is never true there is no need to compile the test or
2084 // body.
2085 ConditionAnalysis info = AnalyzeCondition(node->cond());
2086 if (info == ALWAYS_FALSE) return;
2087
Kristian Monsen25f61362010-05-21 11:50:48 +01002088 node->break_target()->SetExpectedHeight();
Steve Block6ded16b2010-05-10 14:33:55 +01002089 IncrementLoopNesting();
Steve Block3ce2e202009-11-05 08:53:23 +00002090
2091 // If there is no update statement, label the top of the loop with the
2092 // continue target, otherwise with the loop target.
2093 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2094 if (node->next() == NULL) {
Kristian Monsen25f61362010-05-21 11:50:48 +01002095 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00002096 node->continue_target()->Bind();
2097 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01002098 node->continue_target()->SetExpectedHeight();
Steve Block3ce2e202009-11-05 08:53:23 +00002099 loop.Bind();
2100 }
2101
2102 // If the test is always true, there is no need to compile it.
2103 if (info == DONT_KNOW) {
2104 JumpTarget body;
Steve Blockd0582a62009-12-15 09:54:21 +00002105 LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
Steve Block3ce2e202009-11-05 08:53:23 +00002106 if (has_valid_frame()) {
2107 Branch(false, node->break_target());
2108 }
2109 if (has_valid_frame() || body.is_linked()) {
2110 body.Bind();
2111 }
2112 }
2113
2114 if (has_valid_frame()) {
2115 CheckStack(); // TODO(1222600): ignore if body contains calls.
2116 VisitAndSpill(node->body());
2117
2118 if (node->next() == NULL) {
2119 // If there is no update statement and control flow can fall out
2120 // of the loop, jump directly to the continue label.
2121 if (has_valid_frame()) {
2122 node->continue_target()->Jump();
2123 }
2124 } else {
2125 // If there is an update statement and control flow can reach it
2126 // via falling out of the body of the loop or continuing, we
2127 // compile the update statement.
2128 if (node->continue_target()->is_linked()) {
2129 node->continue_target()->Bind();
2130 }
2131 if (has_valid_frame()) {
2132 // Record source position of the statement as this code which is
2133 // after the code for the body actually belongs to the loop
2134 // statement and not the body.
2135 CodeForStatementPosition(node);
2136 VisitAndSpill(node->next());
2137 loop.Jump();
2138 }
2139 }
2140 }
2141 if (node->break_target()->is_linked()) {
2142 node->break_target()->Bind();
2143 }
Steve Block6ded16b2010-05-10 14:33:55 +01002144 DecrementLoopNesting();
Steve Blocka7e24c12009-10-30 11:49:00 +00002145 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2146}
2147
2148
2149void CodeGenerator::VisitForInStatement(ForInStatement* node) {
2150#ifdef DEBUG
2151 int original_height = frame_->height();
2152#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002153 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002154 Comment cmnt(masm_, "[ ForInStatement");
2155 CodeForStatementPosition(node);
2156
2157 JumpTarget primitive;
2158 JumpTarget jsobject;
2159 JumpTarget fixed_array;
2160 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
2161 JumpTarget end_del_check;
2162 JumpTarget exit;
2163
2164 // Get the object to enumerate over (converted to JSObject).
2165 LoadAndSpill(node->enumerable());
2166
2167 // Both SpiderMonkey and kjs ignore null and undefined in contrast
2168 // to the specification. 12.6.4 mandates a call to ToObject.
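  // For example, 'for (var p in null) {}' simply performs no iterations.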
2169 frame_->EmitPop(r0);
2170 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2171 __ cmp(r0, ip);
2172 exit.Branch(eq);
2173 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2174 __ cmp(r0, ip);
2175 exit.Branch(eq);
2176
2177 // Stack layout in body:
2178 // [iteration counter (Smi)]
2179 // [length of array]
2180 // [FixedArray]
2181 // [Map or 0]
2182 // [Object]
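  // The 'Map or 0' slot holds the receiver's map when the enum cache is used;
  // a smi zero means a plain FixedArray of names is used instead, so the map
  // check below never matches and every key goes through FILTER_KEY.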
2183
2184 // Check if enumerable is already a JSObject
2185 __ tst(r0, Operand(kSmiTagMask));
2186 primitive.Branch(eq);
2187 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
2188 jsobject.Branch(hs);
2189
2190 primitive.Bind();
2191 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00002192 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00002193
2194 jsobject.Bind();
2195 // Get the set of properties (as a FixedArray or Map).
Steve Blockd0582a62009-12-15 09:54:21 +00002196 // r0: value to be iterated over
2197 frame_->EmitPush(r0); // Push the object being iterated over.
2198
2199 // Check cache validity in generated code. This is a fast case for
2200 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
2201 // guarantee cache validity, call the runtime system to check cache
2202 // validity or get the property names in a fixed array.
2203 JumpTarget call_runtime;
2204 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2205 JumpTarget check_prototype;
2206 JumpTarget use_cache;
2207 __ mov(r1, Operand(r0));
2208 loop.Bind();
2209 // Check that there are no elements.
2210 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
2211 __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
2212 __ cmp(r2, r4);
2213 call_runtime.Branch(ne);
2214 // Check that instance descriptors are not empty so that we can
2215 // check for an enum cache. Leave the map in r3 for the subsequent
2216 // prototype load.
2217 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2218 __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
2219 __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
2220 __ cmp(r2, ip);
2221 call_runtime.Branch(eq);
2222 // Check that there is an enum cache in the non-empty instance
2223 // descriptors. This is the case if the next enumeration index
2224 // field does not contain a smi.
2225 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
2226 __ tst(r2, Operand(kSmiTagMask));
2227 call_runtime.Branch(eq);
2228 // For all objects but the receiver, check that the cache is empty.
2229 // r4: empty fixed array root.
2230 __ cmp(r1, r0);
2231 check_prototype.Branch(eq);
2232 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
2233 __ cmp(r2, r4);
2234 call_runtime.Branch(ne);
2235 check_prototype.Bind();
2236 // Load the prototype from the map and loop if non-null.
2237 __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
2238 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2239 __ cmp(r1, ip);
2240 loop.Branch(ne);
2241 // The enum cache is valid. Load the map of the object being
2242 // iterated over and use the cache for the iteration.
2243 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
2244 use_cache.Jump();
2245
2246 call_runtime.Bind();
2247 // Call the runtime to get the property names for the object.
2248 frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
Steve Blocka7e24c12009-10-30 11:49:00 +00002249 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
2250
Steve Blockd0582a62009-12-15 09:54:21 +00002251 // If we got a map from the runtime call, we can do a fast
2252 // modification check. Otherwise, we got a fixed array, and we have
2253 // to do a slow check.
2254 // r0: map or fixed array (result from call to
2255 // Runtime::kGetPropertyNamesFast)
Steve Blocka7e24c12009-10-30 11:49:00 +00002256 __ mov(r2, Operand(r0));
2257 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
2258 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
2259 __ cmp(r1, ip);
2260 fixed_array.Branch(ne);
2261
Steve Blockd0582a62009-12-15 09:54:21 +00002262 use_cache.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00002263 // Get enum cache
Steve Blockd0582a62009-12-15 09:54:21 +00002264 // r0: map (either the result from a call to
2265 // Runtime::kGetPropertyNamesFast or has been fetched directly from
2266 // the object)
Steve Blocka7e24c12009-10-30 11:49:00 +00002267 __ mov(r1, Operand(r0));
2268 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
2269 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
2270 __ ldr(r2,
2271 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
2272
2273 frame_->EmitPush(r0); // map
2274 frame_->EmitPush(r2); // enum cache bridge cache
2275 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
2276 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
2277 frame_->EmitPush(r0);
2278 __ mov(r0, Operand(Smi::FromInt(0)));
2279 frame_->EmitPush(r0);
2280 entry.Jump();
2281
2282 fixed_array.Bind();
2283 __ mov(r1, Operand(Smi::FromInt(0)));
2284 frame_->EmitPush(r1); // insert 0 in place of Map
2285 frame_->EmitPush(r0);
2286
2287 // Push the length of the array and the initial index onto the stack.
2288 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
2289 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
2290 frame_->EmitPush(r0);
2291 __ mov(r0, Operand(Smi::FromInt(0))); // init index
2292 frame_->EmitPush(r0);
2293
2294 // Condition.
2295 entry.Bind();
2296 // sp[0] : index
2297 // sp[1] : array/enum cache length
2298 // sp[2] : array or enum cache
2299 // sp[3] : 0 or map
2300 // sp[4] : enumerable
2301 // Grab the current frame's height for the break and continue
2302 // targets only after all the state is pushed on the frame.
Kristian Monsen25f61362010-05-21 11:50:48 +01002303 node->break_target()->SetExpectedHeight();
2304 node->continue_target()->SetExpectedHeight();
Steve Blocka7e24c12009-10-30 11:49:00 +00002305
Kristian Monsen25f61362010-05-21 11:50:48 +01002306 // Load the current count to r0, load the length to r1.
2307 __ ldrd(r0, frame_->ElementAt(0));
Steve Block6ded16b2010-05-10 14:33:55 +01002308 __ cmp(r0, r1); // compare to the array length
Steve Blocka7e24c12009-10-30 11:49:00 +00002309 node->break_target()->Branch(hs);
2310
2311 __ ldr(r0, frame_->ElementAt(0));
2312
2313 // Get the i'th entry of the array.
2314 __ ldr(r2, frame_->ElementAt(2));
2315 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2316 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
2317
2318 // Get Map or 0.
2319 __ ldr(r2, frame_->ElementAt(3));
2320 // Check if this (still) matches the map of the enumerable.
2321 // If not, we have to filter the key.
2322 __ ldr(r1, frame_->ElementAt(4));
2323 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
2324 __ cmp(r1, Operand(r2));
2325 end_del_check.Branch(eq);
2326
2327 // Convert the entry to a string (or null if it isn't a property anymore).
2328 __ ldr(r0, frame_->ElementAt(4)); // push enumerable
2329 frame_->EmitPush(r0);
2330 frame_->EmitPush(r3); // push entry
Steve Blockd0582a62009-12-15 09:54:21 +00002331 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002332 __ mov(r3, Operand(r0));
2333
2334 // If the property has been removed while iterating, we just skip it.
2335 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2336 __ cmp(r3, ip);
2337 node->continue_target()->Branch(eq);
2338
2339 end_del_check.Bind();
2340 // Store the entry in the 'each' expression and take another spin in the
2341 // loop. r3: i'th entry of the enum cache (or string thereof)
2342 frame_->EmitPush(r3); // push entry
2343 { Reference each(this, node->each());
2344 if (!each.is_illegal()) {
2345 if (each.size() > 0) {
2346 __ ldr(r0, frame_->ElementAt(each.size()));
2347 frame_->EmitPush(r0);
Leon Clarked91b9f72010-01-27 17:25:45 +00002348 each.SetValue(NOT_CONST_INIT);
2349 frame_->Drop(2);
2350 } else {
2351 // If the reference was to a slot we rely on the convenient property
2352 // that it doesn't matter whether a value (eg, r3 pushed above) is
2353 // right on top of or right underneath a zero-sized reference.
2354 each.SetValue(NOT_CONST_INIT);
2355 frame_->Drop();
Steve Blocka7e24c12009-10-30 11:49:00 +00002356 }
2357 }
2358 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002359 // Body.
2360 CheckStack(); // TODO(1222600): ignore if body contains calls.
2361 VisitAndSpill(node->body());
2362
2363 // Next. Reestablish a spilled frame in case we are coming here via
2364 // a continue in the body.
2365 node->continue_target()->Bind();
2366 frame_->SpillAll();
2367 frame_->EmitPop(r0);
2368 __ add(r0, r0, Operand(Smi::FromInt(1)));
2369 frame_->EmitPush(r0);
2370 entry.Jump();
2371
2372 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
2373 // any frame.
2374 node->break_target()->Bind();
2375 frame_->Drop(5);
2376
2377 // Exit.
2378 exit.Bind();
2379 node->continue_target()->Unuse();
2380 node->break_target()->Unuse();
2381 ASSERT(frame_->height() == original_height);
2382}
2383
2384
Steve Block3ce2e202009-11-05 08:53:23 +00002385void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002386#ifdef DEBUG
2387 int original_height = frame_->height();
2388#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002389 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00002390 Comment cmnt(masm_, "[ TryCatchStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002391 CodeForStatementPosition(node);
2392
2393 JumpTarget try_block;
2394 JumpTarget exit;
2395
2396 try_block.Call();
2397 // --- Catch block ---
2398 frame_->EmitPush(r0);
2399
2400 // Store the caught exception in the catch variable.
Leon Clarkee46be812010-01-19 14:06:41 +00002401 Variable* catch_var = node->catch_var()->var();
2402 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
2403 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
Steve Blocka7e24c12009-10-30 11:49:00 +00002404
2405 // Remove the exception from the stack.
2406 frame_->Drop();
2407
2408 VisitStatementsAndSpill(node->catch_block()->statements());
2409 if (frame_ != NULL) {
2410 exit.Jump();
2411 }
2412
2413
2414 // --- Try block ---
2415 try_block.Bind();
2416
2417 frame_->PushTryHandler(TRY_CATCH_HANDLER);
2418 int handler_height = frame_->height();
2419
2420 // Shadow the labels for all escapes from the try block, including
2421 // returns. During shadowing, the original label is hidden as the
2422 // LabelShadow and operations on the original actually affect the
2423 // shadowing label.
2424 //
2425 // We should probably try to unify the escaping labels and the return
2426 // label.
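  // For example, a 'return' inside the try block must first unlink the try
  // handler pushed above before control may leave the block.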
2427 int nof_escapes = node->escaping_targets()->length();
2428 List<ShadowTarget*> shadows(1 + nof_escapes);
2429
2430 // Add the shadow target for the function return.
2431 static const int kReturnShadowIndex = 0;
2432 shadows.Add(new ShadowTarget(&function_return_));
2433 bool function_return_was_shadowed = function_return_is_shadowed_;
2434 function_return_is_shadowed_ = true;
2435 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2436
2437 // Add the remaining shadow targets.
2438 for (int i = 0; i < nof_escapes; i++) {
2439 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2440 }
2441
2442 // Generate code for the statements in the try block.
2443 VisitStatementsAndSpill(node->try_block()->statements());
2444
2445 // Stop the introduced shadowing and count the number of required unlinks.
2446 // After shadowing stops, the original labels are unshadowed and the
2447 // LabelShadows represent the formerly shadowing labels.
2448 bool has_unlinks = false;
2449 for (int i = 0; i < shadows.length(); i++) {
2450 shadows[i]->StopShadowing();
2451 has_unlinks = has_unlinks || shadows[i]->is_linked();
2452 }
2453 function_return_is_shadowed_ = function_return_was_shadowed;
2454
2455 // Get an external reference to the handler address.
2456 ExternalReference handler_address(Top::k_handler_address);
2457
2458 // If we can fall off the end of the try block, unlink from try chain.
2459 if (has_valid_frame()) {
2460 // The next handler address is on top of the frame. Unlink from
2461 // the handler list and drop the rest of this handler from the
2462 // frame.
2463 ASSERT(StackHandlerConstants::kNextOffset == 0);
2464 frame_->EmitPop(r1);
2465 __ mov(r3, Operand(handler_address));
2466 __ str(r1, MemOperand(r3));
2467 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2468 if (has_unlinks) {
2469 exit.Jump();
2470 }
2471 }
2472
2473 // Generate unlink code for the (formerly) shadowing labels that have been
2474 // jumped to. Deallocate each shadow target.
2475 for (int i = 0; i < shadows.length(); i++) {
2476 if (shadows[i]->is_linked()) {
2477 // Unlink from try chain;
2478 shadows[i]->Bind();
2479 // Because we can be jumping here (to spilled code) from unspilled
2480 // code, we need to reestablish a spilled frame at this block.
2481 frame_->SpillAll();
2482
2483 // Reload sp from the top handler, because some statements that we
2484 // break from (eg, for...in) may have left stuff on the stack.
2485 __ mov(r3, Operand(handler_address));
2486 __ ldr(sp, MemOperand(r3));
2487 frame_->Forget(frame_->height() - handler_height);
2488
2489 ASSERT(StackHandlerConstants::kNextOffset == 0);
2490 frame_->EmitPop(r1);
2491 __ str(r1, MemOperand(r3));
2492 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2493
2494 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2495 frame_->PrepareForReturn();
2496 }
2497 shadows[i]->other_target()->Jump();
2498 }
2499 }
2500
2501 exit.Bind();
2502 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2503}
2504
2505
Steve Block3ce2e202009-11-05 08:53:23 +00002506void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002507#ifdef DEBUG
2508 int original_height = frame_->height();
2509#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002510 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Block3ce2e202009-11-05 08:53:23 +00002511 Comment cmnt(masm_, "[ TryFinallyStatement");
Steve Blocka7e24c12009-10-30 11:49:00 +00002512 CodeForStatementPosition(node);
2513
2514 // State: Used to keep track of reason for entering the finally
2515 // block. Should probably be extended to hold information for
2516 // break/continue from within the try block.
2517 enum { FALLING, THROWING, JUMPING };
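  // FALLING:      the try block completed normally.
  // THROWING:     an exception was thrown out of the try block.
  // JUMPING + i:  the i'th shadowed jump target (return, break, continue)
  //               was taken from inside the try block.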
2518
2519 JumpTarget try_block;
2520 JumpTarget finally_block;
2521
2522 try_block.Call();
2523
2524 frame_->EmitPush(r0); // save exception object on the stack
2525 // In case of thrown exceptions, this is where we continue.
2526 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2527 finally_block.Jump();
2528
2529 // --- Try block ---
2530 try_block.Bind();
2531
2532 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2533 int handler_height = frame_->height();
2534
2535 // Shadow the labels for all escapes from the try block, including
2536 // returns. Shadowing hides the original label as the LabelShadow and
2537 // operations on the original actually affect the shadowing label.
2538 //
2539 // We should probably try to unify the escaping labels and the return
2540 // label.
2541 int nof_escapes = node->escaping_targets()->length();
2542 List<ShadowTarget*> shadows(1 + nof_escapes);
2543
2544 // Add the shadow target for the function return.
2545 static const int kReturnShadowIndex = 0;
2546 shadows.Add(new ShadowTarget(&function_return_));
2547 bool function_return_was_shadowed = function_return_is_shadowed_;
2548 function_return_is_shadowed_ = true;
2549 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2550
2551 // Add the remaining shadow targets.
2552 for (int i = 0; i < nof_escapes; i++) {
2553 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2554 }
2555
2556 // Generate code for the statements in the try block.
2557 VisitStatementsAndSpill(node->try_block()->statements());
2558
2559 // Stop the introduced shadowing and count the number of required unlinks.
2560 // After shadowing stops, the original labels are unshadowed and the
2561 // LabelShadows represent the formerly shadowing labels.
2562 int nof_unlinks = 0;
2563 for (int i = 0; i < shadows.length(); i++) {
2564 shadows[i]->StopShadowing();
2565 if (shadows[i]->is_linked()) nof_unlinks++;
2566 }
2567 function_return_is_shadowed_ = function_return_was_shadowed;
2568
2569 // Get an external reference to the handler address.
2570 ExternalReference handler_address(Top::k_handler_address);
2571
2572 // If we can fall off the end of the try block, unlink from the try
2573 // chain and set the state on the frame to FALLING.
2574 if (has_valid_frame()) {
2575 // The next handler address is on top of the frame.
2576 ASSERT(StackHandlerConstants::kNextOffset == 0);
2577 frame_->EmitPop(r1);
2578 __ mov(r3, Operand(handler_address));
2579 __ str(r1, MemOperand(r3));
2580 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2581
2582 // Fake a top of stack value (unneeded when FALLING) and set the
2583 // state in r2, then jump around the unlink blocks if any.
2584 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2585 frame_->EmitPush(r0);
2586 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2587 if (nof_unlinks > 0) {
2588 finally_block.Jump();
2589 }
2590 }
2591
2592 // Generate code to unlink and set the state for the (formerly)
2593 // shadowing targets that have been jumped to.
2594 for (int i = 0; i < shadows.length(); i++) {
2595 if (shadows[i]->is_linked()) {
2596 // If we have come from the shadowed return, the return value is
2597 // in (a non-refcounted reference to) r0. We must preserve it
2598 // until it is pushed.
2599 //
2600 // Because we can be jumping here (to spilled code) from
2601 // unspilled code, we need to reestablish a spilled frame at
2602 // this block.
2603 shadows[i]->Bind();
2604 frame_->SpillAll();
2605
2606 // Reload sp from the top handler, because some statements that
2607 // we break from (eg, for...in) may have left stuff on the
2608 // stack.
2609 __ mov(r3, Operand(handler_address));
2610 __ ldr(sp, MemOperand(r3));
2611 frame_->Forget(frame_->height() - handler_height);
2612
2613 // Unlink this handler and drop it from the frame. The next
2614 // handler address is currently on top of the frame.
2615 ASSERT(StackHandlerConstants::kNextOffset == 0);
2616 frame_->EmitPop(r1);
2617 __ str(r1, MemOperand(r3));
2618 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2619
2620 if (i == kReturnShadowIndex) {
2621 // If this label shadowed the function return, materialize the
2622 // return value on the stack.
2623 frame_->EmitPush(r0);
2624 } else {
2625 // Fake TOS for targets that shadowed breaks and continues.
2626 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2627 frame_->EmitPush(r0);
2628 }
2629 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2630 if (--nof_unlinks > 0) {
2631 // If this is not the last unlink block, jump around the next.
2632 finally_block.Jump();
2633 }
2634 }
2635 }
2636
2637 // --- Finally block ---
2638 finally_block.Bind();
2639
2640 // Push the state on the stack.
2641 frame_->EmitPush(r2);
2642
2643 // We keep two elements on the stack - the (possibly faked) result
2644 // and the state - while evaluating the finally block.
2645 //
2646 // Generate code for the statements in the finally block.
2647 VisitStatementsAndSpill(node->finally_block()->statements());
2648
2649 if (has_valid_frame()) {
2650 // Restore state and return value or faked TOS.
2651 frame_->EmitPop(r2);
2652 frame_->EmitPop(r0);
2653 }
2654
2655 // Generate code to jump to the right destination for all used
2656 // formerly shadowing targets. Deallocate each shadow target.
2657 for (int i = 0; i < shadows.length(); i++) {
2658 if (has_valid_frame() && shadows[i]->is_bound()) {
2659 JumpTarget* original = shadows[i]->other_target();
2660 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2661 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2662 JumpTarget skip;
2663 skip.Branch(ne);
2664 frame_->PrepareForReturn();
2665 original->Jump();
2666 skip.Bind();
2667 } else {
2668 original->Branch(eq);
2669 }
2670 }
2671 }
2672
2673 if (has_valid_frame()) {
2674 // Check if we need to rethrow the exception.
2675 JumpTarget exit;
2676 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2677 exit.Branch(ne);
2678
2679 // Rethrow exception.
2680 frame_->EmitPush(r0);
2681 frame_->CallRuntime(Runtime::kReThrow, 1);
2682
2683 // Done.
2684 exit.Bind();
2685 }
2686 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2687}
2688
2689
2690void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2691#ifdef DEBUG
2692 int original_height = frame_->height();
2693#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002694 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002695 Comment cmnt(masm_, "[ DebuggerStatement");
2696 CodeForStatementPosition(node);
2697#ifdef ENABLE_DEBUGGER_SUPPORT
Andrei Popescu402d9372010-02-26 13:31:12 +00002698 frame_->DebugBreak();
Steve Blocka7e24c12009-10-30 11:49:00 +00002699#endif
2700 // Ignore the return value.
2701 ASSERT(frame_->height() == original_height);
2702}
2703
2704
Steve Block6ded16b2010-05-10 14:33:55 +01002705void CodeGenerator::InstantiateFunction(
2706 Handle<SharedFunctionInfo> function_info) {
2707 VirtualFrame::SpilledScope spilled_scope(frame_);
2708 __ mov(r0, Operand(function_info));
Leon Clarkee46be812010-01-19 14:06:41 +00002709 // Use the fast case closure allocation code that allocates in new
2710 // space for nested functions that don't need their literals cloned.
Steve Block6ded16b2010-05-10 14:33:55 +01002711 if (scope()->is_function_scope() && function_info->num_literals() == 0) {
Leon Clarkee46be812010-01-19 14:06:41 +00002712 FastNewClosureStub stub;
2713 frame_->EmitPush(r0);
2714 frame_->CallStub(&stub, 1);
2715 frame_->EmitPush(r0);
2716 } else {
2717 // Create a new closure.
2718 frame_->EmitPush(cp);
2719 frame_->EmitPush(r0);
2720 frame_->CallRuntime(Runtime::kNewClosure, 2);
2721 frame_->EmitPush(r0);
2722 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002723}
2724
2725
2726void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2727#ifdef DEBUG
2728 int original_height = frame_->height();
2729#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002730 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002731 Comment cmnt(masm_, "[ FunctionLiteral");
2732
Steve Block6ded16b2010-05-10 14:33:55 +01002733 // Build the function info and instantiate it.
2734 Handle<SharedFunctionInfo> function_info =
2735 Compiler::BuildFunctionInfo(node, script(), this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002736 // Check for stack-overflow exception.
2737 if (HasStackOverflow()) {
2738 ASSERT(frame_->height() == original_height);
2739 return;
2740 }
Steve Block6ded16b2010-05-10 14:33:55 +01002741 InstantiateFunction(function_info);
2742 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00002743}
2744
2745
Steve Block6ded16b2010-05-10 14:33:55 +01002746void CodeGenerator::VisitSharedFunctionInfoLiteral(
2747 SharedFunctionInfoLiteral* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002748#ifdef DEBUG
2749 int original_height = frame_->height();
2750#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002751 VirtualFrame::SpilledScope spilled_scope(frame_);
2752 Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
2753 InstantiateFunction(node->shared_function_info());
2754 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00002755}
2756
2757
2758void CodeGenerator::VisitConditional(Conditional* node) {
2759#ifdef DEBUG
2760 int original_height = frame_->height();
2761#endif
Steve Block6ded16b2010-05-10 14:33:55 +01002762 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002763 Comment cmnt(masm_, "[ Conditional");
2764 JumpTarget then;
2765 JumpTarget else_;
Steve Blockd0582a62009-12-15 09:54:21 +00002766 LoadConditionAndSpill(node->condition(), &then, &else_, true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002767 if (has_valid_frame()) {
2768 Branch(false, &else_);
2769 }
2770 if (has_valid_frame() || then.is_linked()) {
2771 then.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002772 LoadAndSpill(node->then_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002773 }
2774 if (else_.is_linked()) {
2775 JumpTarget exit;
2776 if (has_valid_frame()) exit.Jump();
2777 else_.Bind();
Steve Blockd0582a62009-12-15 09:54:21 +00002778 LoadAndSpill(node->else_expression());
Steve Blocka7e24c12009-10-30 11:49:00 +00002779 if (exit.is_linked()) exit.Bind();
2780 }
Steve Block6ded16b2010-05-10 14:33:55 +01002781 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00002782}
2783
2784
2785void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002786 if (slot->type() == Slot::LOOKUP) {
2787 ASSERT(slot->var()->is_dynamic());
2788
Steve Block6ded16b2010-05-10 14:33:55 +01002789 // JumpTargets do not yet support merging frames so the frame must be
2790 // spilled when jumping to these targets.
Steve Blocka7e24c12009-10-30 11:49:00 +00002791 JumpTarget slow;
2792 JumpTarget done;
2793
Kristian Monsen25f61362010-05-21 11:50:48 +01002794 // Generate fast case for loading from slots that correspond to
2795 // local/global variables or arguments unless they are shadowed by
2796 // eval-introduced bindings.
2797 EmitDynamicLoadFromSlotFastCase(slot,
2798 typeof_state,
2799 &slow,
2800 &done);
Steve Blocka7e24c12009-10-30 11:49:00 +00002801
2802 slow.Bind();
Steve Block6ded16b2010-05-10 14:33:55 +01002803 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002804 frame_->EmitPush(cp);
2805 __ mov(r0, Operand(slot->var()->name()));
2806 frame_->EmitPush(r0);
2807
2808 if (typeof_state == INSIDE_TYPEOF) {
2809 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2810 } else {
2811 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2812 }
2813
2814 done.Bind();
2815 frame_->EmitPush(r0);
2816
2817 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002818 Register scratch = VirtualFrame::scratch0();
2819 frame_->EmitPush(SlotOperand(slot, scratch));
Steve Blocka7e24c12009-10-30 11:49:00 +00002820 if (slot->var()->mode() == Variable::CONST) {
2821 // Const slots may contain 'the hole' value (the constant hasn't been
2822 // initialized yet) which needs to be converted into the 'undefined'
2823 // value.
2824 Comment cmnt(masm_, "[ Unhole const");
Steve Block6ded16b2010-05-10 14:33:55 +01002825 frame_->EmitPop(scratch);
Steve Blocka7e24c12009-10-30 11:49:00 +00002826 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01002827 __ cmp(scratch, ip);
2828 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
2829 frame_->EmitPush(scratch);
Steve Blocka7e24c12009-10-30 11:49:00 +00002830 }
2831 }
2832}
2833
2834
Steve Block6ded16b2010-05-10 14:33:55 +01002835void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
2836 TypeofState state) {
2837 LoadFromSlot(slot, state);
2838
2839 // Bail out quickly if we're not using lazy arguments allocation.
2840 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
2841
2842 // ... or if the slot isn't a non-parameter arguments slot.
2843 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
2844
2845 VirtualFrame::SpilledScope spilled_scope(frame_);
2846
2847 // Load the loaded value from the stack into r0 but leave it on the
2848 // stack.
2849 __ ldr(r0, MemOperand(sp, 0));
2850
2851 // If the loaded value is the sentinel that indicates that we
2852 // haven't loaded the arguments object yet, we need to do it now.
2853 JumpTarget exit;
2854 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2855 __ cmp(r0, ip);
2856 exit.Branch(ne);
2857 frame_->Drop();
2858 StoreArgumentsObject(false);
2859 exit.Bind();
2860}
2861
2862
Leon Clarkee46be812010-01-19 14:06:41 +00002863void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
2864 ASSERT(slot != NULL);
2865 if (slot->type() == Slot::LOOKUP) {
Steve Block6ded16b2010-05-10 14:33:55 +01002866 VirtualFrame::SpilledScope spilled_scope(frame_);
Leon Clarkee46be812010-01-19 14:06:41 +00002867 ASSERT(slot->var()->is_dynamic());
2868
2869 // For now, just do a runtime call.
2870 frame_->EmitPush(cp);
2871 __ mov(r0, Operand(slot->var()->name()));
2872 frame_->EmitPush(r0);
2873
2874 if (init_state == CONST_INIT) {
2875 // Same as the case for a normal store, but ignores attribute
2876 // (e.g. READ_ONLY) of context slot so that we can initialize
2877 // const properties (introduced via eval("const foo = (some
2878 // expr);")). Also, uses the current function context instead of
2879 // the top context.
2880 //
 2881 // Note that we must declare 'foo' upon entry of eval(), via a
2882 // context slot declaration, but we cannot initialize it at the
2883 // same time, because the const declaration may be at the end of
2884 // the eval code (sigh...) and the const variable may have been
2885 // used before (where its value is 'undefined'). Thus, we can only
2886 // do the initialization when we actually encounter the expression
2887 // and when the expression operands are defined and valid, and
2888 // thus we need the split into 2 operations: declaration of the
2889 // context slot followed by initialization.
2890 frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
2891 } else {
2892 frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
2893 }
2894 // Storing a variable must keep the (new) value on the expression
2895 // stack. This is necessary for compiling assignment expressions.
2896 frame_->EmitPush(r0);
2897
2898 } else {
2899 ASSERT(!slot->var()->is_dynamic());
Steve Block6ded16b2010-05-10 14:33:55 +01002900 Register scratch = VirtualFrame::scratch0();
2901 VirtualFrame::RegisterAllocationScope scope(this);
Leon Clarkee46be812010-01-19 14:06:41 +00002902
Steve Block6ded16b2010-05-10 14:33:55 +01002903 // The frame must be spilled when branching to this target.
Leon Clarkee46be812010-01-19 14:06:41 +00002904 JumpTarget exit;
Steve Block6ded16b2010-05-10 14:33:55 +01002905
Leon Clarkee46be812010-01-19 14:06:41 +00002906 if (init_state == CONST_INIT) {
2907 ASSERT(slot->var()->mode() == Variable::CONST);
2908 // Only the first const initialization must be executed (the slot
2909 // still contains 'the hole' value). When the assignment is
2910 // executed, the code is identical to a normal store (see below).
2911 Comment cmnt(masm_, "[ Init const");
Steve Block6ded16b2010-05-10 14:33:55 +01002912 __ ldr(scratch, SlotOperand(slot, scratch));
Leon Clarkee46be812010-01-19 14:06:41 +00002913 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01002914 __ cmp(scratch, ip);
2915 frame_->SpillAll();
Leon Clarkee46be812010-01-19 14:06:41 +00002916 exit.Branch(ne);
2917 }
2918
2919 // We must execute the store. Storing a variable must keep the
2920 // (new) value on the stack. This is necessary for compiling
2921 // assignment expressions.
2922 //
2923 // Note: We will reach here even with slot->var()->mode() ==
2924 // Variable::CONST because of const declarations which will
2925 // initialize consts to 'the hole' value and by doing so, end up
 2926 // calling this code. scratch may be loaded with context; used below in
2927 // RecordWrite.
Steve Block6ded16b2010-05-10 14:33:55 +01002928 Register tos = frame_->Peek();
2929 __ str(tos, SlotOperand(slot, scratch));
Leon Clarkee46be812010-01-19 14:06:41 +00002930 if (slot->type() == Slot::CONTEXT) {
2931 // Skip write barrier if the written value is a smi.
Steve Block6ded16b2010-05-10 14:33:55 +01002932 __ tst(tos, Operand(kSmiTagMask));
2933 // We don't use tos any more after here.
2934 VirtualFrame::SpilledScope spilled_scope(frame_);
Leon Clarkee46be812010-01-19 14:06:41 +00002935 exit.Branch(eq);
Steve Block6ded16b2010-05-10 14:33:55 +01002936 // scratch is loaded with context when calling SlotOperand above.
Leon Clarkee46be812010-01-19 14:06:41 +00002937 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
2938 __ mov(r3, Operand(offset));
Steve Block6ded16b2010-05-10 14:33:55 +01002939 // r1 could be identical with tos, but that doesn't matter.
2940 __ RecordWrite(scratch, r3, r1);
Leon Clarkee46be812010-01-19 14:06:41 +00002941 }
2942 // If we definitely did not jump over the assignment, we do not need
2943 // to bind the exit label. Doing so can defeat peephole
2944 // optimization.
2945 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
Steve Block6ded16b2010-05-10 14:33:55 +01002946 frame_->SpillAll();
Leon Clarkee46be812010-01-19 14:06:41 +00002947 exit.Bind();
2948 }
2949 }
2950}
2951
2952
Steve Blocka7e24c12009-10-30 11:49:00 +00002953void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
2954 TypeofState typeof_state,
Steve Blocka7e24c12009-10-30 11:49:00 +00002955 JumpTarget* slow) {
2956 // Check that no extension objects have been created by calls to
2957 // eval from the current scope to the global scope.
Steve Block6ded16b2010-05-10 14:33:55 +01002958 Register tmp = frame_->scratch0();
2959 Register tmp2 = frame_->scratch1();
Steve Blocka7e24c12009-10-30 11:49:00 +00002960 Register context = cp;
2961 Scope* s = scope();
2962 while (s != NULL) {
2963 if (s->num_heap_slots() > 0) {
2964 if (s->calls_eval()) {
Steve Block6ded16b2010-05-10 14:33:55 +01002965 frame_->SpillAll();
Steve Blocka7e24c12009-10-30 11:49:00 +00002966 // Check that extension is NULL.
2967 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
2968 __ tst(tmp2, tmp2);
2969 slow->Branch(ne);
2970 }
2971 // Load next context in chain.
2972 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
2973 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2974 context = tmp;
2975 }
2976 // If no outer scope calls eval, we do not need to check more
2977 // context extensions.
2978 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2979 s = s->outer_scope();
2980 }
2981
2982 if (s->is_eval_scope()) {
Steve Block6ded16b2010-05-10 14:33:55 +01002983 frame_->SpillAll();
Steve Blocka7e24c12009-10-30 11:49:00 +00002984 Label next, fast;
Steve Block6ded16b2010-05-10 14:33:55 +01002985 __ Move(tmp, context);
Steve Blocka7e24c12009-10-30 11:49:00 +00002986 __ bind(&next);
2987 // Terminate at global context.
2988 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2989 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
2990 __ cmp(tmp2, ip);
2991 __ b(eq, &fast);
2992 // Check that extension is NULL.
2993 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2994 __ tst(tmp2, tmp2);
2995 slow->Branch(ne);
2996 // Load next context in chain.
2997 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
2998 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
2999 __ b(&next);
3000 __ bind(&fast);
3001 }
3002
Steve Blocka7e24c12009-10-30 11:49:00 +00003003 // Load the global object.
3004 LoadGlobal();
Steve Block6ded16b2010-05-10 14:33:55 +01003005 // Set up the name register and call the load IC.
3006 frame_->CallLoadIC(slot->var()->name(),
3007 typeof_state == INSIDE_TYPEOF
3008 ? RelocInfo::CODE_TARGET
3009 : RelocInfo::CODE_TARGET_CONTEXT);
Steve Blocka7e24c12009-10-30 11:49:00 +00003010 // Drop the global object. The result is in r0.
3011 frame_->Drop();
3012}
3013
3014
Kristian Monsen25f61362010-05-21 11:50:48 +01003015void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
3016 TypeofState typeof_state,
3017 JumpTarget* slow,
3018 JumpTarget* done) {
3019 // Generate fast-case code for variables that might be shadowed by
3020 // eval-introduced variables. Eval is used a lot without
3021 // introducing variables. In those cases, we do not want to
3022 // perform a runtime call for all variables in the scope
3023 // containing the eval.
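  // Illustrative example (hypothetical names): in
  //   function f(x) { eval(s); return x; }
  // 'x' is dynamic-local; the fast case below loads the parameter's slot
  // directly and only falls back to the runtime lookup if an
  // eval-introduced binding might shadow it.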
3024 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
3025 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
3026 frame_->SpillAll();
3027 done->Jump();
3028
3029 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
3030 frame_->SpillAll();
3031 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
3032 Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
3033 if (potential_slot != NULL) {
3034 // Generate fast case for locals that rewrite to slots.
3035 __ ldr(r0,
3036 ContextSlotOperandCheckExtensions(potential_slot,
3037 r1,
3038 r2,
3039 slow));
3040 if (potential_slot->var()->mode() == Variable::CONST) {
3041 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3042 __ cmp(r0, ip);
3043 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
3044 }
3045 done->Jump();
3046 } else if (rewrite != NULL) {
3047 // Generate fast case for argument loads.
3048 Property* property = rewrite->AsProperty();
3049 if (property != NULL) {
3050 VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
3051 Literal* key_literal = property->key()->AsLiteral();
3052 if (obj_proxy != NULL &&
3053 key_literal != NULL &&
3054 obj_proxy->IsArguments() &&
3055 key_literal->handle()->IsSmi()) {
3056 // Load arguments object if there are no eval-introduced
3057 // variables. Then load the argument from the arguments
3058 // object using keyed load.
3059 __ ldr(r0,
3060 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
3061 r1,
3062 r2,
3063 slow));
3064 frame_->EmitPush(r0);
3065 __ mov(r1, Operand(key_literal->handle()));
3066 frame_->EmitPush(r1);
3067 EmitKeyedLoad();
3068 done->Jump();
3069 }
3070 }
3071 }
3072 }
3073}
3074
3075
Steve Blocka7e24c12009-10-30 11:49:00 +00003076void CodeGenerator::VisitSlot(Slot* node) {
3077#ifdef DEBUG
3078 int original_height = frame_->height();
3079#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003080 Comment cmnt(masm_, "[ Slot");
Steve Block6ded16b2010-05-10 14:33:55 +01003081 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
3082 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003083}
3084
3085
3086void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
3087#ifdef DEBUG
3088 int original_height = frame_->height();
3089#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003090 Comment cmnt(masm_, "[ VariableProxy");
3091
3092 Variable* var = node->var();
3093 Expression* expr = var->rewrite();
3094 if (expr != NULL) {
3095 Visit(expr);
3096 } else {
3097 ASSERT(var->is_global());
3098 Reference ref(this, node);
Steve Block6ded16b2010-05-10 14:33:55 +01003099 ref.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00003100 }
Steve Block6ded16b2010-05-10 14:33:55 +01003101 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003102}
3103
3104
3105void CodeGenerator::VisitLiteral(Literal* node) {
3106#ifdef DEBUG
3107 int original_height = frame_->height();
3108#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003109 Comment cmnt(masm_, "[ Literal");
Steve Block6ded16b2010-05-10 14:33:55 +01003110 Register reg = frame_->GetTOSRegister();
3111 __ mov(reg, Operand(node->handle()));
3112 frame_->EmitPush(reg);
3113 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003114}
3115
3116
3117void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
3118#ifdef DEBUG
3119 int original_height = frame_->height();
3120#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003121 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003122 Comment cmnt(masm_, "[ RegExp Literal");
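  // JavaScript example (illustrative): 'var re = /ab+c/gi;'. The materialized
  // JSRegExp is kept in the function's literals array and reused on later
  // executions of this literal.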
3123
3124 // Retrieve the literal array and check the allocated entry.
3125
3126 // Load the function of this activation.
3127 __ ldr(r1, frame_->Function());
3128
3129 // Load the literals array of the function.
3130 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
3131
3132 // Load the literal at the ast saved index.
3133 int literal_offset =
3134 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
3135 __ ldr(r2, FieldMemOperand(r1, literal_offset));
3136
3137 JumpTarget done;
3138 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3139 __ cmp(r2, ip);
3140 done.Branch(ne);
3141
 3142 // If the entry is undefined we call the runtime system to compute
3143 // the literal.
3144 frame_->EmitPush(r1); // literal array (0)
3145 __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
3146 frame_->EmitPush(r0); // literal index (1)
3147 __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
3148 frame_->EmitPush(r0);
3149 __ mov(r0, Operand(node->flags())); // RegExp flags (3)
3150 frame_->EmitPush(r0);
3151 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
3152 __ mov(r2, Operand(r0));
3153
3154 done.Bind();
3155 // Push the literal.
3156 frame_->EmitPush(r2);
Steve Block6ded16b2010-05-10 14:33:55 +01003157 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003158}
3159
3160
Steve Blocka7e24c12009-10-30 11:49:00 +00003161void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
3162#ifdef DEBUG
3163 int original_height = frame_->height();
3164#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003165 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003166 Comment cmnt(masm_, "[ ObjectLiteral");
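  // JavaScript example (illustrative, hypothetical names):
  //   var o = { a: 1, b: f(), get c() { return 2; } };
  // Constant properties come from the boilerplate; computed values,
  // prototypes and accessors are stored by the per-property code below.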
3167
Steve Blocka7e24c12009-10-30 11:49:00 +00003168 // Load the function of this activation.
Steve Block6ded16b2010-05-10 14:33:55 +01003169 __ ldr(r3, frame_->Function());
Leon Clarkee46be812010-01-19 14:06:41 +00003170 // Literal array.
Steve Block6ded16b2010-05-10 14:33:55 +01003171 __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00003172 // Literal index.
Steve Block6ded16b2010-05-10 14:33:55 +01003173 __ mov(r2, Operand(Smi::FromInt(node->literal_index())));
Leon Clarkee46be812010-01-19 14:06:41 +00003174 // Constant properties.
Steve Block6ded16b2010-05-10 14:33:55 +01003175 __ mov(r1, Operand(node->constant_properties()));
3176 // Should the object literal have fast elements?
3177 __ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
3178 frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
Leon Clarkee46be812010-01-19 14:06:41 +00003179 if (node->depth() > 1) {
Steve Block6ded16b2010-05-10 14:33:55 +01003180 frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
Leon Clarkee46be812010-01-19 14:06:41 +00003181 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01003182 frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
Steve Blocka7e24c12009-10-30 11:49:00 +00003183 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003184 frame_->EmitPush(r0); // save the result
Steve Blocka7e24c12009-10-30 11:49:00 +00003185 for (int i = 0; i < node->properties()->length(); i++) {
Andrei Popescu402d9372010-02-26 13:31:12 +00003186 // At the start of each iteration, the top of stack contains
3187 // the newly created object literal.
Steve Blocka7e24c12009-10-30 11:49:00 +00003188 ObjectLiteral::Property* property = node->properties()->at(i);
3189 Literal* key = property->key();
3190 Expression* value = property->value();
3191 switch (property->kind()) {
3192 case ObjectLiteral::Property::CONSTANT:
3193 break;
3194 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
3195 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
3196 // else fall through
Andrei Popescu402d9372010-02-26 13:31:12 +00003197 case ObjectLiteral::Property::COMPUTED:
3198 if (key->handle()->IsSymbol()) {
3199 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
3200 LoadAndSpill(value);
3201 frame_->EmitPop(r0);
3202 __ mov(r2, Operand(key->handle()));
3203 __ ldr(r1, frame_->Top()); // Load the receiver.
3204 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
3205 break;
3206 }
3207 // else fall through
Steve Blocka7e24c12009-10-30 11:49:00 +00003208 case ObjectLiteral::Property::PROTOTYPE: {
Andrei Popescu402d9372010-02-26 13:31:12 +00003209 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00003210 frame_->EmitPush(r0); // dup the result
3211 LoadAndSpill(key);
3212 LoadAndSpill(value);
3213 frame_->CallRuntime(Runtime::kSetProperty, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00003214 break;
3215 }
3216 case ObjectLiteral::Property::SETTER: {
Andrei Popescu402d9372010-02-26 13:31:12 +00003217 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00003218 frame_->EmitPush(r0);
3219 LoadAndSpill(key);
3220 __ mov(r0, Operand(Smi::FromInt(1)));
3221 frame_->EmitPush(r0);
3222 LoadAndSpill(value);
3223 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
Steve Blocka7e24c12009-10-30 11:49:00 +00003224 break;
3225 }
3226 case ObjectLiteral::Property::GETTER: {
Andrei Popescu402d9372010-02-26 13:31:12 +00003227 __ ldr(r0, frame_->Top());
Steve Blocka7e24c12009-10-30 11:49:00 +00003228 frame_->EmitPush(r0);
3229 LoadAndSpill(key);
3230 __ mov(r0, Operand(Smi::FromInt(0)));
3231 frame_->EmitPush(r0);
3232 LoadAndSpill(value);
3233 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
Steve Blocka7e24c12009-10-30 11:49:00 +00003234 break;
3235 }
3236 }
3237 }
Steve Block6ded16b2010-05-10 14:33:55 +01003238 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003239}
3240
3241
Steve Blocka7e24c12009-10-30 11:49:00 +00003242void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
3243#ifdef DEBUG
3244 int original_height = frame_->height();
3245#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003246 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003247 Comment cmnt(masm_, "[ ArrayLiteral");
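  // JavaScript example (illustrative, hypothetical names): 'var a = [1, x + 1];'.
  // Literal elements are already in the boilerplate; only non-literal
  // elements are written by the loop below.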
3248
Steve Blocka7e24c12009-10-30 11:49:00 +00003249 // Load the function of this activation.
Leon Clarkee46be812010-01-19 14:06:41 +00003250 __ ldr(r2, frame_->Function());
Andrei Popescu402d9372010-02-26 13:31:12 +00003251 // Load the literals array of the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003252 __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00003253 __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
Leon Clarkee46be812010-01-19 14:06:41 +00003254 __ mov(r0, Operand(node->constant_elements()));
3255 frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
Andrei Popescu402d9372010-02-26 13:31:12 +00003256 int length = node->values()->length();
Leon Clarkee46be812010-01-19 14:06:41 +00003257 if (node->depth() > 1) {
3258 frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
Andrei Popescu402d9372010-02-26 13:31:12 +00003259 } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
Leon Clarkee46be812010-01-19 14:06:41 +00003260 frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
Andrei Popescu402d9372010-02-26 13:31:12 +00003261 } else {
3262 FastCloneShallowArrayStub stub(length);
3263 frame_->CallStub(&stub, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00003264 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003265 frame_->EmitPush(r0); // save the result
Leon Clarkee46be812010-01-19 14:06:41 +00003266 // r0: created array literal
Steve Blocka7e24c12009-10-30 11:49:00 +00003267
3268 // Generate code to set the elements in the array that are not
3269 // literals.
3270 for (int i = 0; i < node->values()->length(); i++) {
3271 Expression* value = node->values()->at(i);
3272
3273 // If value is a literal the property value is already set in the
3274 // boilerplate object.
3275 if (value->AsLiteral() != NULL) continue;
3276 // If value is a materialized literal the property value is already set
3277 // in the boilerplate object if it is simple.
3278 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
3279
3280 // The property must be set by generated code.
3281 LoadAndSpill(value);
3282 frame_->EmitPop(r0);
3283
 3284 // Fetch the array literal.
3285 __ ldr(r1, frame_->Top());
3286 // Get the elements array.
3287 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
3288
3289 // Write to the indexed properties array.
3290 int offset = i * kPointerSize + FixedArray::kHeaderSize;
3291 __ str(r0, FieldMemOperand(r1, offset));
3292
3293 // Update the write barrier for the array address.
3294 __ mov(r3, Operand(offset));
3295 __ RecordWrite(r1, r3, r2);
3296 }
Steve Block6ded16b2010-05-10 14:33:55 +01003297 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003298}
3299
3300
3301void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
3302#ifdef DEBUG
3303 int original_height = frame_->height();
3304#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003305 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003306 // Call runtime routine to allocate the catch extension object and
3307 // assign the exception value to the catch variable.
3308 Comment cmnt(masm_, "[ CatchExtensionObject");
3309 LoadAndSpill(node->key());
3310 LoadAndSpill(node->value());
3311 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
3312 frame_->EmitPush(r0);
Steve Block6ded16b2010-05-10 14:33:55 +01003313 ASSERT_EQ(original_height + 1, frame_->height());
3314}
3315
3316
3317void CodeGenerator::EmitSlotAssignment(Assignment* node) {
3318#ifdef DEBUG
3319 int original_height = frame_->height();
3320#endif
3321 Comment cmnt(masm(), "[ Variable Assignment");
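  // JavaScript examples (illustrative): 'x = v' and the compound form
  // 'x += v', for a variable that rewrites to a stack or context slot.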
3322 Variable* var = node->target()->AsVariableProxy()->AsVariable();
3323 ASSERT(var != NULL);
3324 Slot* slot = var->slot();
3325 ASSERT(slot != NULL);
3326
3327 // Evaluate the right-hand side.
3328 if (node->is_compound()) {
3329 // For a compound assignment the right-hand side is a binary operation
3330 // between the current property value and the actual right-hand side.
3331 LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
3332
3333 // Perform the binary operation.
3334 Literal* literal = node->value()->AsLiteral();
3335 bool overwrite_value =
3336 (node->value()->AsBinaryOperation() != NULL &&
3337 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3338 if (literal != NULL && literal->handle()->IsSmi()) {
3339 SmiOperation(node->binary_op(),
3340 literal->handle(),
3341 false,
3342 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3343 } else {
3344 Load(node->value());
3345 VirtualFrameBinaryOperation(
3346 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3347 }
3348 } else {
3349 Load(node->value());
3350 }
3351
3352 // Perform the assignment.
3353 if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
3354 CodeForSourcePosition(node->position());
3355 StoreToSlot(slot,
3356 node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
3357 }
3358 ASSERT_EQ(original_height + 1, frame_->height());
3359}
3360
3361
3362void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
3363#ifdef DEBUG
3364 int original_height = frame_->height();
3365#endif
3366 Comment cmnt(masm(), "[ Named Property Assignment");
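  // JavaScript examples (illustrative): 'obj.name = v', 'obj.name += v',
  // and assignments to globals such as 'x = v', which are compiled as
  // named stores on the global object.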
3367 Variable* var = node->target()->AsVariableProxy()->AsVariable();
3368 Property* prop = node->target()->AsProperty();
3369 ASSERT(var == NULL || (prop == NULL && var->is_global()));
3370
3371 // Initialize name and evaluate the receiver sub-expression if necessary. If
3372 // the receiver is trivial it is not placed on the stack at this point, but
3373 // loaded whenever actually needed.
3374 Handle<String> name;
3375 bool is_trivial_receiver = false;
3376 if (var != NULL) {
3377 name = var->name();
3378 } else {
3379 Literal* lit = prop->key()->AsLiteral();
3380 ASSERT_NOT_NULL(lit);
3381 name = Handle<String>::cast(lit->handle());
3382 // Do not materialize the receiver on the frame if it is trivial.
3383 is_trivial_receiver = prop->obj()->IsTrivial();
3384 if (!is_trivial_receiver) Load(prop->obj());
3385 }
3386
3387 // Change to slow case in the beginning of an initialization block to
3388 // avoid the quadratic behavior of repeatedly adding fast properties.
3389 if (node->starts_initialization_block()) {
3390 // Initialization block consists of assignments of the form expr.x = ..., so
3391 // this will never be an assignment to a variable, so there must be a
3392 // receiver object.
3393 ASSERT_EQ(NULL, var);
3394 if (is_trivial_receiver) {
3395 Load(prop->obj());
3396 } else {
3397 frame_->Dup();
3398 }
3399 frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3400 }
3401
3402 // Change to fast case at the end of an initialization block. To prepare for
3403 // that add an extra copy of the receiver to the frame, so that it can be
3404 // converted back to fast case after the assignment.
3405 if (node->ends_initialization_block() && !is_trivial_receiver) {
3406 frame_->Dup();
3407 }
3408
3409 // Stack layout:
3410 // [tos] : receiver (only materialized if non-trivial)
3411 // [tos+1] : receiver if at the end of an initialization block
3412
3413 // Evaluate the right-hand side.
3414 if (node->is_compound()) {
3415 // For a compound assignment the right-hand side is a binary operation
3416 // between the current property value and the actual right-hand side.
3417 if (is_trivial_receiver) {
3418 Load(prop->obj());
3419 } else if (var != NULL) {
3420 LoadGlobal();
3421 } else {
3422 frame_->Dup();
3423 }
3424 EmitNamedLoad(name, var != NULL);
3425 frame_->Drop(); // Receiver is left on the stack.
3426 frame_->EmitPush(r0);
3427
3428 // Perform the binary operation.
3429 Literal* literal = node->value()->AsLiteral();
3430 bool overwrite_value =
3431 (node->value()->AsBinaryOperation() != NULL &&
3432 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3433 if (literal != NULL && literal->handle()->IsSmi()) {
3434 SmiOperation(node->binary_op(),
3435 literal->handle(),
3436 false,
3437 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3438 } else {
3439 Load(node->value());
3440 VirtualFrameBinaryOperation(
3441 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3442 }
3443 } else {
3444 // For non-compound assignment just load the right-hand side.
3445 Load(node->value());
3446 }
3447
3448 // Stack layout:
3449 // [tos] : value
3450 // [tos+1] : receiver (only materialized if non-trivial)
3451 // [tos+2] : receiver if at the end of an initialization block
3452
3453 // Perform the assignment. It is safe to ignore constants here.
3454 ASSERT(var == NULL || var->mode() != Variable::CONST);
3455 ASSERT_NE(Token::INIT_CONST, node->op());
3456 if (is_trivial_receiver) {
3457 // Load the receiver and swap with the value.
3458 Load(prop->obj());
3459 Register t0 = frame_->PopToRegister();
3460 Register t1 = frame_->PopToRegister(t0);
3461 frame_->EmitPush(t0);
3462 frame_->EmitPush(t1);
3463 }
3464 CodeForSourcePosition(node->position());
3465 bool is_contextual = (var != NULL);
3466 EmitNamedStore(name, is_contextual);
3467 frame_->EmitPush(r0);
3468
3469 // Change to fast case at the end of an initialization block.
3470 if (node->ends_initialization_block()) {
3471 ASSERT_EQ(NULL, var);
3472 // The argument to the runtime call is the receiver.
3473 if (is_trivial_receiver) {
3474 Load(prop->obj());
3475 } else {
3476 // A copy of the receiver is below the value of the assignment. Swap
3477 // the receiver and the value of the assignment expression.
3478 Register t0 = frame_->PopToRegister();
3479 Register t1 = frame_->PopToRegister(t0);
3480 frame_->EmitPush(t0);
3481 frame_->EmitPush(t1);
3482 }
3483 frame_->CallRuntime(Runtime::kToFastProperties, 1);
3484 }
3485
3486 // Stack layout:
3487 // [tos] : result
3488
3489 ASSERT_EQ(original_height + 1, frame_->height());
3490}
3491
3492
3493void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
3494#ifdef DEBUG
3495 int original_height = frame_->height();
3496#endif
3497 Comment cmnt(masm_, "[ Keyed Property Assignment");
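  // JavaScript examples (illustrative): 'a[i] = v' and the compound form
  // 'a[i] += v', which first loads the current value with a keyed load on
  // a duplicated receiver/key pair.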
3498 Property* prop = node->target()->AsProperty();
3499 ASSERT_NOT_NULL(prop);
3500
3501 // Evaluate the receiver subexpression.
3502 Load(prop->obj());
3503
3504 // Change to slow case in the beginning of an initialization block to
3505 // avoid the quadratic behavior of repeatedly adding fast properties.
3506 if (node->starts_initialization_block()) {
3507 frame_->Dup();
3508 frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3509 }
3510
3511 // Change to fast case at the end of an initialization block. To prepare for
3512 // that add an extra copy of the receiver to the frame, so that it can be
3513 // converted back to fast case after the assignment.
3514 if (node->ends_initialization_block()) {
3515 frame_->Dup();
3516 }
3517
3518 // Evaluate the key subexpression.
3519 Load(prop->key());
3520
3521 // Stack layout:
3522 // [tos] : key
3523 // [tos+1] : receiver
3524 // [tos+2] : receiver if at the end of an initialization block
3525
3526 // Evaluate the right-hand side.
3527 if (node->is_compound()) {
3528 // For a compound assignment the right-hand side is a binary operation
3529 // between the current property value and the actual right-hand side.
Kristian Monsen25f61362010-05-21 11:50:48 +01003530 // Duplicate receiver and key for loading the current property value.
3531 frame_->Dup2();
Steve Block6ded16b2010-05-10 14:33:55 +01003532 EmitKeyedLoad();
3533 frame_->EmitPush(r0);
3534
3535 // Perform the binary operation.
3536 Literal* literal = node->value()->AsLiteral();
3537 bool overwrite_value =
3538 (node->value()->AsBinaryOperation() != NULL &&
3539 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3540 if (literal != NULL && literal->handle()->IsSmi()) {
3541 SmiOperation(node->binary_op(),
3542 literal->handle(),
3543 false,
3544 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3545 } else {
3546 Load(node->value());
3547 VirtualFrameBinaryOperation(
3548 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3549 }
3550 } else {
3551 // For non-compound assignment just load the right-hand side.
3552 Load(node->value());
3553 }
3554
3555 // Stack layout:
3556 // [tos] : value
3557 // [tos+1] : key
3558 // [tos+2] : receiver
3559 // [tos+3] : receiver if at the end of an initialization block
3560
3561 // Perform the assignment. It is safe to ignore constants here.
3562 ASSERT(node->op() != Token::INIT_CONST);
3563 CodeForSourcePosition(node->position());
3564 frame_->PopToR0();
3565 EmitKeyedStore(prop->key()->type());
3566 frame_->Drop(2); // Key and receiver are left on the stack.
3567 frame_->EmitPush(r0);
3568
3569 // Stack layout:
3570 // [tos] : result
3571 // [tos+1] : receiver if at the end of an initialization block
3572
3573 // Change to fast case at the end of an initialization block.
3574 if (node->ends_initialization_block()) {
3575 // The argument to the runtime call is the extra copy of the receiver,
3576 // which is below the value of the assignment. Swap the receiver and
3577 // the value of the assignment expression.
3578 Register t0 = frame_->PopToRegister();
3579 Register t1 = frame_->PopToRegister(t0);
3580 frame_->EmitPush(t1);
3581 frame_->EmitPush(t0);
3582 frame_->CallRuntime(Runtime::kToFastProperties, 1);
3583 }
3584
3585 // Stack layout:
3586 // [tos] : result
3587
3588 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003589}
3590
3591
3592void CodeGenerator::VisitAssignment(Assignment* node) {
Steve Block6ded16b2010-05-10 14:33:55 +01003593 VirtualFrame::RegisterAllocationScope scope(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00003594#ifdef DEBUG
3595 int original_height = frame_->height();
3596#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003597 Comment cmnt(masm_, "[ Assignment");
3598
Steve Block6ded16b2010-05-10 14:33:55 +01003599 Variable* var = node->target()->AsVariableProxy()->AsVariable();
3600 Property* prop = node->target()->AsProperty();
Steve Blocka7e24c12009-10-30 11:49:00 +00003601
Steve Block6ded16b2010-05-10 14:33:55 +01003602 if (var != NULL && !var->is_global()) {
3603 EmitSlotAssignment(node);
Steve Blocka7e24c12009-10-30 11:49:00 +00003604
Steve Block6ded16b2010-05-10 14:33:55 +01003605 } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
3606 (var != NULL && var->is_global())) {
3607 // Properties whose keys are property names and global variables are
3608 // treated as named property references. We do not need to consider
3609 // global 'this' because it is not a valid left-hand side.
3610 EmitNamedPropertyAssignment(node);
Steve Blocka7e24c12009-10-30 11:49:00 +00003611
Steve Block6ded16b2010-05-10 14:33:55 +01003612 } else if (prop != NULL) {
3613 // Other properties (including rewritten parameters for a function that
3614 // uses arguments) are keyed property assignments.
3615 EmitKeyedPropertyAssignment(node);
3616
3617 } else {
3618 // Invalid left-hand side.
3619 Load(node->target());
3620 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
3621 // The runtime call doesn't actually return but the code generator will
3622 // still generate code and expects a certain frame height.
3623 frame_->EmitPush(r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003624 }
Steve Block6ded16b2010-05-10 14:33:55 +01003625 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003626}
3627
3628
3629void CodeGenerator::VisitThrow(Throw* node) {
3630#ifdef DEBUG
3631 int original_height = frame_->height();
3632#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003633 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003634 Comment cmnt(masm_, "[ Throw");
3635
3636 LoadAndSpill(node->exception());
3637 CodeForSourcePosition(node->position());
3638 frame_->CallRuntime(Runtime::kThrow, 1);
3639 frame_->EmitPush(r0);
Steve Block6ded16b2010-05-10 14:33:55 +01003640 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003641}
3642
3643
3644void CodeGenerator::VisitProperty(Property* node) {
3645#ifdef DEBUG
3646 int original_height = frame_->height();
3647#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003648 Comment cmnt(masm_, "[ Property");
3649
3650 { Reference property(this, node);
Steve Block6ded16b2010-05-10 14:33:55 +01003651 property.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00003652 }
Steve Block6ded16b2010-05-10 14:33:55 +01003653 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003654}
3655
3656
3657void CodeGenerator::VisitCall(Call* node) {
3658#ifdef DEBUG
3659 int original_height = frame_->height();
3660#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003661 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003662 Comment cmnt(masm_, "[ Call");
3663
3664 Expression* function = node->expression();
3665 ZoneList<Expression*>* args = node->arguments();
3666
3667 // Standard function call.
3668 // Check if the function is a variable or a property.
3669 Variable* var = function->AsVariableProxy()->AsVariable();
3670 Property* property = function->AsProperty();
3671
3672 // ------------------------------------------------------------------------
3673 // Fast-case: Use inline caching.
3674 // ---
3675 // According to ECMA-262, section 11.2.3, page 44, the function to call
3676 // must be resolved after the arguments have been evaluated. The IC code
3677 // automatically handles this by loading the arguments before the function
3678 // is resolved in cache misses (this also holds for megamorphic calls).
3679 // ------------------------------------------------------------------------
3680
3681 if (var != NULL && var->is_possibly_eval()) {
3682 // ----------------------------------
3683 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
3684 // ----------------------------------
3685
3686 // In a call to eval, we first call %ResolvePossiblyDirectEval to
3687 // resolve the function we need to call and the receiver of the
3688 // call. Then we call the resolved function using the given
3689 // arguments.
3690 // Prepare stack for call to resolved function.
3691 LoadAndSpill(function);
3692 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3693 frame_->EmitPush(r2); // Slot for receiver
3694 int arg_count = args->length();
3695 for (int i = 0; i < arg_count; i++) {
3696 LoadAndSpill(args->at(i));
3697 }
3698
3699 // Prepare stack for call to ResolvePossiblyDirectEval.
3700 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
3701 frame_->EmitPush(r1);
3702 if (arg_count > 0) {
3703 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
3704 frame_->EmitPush(r1);
3705 } else {
3706 frame_->EmitPush(r2);
3707 }
3708
Leon Clarkee46be812010-01-19 14:06:41 +00003709 // Push the receiver.
3710 __ ldr(r1, frame_->Receiver());
3711 frame_->EmitPush(r1);
3712
Steve Blocka7e24c12009-10-30 11:49:00 +00003713 // Resolve the call.
Leon Clarkee46be812010-01-19 14:06:41 +00003714 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
Steve Blocka7e24c12009-10-30 11:49:00 +00003715
3716 // Touch up stack with the right values for the function and the receiver.
Leon Clarkee46be812010-01-19 14:06:41 +00003717 __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00003718 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
3719
3720 // Call the function.
3721 CodeForSourcePosition(node->position());
3722
3723 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Leon Clarkee46be812010-01-19 14:06:41 +00003724 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003725 frame_->CallStub(&call_function, arg_count + 1);
3726
3727 __ ldr(cp, frame_->Context());
3728 // Remove the function from the stack.
3729 frame_->Drop();
3730 frame_->EmitPush(r0);
3731
3732 } else if (var != NULL && !var->is_this() && var->is_global()) {
3733 // ----------------------------------
3734 // JavaScript example: 'foo(1, 2, 3)' // foo is global
3735 // ----------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00003736 // Pass the global object as the receiver and let the IC stub
3737 // patch the stack to use the global proxy as 'this' in the
3738 // invoked function.
3739 LoadGlobal();
3740
3741 // Load the arguments.
3742 int arg_count = args->length();
3743 for (int i = 0; i < arg_count; i++) {
3744 LoadAndSpill(args->at(i));
3745 }
3746
Andrei Popescu402d9372010-02-26 13:31:12 +00003747 // Set up the name register and call the IC initialization code.
3748 __ mov(r2, Operand(var->name()));
Steve Blocka7e24c12009-10-30 11:49:00 +00003749 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3750 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3751 CodeForSourcePosition(node->position());
3752 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3753 arg_count + 1);
3754 __ ldr(cp, frame_->Context());
Steve Blocka7e24c12009-10-30 11:49:00 +00003755 frame_->EmitPush(r0);
3756
3757 } else if (var != NULL && var->slot() != NULL &&
3758 var->slot()->type() == Slot::LOOKUP) {
3759 // ----------------------------------
Kristian Monsen25f61362010-05-21 11:50:48 +01003760 // JavaScript examples:
3761 //
3762 // with (obj) foo(1, 2, 3) // foo may be in obj.
3763 //
3764 // function f() {};
3765 // function g() {
3766 // eval(...);
3767 // f(); // f could be in extension object.
3768 // }
Steve Blocka7e24c12009-10-30 11:49:00 +00003769 // ----------------------------------
3770
Kristian Monsen25f61362010-05-21 11:50:48 +01003771 // JumpTargets do not yet support merging frames so the frame must be
3772 // spilled when jumping to these targets.
3773 JumpTarget slow, done;
3774
3775 // Generate fast case for loading functions from slots that
3776 // correspond to local/global variables or arguments unless they
3777 // are shadowed by eval-introduced bindings.
3778 EmitDynamicLoadFromSlotFastCase(var->slot(),
3779 NOT_INSIDE_TYPEOF,
3780 &slow,
3781 &done);
3782
3783 slow.Bind();
Steve Blocka7e24c12009-10-30 11:49:00 +00003784 // Load the function
3785 frame_->EmitPush(cp);
3786 __ mov(r0, Operand(var->name()));
3787 frame_->EmitPush(r0);
3788 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3789 // r0: slot value; r1: receiver
3790
3791 // Load the receiver.
3792 frame_->EmitPush(r0); // function
3793 frame_->EmitPush(r1); // receiver
3794
Kristian Monsen25f61362010-05-21 11:50:48 +01003795 // If fast case code has been generated, emit code to push the
3796 // function and receiver and have the slow path jump around this
3797 // code.
3798 if (done.is_linked()) {
3799 JumpTarget call;
3800 call.Jump();
3801 done.Bind();
3802 frame_->EmitPush(r0); // function
3803 LoadGlobalReceiver(r1); // receiver
3804 call.Bind();
3805 }
3806
3807 // Call the function. At this point, everything is spilled but the
3808 // function and receiver are in r0 and r1.
Leon Clarkee46be812010-01-19 14:06:41 +00003809 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003810 frame_->EmitPush(r0);
3811
3812 } else if (property != NULL) {
3813 // Check if the key is a literal string.
3814 Literal* literal = property->key()->AsLiteral();
3815
3816 if (literal != NULL && literal->handle()->IsSymbol()) {
3817 // ------------------------------------------------------------------
3818 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
3819 // ------------------------------------------------------------------
3820
Steve Block6ded16b2010-05-10 14:33:55 +01003821 Handle<String> name = Handle<String>::cast(literal->handle());
Steve Blocka7e24c12009-10-30 11:49:00 +00003822
Steve Block6ded16b2010-05-10 14:33:55 +01003823 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
3824 name->IsEqualTo(CStrVector("apply")) &&
3825 args->length() == 2 &&
3826 args->at(1)->AsVariableProxy() != NULL &&
3827 args->at(1)->AsVariableProxy()->IsArguments()) {
3828 // Use the optimized Function.prototype.apply that avoids
3829 // allocating lazily allocated arguments objects.
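        // JavaScript example (illustrative): 'f.apply(this, arguments)' in a
        // function whose arguments object has not been materialized.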
3830 CallApplyLazy(property->obj(),
3831 args->at(0),
3832 args->at(1)->AsVariableProxy(),
3833 node->position());
3834
3835 } else {
3836 LoadAndSpill(property->obj()); // Receiver.
3837 // Load the arguments.
3838 int arg_count = args->length();
3839 for (int i = 0; i < arg_count; i++) {
3840 LoadAndSpill(args->at(i));
3841 }
3842
3843 // Set the name register and call the IC initialization code.
3844 __ mov(r2, Operand(name));
3845 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3846 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3847 CodeForSourcePosition(node->position());
3848 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
3849 __ ldr(cp, frame_->Context());
3850 frame_->EmitPush(r0);
3851 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003852
3853 } else {
3854 // -------------------------------------------
3855 // JavaScript example: 'array[index](1, 2, 3)'
3856 // -------------------------------------------
3857
Leon Clarked91b9f72010-01-27 17:25:45 +00003858 LoadAndSpill(property->obj());
Kristian Monsen25f61362010-05-21 11:50:48 +01003859 if (!property->is_synthetic()) {
3860 // Duplicate receiver for later use.
3861 __ ldr(r0, MemOperand(sp, 0));
3862 frame_->EmitPush(r0);
3863 }
Leon Clarked91b9f72010-01-27 17:25:45 +00003864 LoadAndSpill(property->key());
Steve Block6ded16b2010-05-10 14:33:55 +01003865 EmitKeyedLoad();
Leon Clarked91b9f72010-01-27 17:25:45 +00003866 // Put the function below the receiver.
Steve Blocka7e24c12009-10-30 11:49:00 +00003867 if (property->is_synthetic()) {
Leon Clarked91b9f72010-01-27 17:25:45 +00003868 // Use the global receiver.
Kristian Monsen25f61362010-05-21 11:50:48 +01003869 frame_->EmitPush(r0); // Function.
Steve Blocka7e24c12009-10-30 11:49:00 +00003870 LoadGlobalReceiver(r0);
3871 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01003872 // Switch receiver and function.
3873 frame_->EmitPop(r1); // Receiver.
3874 frame_->EmitPush(r0); // Function.
3875 frame_->EmitPush(r1); // Receiver.
Steve Blocka7e24c12009-10-30 11:49:00 +00003876 }
3877
3878 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003879 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003880 frame_->EmitPush(r0);
3881 }
3882
3883 } else {
3884 // ----------------------------------
3885 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
3886 // ----------------------------------
3887
3888 // Load the function.
3889 LoadAndSpill(function);
3890
3891 // Pass the global proxy as the receiver.
3892 LoadGlobalReceiver(r0);
3893
3894 // Call the function.
Leon Clarkee46be812010-01-19 14:06:41 +00003895 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
Steve Blocka7e24c12009-10-30 11:49:00 +00003896 frame_->EmitPush(r0);
3897 }
Steve Block6ded16b2010-05-10 14:33:55 +01003898 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003899}
3900
3901
3902void CodeGenerator::VisitCallNew(CallNew* node) {
3903#ifdef DEBUG
3904 int original_height = frame_->height();
3905#endif
Steve Block6ded16b2010-05-10 14:33:55 +01003906 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003907 Comment cmnt(masm_, "[ CallNew");
3908
3909 // According to ECMA-262, section 11.2.2, page 44, the function
3910 // expression in new calls must be evaluated before the
3911 // arguments. This is different from ordinary calls, where the
3912 // actual function to call is resolved after the arguments have been
3913 // evaluated.
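  // JavaScript example (illustrative, hypothetical name): 'new Foo(1, 2)',
  // where 'Foo' is evaluated before the argument expressions.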
3914
3915 // Compute function to call and use the global object as the
3916 // receiver. There is no need to use the global proxy here because
3917 // it will always be replaced with a newly allocated object.
3918 LoadAndSpill(node->expression());
3919 LoadGlobal();
3920
3921 // Push the arguments ("left-to-right") on the stack.
3922 ZoneList<Expression*>* args = node->arguments();
3923 int arg_count = args->length();
3924 for (int i = 0; i < arg_count; i++) {
3925 LoadAndSpill(args->at(i));
3926 }
3927
3928 // r0: the number of arguments.
Steve Blocka7e24c12009-10-30 11:49:00 +00003929 __ mov(r0, Operand(arg_count));
Steve Blocka7e24c12009-10-30 11:49:00 +00003930 // Load the function into r1 as per calling convention.
Steve Blocka7e24c12009-10-30 11:49:00 +00003931 __ ldr(r1, frame_->ElementAt(arg_count + 1));
3932
3933 // Call the construct call builtin that handles allocation and
3934 // constructor invocation.
3935 CodeForSourcePosition(node->position());
3936 Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
Leon Clarke4515c472010-02-03 11:58:03 +00003937 frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00003938
3939 // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
3940 __ str(r0, frame_->Top());
Steve Block6ded16b2010-05-10 14:33:55 +01003941 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00003942}
3943
3944
3945void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01003946 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003947 ASSERT(args->length() == 1);
3948 JumpTarget leave, null, function, non_function_constructor;
3949
3950 // Load the object into r0.
3951 LoadAndSpill(args->at(0));
3952 frame_->EmitPop(r0);
3953
3954 // If the object is a smi, we return null.
3955 __ tst(r0, Operand(kSmiTagMask));
3956 null.Branch(eq);
3957
3958 // Check that the object is a JS object but take special care of JS
3959 // functions to make sure they have 'Function' as their class.
3960 __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
3961 null.Branch(lt);
3962
3963 // As long as JS_FUNCTION_TYPE is the last instance type and it is
3964 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
3965 // LAST_JS_OBJECT_TYPE.
3966 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
3967 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
3968 __ cmp(r1, Operand(JS_FUNCTION_TYPE));
3969 function.Branch(eq);
3970
3971 // Check if the constructor in the map is a function.
3972 __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
3973 __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
3974 non_function_constructor.Branch(ne);
3975
3976 // The r0 register now contains the constructor function. Grab the
3977 // instance class name from there.
3978 __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
3979 __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
3980 frame_->EmitPush(r0);
3981 leave.Jump();
3982
3983 // Functions have class 'Function'.
3984 function.Bind();
3985 __ mov(r0, Operand(Factory::function_class_symbol()));
3986 frame_->EmitPush(r0);
3987 leave.Jump();
3988
3989 // Objects with a non-function constructor have class 'Object'.
3990 non_function_constructor.Bind();
3991 __ mov(r0, Operand(Factory::Object_symbol()));
3992 frame_->EmitPush(r0);
3993 leave.Jump();
3994
3995 // Non-JS objects have class null.
3996 null.Bind();
3997 __ LoadRoot(r0, Heap::kNullValueRootIndex);
3998 frame_->EmitPush(r0);
3999
4000 // All done.
4001 leave.Bind();
4002}
4003
4004
4005void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004006 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004007 ASSERT(args->length() == 1);
4008 JumpTarget leave;
4009 LoadAndSpill(args->at(0));
4010 frame_->EmitPop(r0); // r0 contains object.
4011 // if (object->IsSmi()) return the object.
4012 __ tst(r0, Operand(kSmiTagMask));
4013 leave.Branch(eq);
4014 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4015 __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
4016 leave.Branch(ne);
4017 // Load the value.
4018 __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
4019 leave.Bind();
4020 frame_->EmitPush(r0);
4021}
4022
4023
4024void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004025 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004026 ASSERT(args->length() == 2);
4027 JumpTarget leave;
4028 LoadAndSpill(args->at(0)); // Load the object.
4029 LoadAndSpill(args->at(1)); // Load the value.
4030 frame_->EmitPop(r0); // r0 contains value
4031 frame_->EmitPop(r1); // r1 contains object
4032 // if (object->IsSmi()) return object.
4033 __ tst(r1, Operand(kSmiTagMask));
4034 leave.Branch(eq);
4035 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4036 __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
4037 leave.Branch(ne);
4038 // Store the value.
4039 __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
4040 // Update the write barrier.
4041 __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
4042 __ RecordWrite(r1, r2, r3);
4043 // Leave.
4044 leave.Bind();
4045 frame_->EmitPush(r0);
4046}
4047
4048
4049void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004050 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004051 ASSERT(args->length() == 1);
4052 LoadAndSpill(args->at(0));
4053 frame_->EmitPop(r0);
4054 __ tst(r0, Operand(kSmiTagMask));
4055 cc_reg_ = eq;
4056}
4057
4058
4059void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004060 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004061 // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
4062 ASSERT_EQ(args->length(), 3);
4063#ifdef ENABLE_LOGGING_AND_PROFILING
4064 if (ShouldGenerateLog(args->at(0))) {
4065 LoadAndSpill(args->at(1));
4066 LoadAndSpill(args->at(2));
4067 __ CallRuntime(Runtime::kLog, 2);
4068 }
4069#endif
4070 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
4071 frame_->EmitPush(r0);
4072}
4073
4074
4075void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004076 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004077 ASSERT(args->length() == 1);
4078 LoadAndSpill(args->at(0));
4079 frame_->EmitPop(r0);
4080 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
4081 cc_reg_ = eq;
4082}
4083
4084
Steve Block6ded16b2010-05-10 14:33:55 +01004085// Generates the Math.pow method - currently just calls runtime.
4086void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
4087 ASSERT(args->length() == 2);
4088 Load(args->at(0));
4089 Load(args->at(1));
4090 frame_->CallRuntime(Runtime::kMath_pow, 2);
4091 frame_->EmitPush(r0);
4092}
4093
4094
4095// Generates the Math.sqrt method - currently just calls runtime.
4096void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
4097 ASSERT(args->length() == 1);
4098 Load(args->at(0));
4099 frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4100 frame_->EmitPush(r0);
4101}
4102
4103
4104// This generates code that performs a charCodeAt() call or returns
Steve Blocka7e24c12009-10-30 11:49:00 +00004105// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
Steve Block6ded16b2010-05-10 14:33:55 +01004106 // It can handle flat strings with 8- or 16-bit characters and cons strings
 4107 // where the answer is found in the left-hand branch of the cons. The slow
 4108 // case will flatten the string, which ensures that the answer is in the
 4109 // left-hand side the next time around.
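// Illustrative (assumed) caller: the charCodeAt builtin reaches this fast
// path through an inlined intrinsic and falls back to
// Runtime_StringCharCodeAt when undefined is returned.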
Steve Blocka7e24c12009-10-30 11:49:00 +00004110void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004111 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004112 ASSERT(args->length() == 2);
Steve Blockd0582a62009-12-15 09:54:21 +00004113 Comment(masm_, "[ GenerateFastCharCodeAt");
4114
4115 LoadAndSpill(args->at(0));
4116 LoadAndSpill(args->at(1));
Steve Block6ded16b2010-05-10 14:33:55 +01004117 frame_->EmitPop(r1); // Index.
4118 frame_->EmitPop(r2); // String.
Steve Blockd0582a62009-12-15 09:54:21 +00004119
Steve Block6ded16b2010-05-10 14:33:55 +01004120 Label slow_case;
4121 Label exit;
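  // The helper takes separate labels for its different failure modes; here
  // they all share one slow case, which falls back to the runtime below.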
4122 StringHelper::GenerateFastCharCodeAt(masm_,
4123 r2,
4124 r1,
4125 r3,
4126 r0,
4127 &slow_case,
4128 &slow_case,
4129 &slow_case,
4130 &slow_case);
4131 __ jmp(&exit);
Steve Blockd0582a62009-12-15 09:54:21 +00004132
Steve Block6ded16b2010-05-10 14:33:55 +01004133 __ bind(&slow_case);
4134 // Move the undefined value into the result register, which will
4135 // trigger the slow case.
Steve Blocka7e24c12009-10-30 11:49:00 +00004136 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
Steve Blockd0582a62009-12-15 09:54:21 +00004137
Steve Block6ded16b2010-05-10 14:33:55 +01004138 __ bind(&exit);
Steve Blocka7e24c12009-10-30 11:49:00 +00004139 frame_->EmitPush(r0);
4140}
4141
4142
Steve Block6ded16b2010-05-10 14:33:55 +01004143void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
4144 Comment(masm_, "[ GenerateCharFromCode");
4145 ASSERT(args->length() == 1);
4146
4147 Register code = r1;
4148 Register scratch = ip;
4149 Register result = r0;
4150
4151 LoadAndSpill(args->at(0));
4152 frame_->EmitPop(code);
4153
4154 StringHelper::GenerateCharFromCode(masm_,
4155 code,
4156 scratch,
4157 result,
4158 CALL_FUNCTION);
4159 frame_->EmitPush(result);
4160}
4161
4162
Steve Blocka7e24c12009-10-30 11:49:00 +00004163void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004164 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004165 ASSERT(args->length() == 1);
4166 LoadAndSpill(args->at(0));
4167 JumpTarget answer;
4168 // We need the CC bits to come out as not_equal in the case where the
4169 // object is a smi. This can't be done with the usual test opcode so
4170 // we use XOR to get the right CC bits.
4171 frame_->EmitPop(r0);
4172 __ and_(r1, r0, Operand(kSmiTagMask));
4173 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
4174 answer.Branch(ne);
4175 // It is a heap object - get the map. Check if the object is a JS array.
4176 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
4177 answer.Bind();
4178 cc_reg_ = eq;
4179}
4180
4181
Andrei Popescu402d9372010-02-26 13:31:12 +00004182void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004183 VirtualFrame::SpilledScope spilled_scope(frame_);
Andrei Popescu402d9372010-02-26 13:31:12 +00004184 ASSERT(args->length() == 1);
4185 LoadAndSpill(args->at(0));
4186 JumpTarget answer;
4187 // We need the CC bits to come out as not_equal in the case where the
4188 // object is a smi. This can't be done with the usual test opcode so
4189 // we use XOR to get the right CC bits.
4190 frame_->EmitPop(r0);
4191 __ and_(r1, r0, Operand(kSmiTagMask));
4192 __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
4193 answer.Branch(ne);
4194 // It is a heap object - get the map. Check if the object is a regexp.
4195 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
4196 answer.Bind();
4197 cc_reg_ = eq;
4198}
4199
4200
Steve Blockd0582a62009-12-15 09:54:21 +00004201void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
4202 // This generates a fast version of:
4203 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
Steve Block6ded16b2010-05-10 14:33:55 +01004204 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blockd0582a62009-12-15 09:54:21 +00004205 ASSERT(args->length() == 1);
4206 LoadAndSpill(args->at(0));
4207 frame_->EmitPop(r1);
4208 __ tst(r1, Operand(kSmiTagMask));
4209 false_target()->Branch(eq);
4210
4211 __ LoadRoot(ip, Heap::kNullValueRootIndex);
4212 __ cmp(r1, ip);
4213 true_target()->Branch(eq);
4214
4215 Register map_reg = r2;
4216 __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
4217 // Undetectable objects behave like undefined when tested with typeof.
4218 __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
4219 __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
4220 __ cmp(r1, Operand(1 << Map::kIsUndetectable));
4221 false_target()->Branch(eq);
4222
4223 __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
4224 __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
4225 false_target()->Branch(lt);
4226 __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
4227 cc_reg_ = le;
4228}
4229
4230
4231void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
4232 // This generates a fast version of:
4233 // (%_ClassOf(arg) === 'Function')
Steve Block6ded16b2010-05-10 14:33:55 +01004234 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blockd0582a62009-12-15 09:54:21 +00004235 ASSERT(args->length() == 1);
4236 LoadAndSpill(args->at(0));
4237 frame_->EmitPop(r0);
4238 __ tst(r0, Operand(kSmiTagMask));
4239 false_target()->Branch(eq);
4240 Register map_reg = r2;
4241 __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
4242 cc_reg_ = eq;
4243}
4244
4245
Leon Clarked91b9f72010-01-27 17:25:45 +00004246void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004247 VirtualFrame::SpilledScope spilled_scope(frame_);
Leon Clarked91b9f72010-01-27 17:25:45 +00004248 ASSERT(args->length() == 1);
4249 LoadAndSpill(args->at(0));
4250 frame_->EmitPop(r0);
4251 __ tst(r0, Operand(kSmiTagMask));
4252 false_target()->Branch(eq);
4253 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
4254 __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
4255 __ tst(r1, Operand(1 << Map::kIsUndetectable));
4256 cc_reg_ = ne;
4257}
4258
4259
Steve Blocka7e24c12009-10-30 11:49:00 +00004260void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004261 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004262 ASSERT(args->length() == 0);
4263
4264 // Get the frame pointer for the calling frame.
4265 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4266
4267 // Skip the arguments adaptor frame if it exists.
4268 Label check_frame_marker;
4269 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
4270 __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4271 __ b(ne, &check_frame_marker);
4272 __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
4273
4274 // Check the marker in the calling frame.
4275 __ bind(&check_frame_marker);
4276 __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
4277 __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
4278 cc_reg_ = eq;
4279}
4280
4281
4282void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004283 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004284 ASSERT(args->length() == 0);
4285
Steve Block6ded16b2010-05-10 14:33:55 +01004286 Label exit;
4287
4288 // Get the number of formal parameters.
Andrei Popescu31002712010-02-23 13:46:05 +00004289 __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
Steve Blocka7e24c12009-10-30 11:49:00 +00004290
Steve Block6ded16b2010-05-10 14:33:55 +01004291 // Check if the calling frame is an arguments adaptor frame.
4292 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4293 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
4294 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4295 __ b(ne, &exit);
4296
4297 // Arguments adaptor case: Read the arguments length from the
4298 // adaptor frame.
4299 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4300
4301 __ bind(&exit);
Steve Blocka7e24c12009-10-30 11:49:00 +00004302 frame_->EmitPush(r0);
4303}
4304
4305
Steve Block6ded16b2010-05-10 14:33:55 +01004306void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
4307 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004308 ASSERT(args->length() == 1);
4309
4310 // Satisfy contract with ArgumentsAccessStub:
4311 // Load the key into r1 and the formal parameters count into r0.
4312 LoadAndSpill(args->at(0));
4313 frame_->EmitPop(r1);
Andrei Popescu31002712010-02-23 13:46:05 +00004314 __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
Steve Blocka7e24c12009-10-30 11:49:00 +00004315
4316 // Call the shared stub to get to arguments[key].
4317 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
4318 frame_->CallStub(&stub, 0);
4319 frame_->EmitPush(r0);
4320}
4321
4322
Steve Block6ded16b2010-05-10 14:33:55 +01004323void CodeGenerator::GenerateRandomHeapNumber(
4324 ZoneList<Expression*>* args) {
4325 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004326 ASSERT(args->length() == 0);
Steve Block6ded16b2010-05-10 14:33:55 +01004327
4328 Label slow_allocate_heapnumber;
4329 Label heapnumber_allocated;
4330
4331 __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
4332 __ jmp(&heapnumber_allocated);
4333
4334 __ bind(&slow_allocate_heapnumber);
4335 // To allocate a heap number, and ensure that it is not a smi, we
4336 // call the runtime function Runtime::kNumberUnaryMinus on 0, returning the double
4337 // -0.0. A new, distinct heap number is returned each time.
4338 __ mov(r0, Operand(Smi::FromInt(0)));
4339 __ push(r0);
4340 __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
4341 __ mov(r4, Operand(r0));
4342
4343 __ bind(&heapnumber_allocated);
4344
4345 // Convert 32 random bits in r0 to 0.(32 random bits) in a double
4346 // by computing:
4347 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
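  // In C terms this is roughly (a sketch, assuming IEEE-754 doubles and a
  // hypothetical bit_cast helper):
  //   uint64_t bits = 0x4130000000000000ULL | random32;   // 1.xxx * 2^20
  //   double result = bit_cast<double>(bits) - 0x1.0p20;   // random32 / 2^32, in [0, 1)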
4348 if (CpuFeatures::IsSupported(VFP3)) {
4349 __ PrepareCallCFunction(0, r1);
4350 __ CallCFunction(ExternalReference::random_uint32_function(), 0);
4351
4352 CpuFeatures::Scope scope(VFP3);
4353 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
4354 // Create this constant using mov/orr to avoid PC relative load.
4355 __ mov(r1, Operand(0x41000000));
4356 __ orr(r1, r1, Operand(0x300000));
4357 // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
4358 __ vmov(d7, r0, r1);
4359 // Move 0x4130000000000000 to VFP.
4360 __ mov(r0, Operand(0));
4361 __ vmov(d8, r0, r1);
4362 // Subtract and store the result in the heap number.
4363 __ vsub(d7, d7, d8);
4364 __ sub(r0, r4, Operand(kHeapObjectTag));
4365 __ vstr(d7, r0, HeapNumber::kValueOffset);
4366 frame_->EmitPush(r4);
4367 } else {
4368 __ mov(r0, Operand(r4));
4369 __ PrepareCallCFunction(1, r1);
4370 __ CallCFunction(
4371 ExternalReference::fill_heap_number_with_random_function(), 1);
4372 frame_->EmitPush(r0);
4373 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004374}
4375
4376
Steve Blockd0582a62009-12-15 09:54:21 +00004377void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
4378 ASSERT_EQ(2, args->length());
4379
4380 Load(args->at(0));
4381 Load(args->at(1));
4382
Andrei Popescu31002712010-02-23 13:46:05 +00004383 StringAddStub stub(NO_STRING_ADD_FLAGS);
4384 frame_->CallStub(&stub, 2);
Steve Blockd0582a62009-12-15 09:54:21 +00004385 frame_->EmitPush(r0);
4386}
4387
4388
Leon Clarkee46be812010-01-19 14:06:41 +00004389void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
4390 ASSERT_EQ(3, args->length());
4391
4392 Load(args->at(0));
4393 Load(args->at(1));
4394 Load(args->at(2));
4395
Andrei Popescu31002712010-02-23 13:46:05 +00004396 SubStringStub stub;
4397 frame_->CallStub(&stub, 3);
Leon Clarkee46be812010-01-19 14:06:41 +00004398 frame_->EmitPush(r0);
4399}
4400
4401
4402void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
4403 ASSERT_EQ(2, args->length());
4404
4405 Load(args->at(0));
4406 Load(args->at(1));
4407
Leon Clarked91b9f72010-01-27 17:25:45 +00004408 StringCompareStub stub;
4409 frame_->CallStub(&stub, 2);
Leon Clarkee46be812010-01-19 14:06:41 +00004410 frame_->EmitPush(r0);
4411}
4412
4413
4414void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
4415 ASSERT_EQ(4, args->length());
4416
4417 Load(args->at(0));
4418 Load(args->at(1));
4419 Load(args->at(2));
4420 Load(args->at(3));
Steve Block6ded16b2010-05-10 14:33:55 +01004421 RegExpExecStub stub;
4422 frame_->CallStub(&stub, 4);
4423 frame_->EmitPush(r0);
4424}
Leon Clarkee46be812010-01-19 14:06:41 +00004425
Steve Block6ded16b2010-05-10 14:33:55 +01004426
4427void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
4428 // No stub. This code only occurs a few times in regexp.js.
4429 const int kMaxInlineLength = 100;
4430 ASSERT_EQ(3, args->length());
4431 Load(args->at(0)); // Size of array, smi.
4432 Load(args->at(1)); // "index" property value.
4433 Load(args->at(2)); // "input" property value.
4434 {
4435 VirtualFrame::SpilledScope spilled_scope(frame_);
4436 Label slowcase;
4437 Label done;
4438 __ ldr(r1, MemOperand(sp, kPointerSize * 2));
4439 STATIC_ASSERT(kSmiTag == 0);
4440 STATIC_ASSERT(kSmiTagSize == 1);
4441 __ tst(r1, Operand(kSmiTagMask));
4442 __ b(ne, &slowcase);
4443 __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
4444 __ b(hi, &slowcase);
4445 // Smi-tagging is equivalent to multiplying by 2.
4446 // Allocate RegExpResult followed by FixedArray with the size in words in r2.
4447 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4448 // Elements: [Map][Length][..elements..]
4449 // Size, in words, of the JSArray with two in-object properties plus the
4450 // header of a FixedArray.
4451 int objects_size =
4452 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
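    // Untag the length: r1 holds the number of elements as a smi, so shifting
    // out the tag leaves the raw element count in r5.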
4453 __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
4454 __ add(r2, r5, Operand(objects_size));
Kristian Monsen25f61362010-05-21 11:50:48 +01004455 __ AllocateInNewSpace(
4456 r2, // In: Size, in words.
4457 r0, // Out: Start of allocation (tagged).
4458 r3, // Scratch register.
4459 r4, // Scratch register.
4460 &slowcase,
4461 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
Steve Block6ded16b2010-05-10 14:33:55 +01004462 // r0: Start of allocated area, object-tagged.
4463 // r1: Number of elements in array, as smi.
4464 // r5: Number of elements, untagged.
4465
4466 // Set JSArray map to global.regexp_result_map().
4467 // Set empty properties FixedArray.
4468 // Set elements to point to FixedArray allocated right after the JSArray.
4469 // Interleave operations for better latency.
4470 __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
4471 __ add(r3, r0, Operand(JSRegExpResult::kSize));
4472 __ mov(r4, Operand(Factory::empty_fixed_array()));
4473 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
4474 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
4475 __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
4476 __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
4477 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4478
4479 // Set input, index and length fields from arguments.
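    // The load-multiple pops the two topmost stack slots: r2 receives "input"
    // (top of stack) and r4 receives "index"; the write-back advances sp.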
4480 __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
4481 __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
4482 __ add(sp, sp, Operand(kPointerSize));
4483 __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
4484 __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
4485
4486 // Fill out the elements FixedArray.
4487 // r0: JSArray, tagged.
4488 // r3: FixedArray, tagged.
4489 // r5: Number of elements in array, untagged.
4490
4491 // Set map.
4492 __ mov(r2, Operand(Factory::fixed_array_map()));
4493 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
4494 // Set FixedArray length.
4495 __ str(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
4496 // Fill contents of fixed-array with the-hole.
4497 __ mov(r2, Operand(Factory::the_hole_value()));
4498 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4499 // Fill fixed array elements with hole.
4500 // r0: JSArray, tagged.
4501 // r2: the hole.
4502 // r3: Start of elements in FixedArray.
4503 // r5: Number of elements to fill.
4504 Label loop;
4505 __ tst(r5, Operand(r5));
4506 __ bind(&loop);
4507 __ b(le, &done); // Jump if r5 is negative or zero.
4508 __ sub(r5, r5, Operand(1), SetCC);
4509 __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
4510 __ jmp(&loop);
4511
4512 __ bind(&slowcase);
4513 __ CallRuntime(Runtime::kRegExpConstructResult, 3);
4514
4515 __ bind(&done);
4516 }
4517 frame_->Forget(3);
4518 frame_->EmitPush(r0);
4519}
4520
4521
4522class DeferredSearchCache: public DeferredCode {
4523 public:
4524 DeferredSearchCache(Register dst, Register cache, Register key)
4525 : dst_(dst), cache_(cache), key_(key) {
4526 set_comment("[ DeferredSearchCache");
4527 }
4528
4529 virtual void Generate();
4530
4531 private:
4532 Register dst_, cache_, key_;
4533};
4534
4535
4536void DeferredSearchCache::Generate() {
4537 __ Push(cache_, key_);
4538 __ CallRuntime(Runtime::kGetFromCache, 2);
4539 if (!dst_.is(r0)) {
4540 __ mov(dst_, r0);
4541 }
4542}
4543
4544
4545void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
4546 ASSERT_EQ(2, args->length());
4547
4548 ASSERT_NE(NULL, args->at(0)->AsLiteral());
4549 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
4550
4551 Handle<FixedArray> jsfunction_result_caches(
4552 Top::global_context()->jsfunction_result_caches());
4553 if (jsfunction_result_caches->length() <= cache_id) {
4554 __ Abort("Attempt to use undefined cache.");
4555 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
4556 frame_->EmitPush(r0);
4557 return;
4558 }
4559
4560 Load(args->at(1));
4561 frame_->EmitPop(r2);
4562
4563 __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
4564 __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
4565 __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
4566 __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
4567
4568 DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
4569
4570 const int kFingerOffset =
4571 FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
4572 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
4573 __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
4574 // r0 now holds finger offset as a smi.
4575 __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4576 // r3 now points to the start of fixed array elements.
4577 __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
4578 // Note side effect of PreIndex: r3 now points to the key of the pair.
4579 __ cmp(r2, r0);
4580 deferred->Branch(ne);
4581
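  // Cache hit: the cached value is stored in the word right after the key.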
4582 __ ldr(r0, MemOperand(r3, kPointerSize));
4583
4584 deferred->BindExit();
Leon Clarkee46be812010-01-19 14:06:41 +00004585 frame_->EmitPush(r0);
4586}
4587
4588
Andrei Popescu402d9372010-02-26 13:31:12 +00004589void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
4590 ASSERT_EQ(args->length(), 1);
4591
4592 // Load the argument on the stack and jump to the runtime.
4593 Load(args->at(0));
4594
Steve Block6ded16b2010-05-10 14:33:55 +01004595 NumberToStringStub stub;
4596 frame_->CallStub(&stub, 1);
4597 frame_->EmitPush(r0);
4598}
4599
4600
4601class DeferredSwapElements: public DeferredCode {
4602 public:
4603 DeferredSwapElements(Register object, Register index1, Register index2)
4604 : object_(object), index1_(index1), index2_(index2) {
4605 set_comment("[ DeferredSwapElements");
4606 }
4607
4608 virtual void Generate();
4609
4610 private:
4611 Register object_, index1_, index2_;
4612};
4613
4614
4615void DeferredSwapElements::Generate() {
4616 __ push(object_);
4617 __ push(index1_);
4618 __ push(index2_);
4619 __ CallRuntime(Runtime::kSwapElements, 3);
4620}
4621
4622
4623void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
4624 Comment cmnt(masm_, "[ GenerateSwapElements");
4625
4626 ASSERT_EQ(3, args->length());
4627
4628 Load(args->at(0));
4629 Load(args->at(1));
4630 Load(args->at(2));
4631
4632 Register index2 = r2;
4633 Register index1 = r1;
4634 Register object = r0;
4635 Register tmp1 = r3;
4636 Register tmp2 = r4;
4637
4638 frame_->EmitPop(index2);
4639 frame_->EmitPop(index1);
4640 frame_->EmitPop(object);
4641
4642 DeferredSwapElements* deferred =
4643 new DeferredSwapElements(object, index1, index2);
4644
4645 // Fetch the map and check if array is in fast case.
4646 // Check that object doesn't require security checks and
4647 // has no indexed interceptor.
4648 __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
4649 deferred->Branch(lt);
4650 __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
4651 __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
4652 deferred->Branch(nz);
4653
4654 // Check the object's elements are in fast case.
4655 __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
4656 __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
4657 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
4658 __ cmp(tmp2, ip);
4659 deferred->Branch(ne);
4660
4661 // Smi-tagging is equivalent to multiplying by 2.
4662 STATIC_ASSERT(kSmiTag == 0);
4663 STATIC_ASSERT(kSmiTagSize == 1);
4664
4665 // Check that both indices are smis.
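  // OR-ing the two indices together lets a single tag test catch a non-smi in
  // either of them.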
4666 __ mov(tmp2, index1);
4667 __ orr(tmp2, tmp2, index2);
4668 __ tst(tmp2, Operand(kSmiTagMask));
4669 deferred->Branch(nz);
4670
4671 // Bring the offsets into the fixed array in tmp1 into index1 and
4672 // index2.
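  // Each smi index is scaled to a byte offset and biased by the header size,
  // making index1/index2 field offsets relative to the tagged pointer in tmp1.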
4673 __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4674 __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
4675 __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
4676
4677 // Swap elements.
4678 Register tmp3 = object;
4679 object = no_reg;
4680 __ ldr(tmp3, MemOperand(tmp1, index1));
4681 __ ldr(tmp2, MemOperand(tmp1, index2));
4682 __ str(tmp3, MemOperand(tmp1, index2));
4683 __ str(tmp2, MemOperand(tmp1, index1));
4684
4685 Label done;
4686 __ InNewSpace(tmp1, tmp2, eq, &done);
4687 // Possible optimization: do a check that both values are Smis
4688 // (OR them together and test the result against the smi tag mask).
4689
4690 __ mov(tmp2, tmp1);
4691 RecordWriteStub recordWrite1(tmp1, index1, tmp3);
4692 __ CallStub(&recordWrite1);
4693
4694 RecordWriteStub recordWrite2(tmp2, index2, tmp3);
4695 __ CallStub(&recordWrite2);
4696
4697 __ bind(&done);
4698
4699 deferred->BindExit();
4700 __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
4701 frame_->EmitPush(tmp1);
4702}
4703
4704
4705void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
4706 Comment cmnt(masm_, "[ GenerateCallFunction");
4707
4708 ASSERT(args->length() >= 2);
4709
4710 int n_args = args->length() - 2; // for receiver and function.
4711 Load(args->at(0)); // receiver
4712 for (int i = 0; i < n_args; i++) {
4713 Load(args->at(i + 1));
4714 }
4715 Load(args->at(n_args + 1)); // function
4716 frame_->CallJSFunction(n_args);
Andrei Popescu402d9372010-02-26 13:31:12 +00004717 frame_->EmitPush(r0);
4718}
4719
4720
4721void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
4722 ASSERT_EQ(args->length(), 1);
4723 // Load the argument on the stack and jump to the runtime.
4724 Load(args->at(0));
4725 frame_->CallRuntime(Runtime::kMath_sin, 1);
4726 frame_->EmitPush(r0);
4727}
4728
4729
4730void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
4731 ASSERT_EQ(args->length(), 1);
4732 // Load the argument on the stack and jump to the runtime.
4733 Load(args->at(0));
4734 frame_->CallRuntime(Runtime::kMath_cos, 1);
4735 frame_->EmitPush(r0);
4736}
4737
4738
Steve Blocka7e24c12009-10-30 11:49:00 +00004739void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
Steve Block6ded16b2010-05-10 14:33:55 +01004740 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004741 ASSERT(args->length() == 2);
4742
4743 // Load the two objects into registers and perform the comparison.
4744 LoadAndSpill(args->at(0));
4745 LoadAndSpill(args->at(1));
4746 frame_->EmitPop(r0);
4747 frame_->EmitPop(r1);
Steve Block6ded16b2010-05-10 14:33:55 +01004748 __ cmp(r0, r1);
Steve Blocka7e24c12009-10-30 11:49:00 +00004749 cc_reg_ = eq;
4750}
4751
4752
4753void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
4754#ifdef DEBUG
4755 int original_height = frame_->height();
4756#endif
Steve Block6ded16b2010-05-10 14:33:55 +01004757 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004758 if (CheckForInlineRuntimeCall(node)) {
4759 ASSERT((has_cc() && frame_->height() == original_height) ||
4760 (!has_cc() && frame_->height() == original_height + 1));
4761 return;
4762 }
4763
4764 ZoneList<Expression*>* args = node->arguments();
4765 Comment cmnt(masm_, "[ CallRuntime");
4766 Runtime::Function* function = node->function();
4767
4768 if (function == NULL) {
4769 // Prepare stack for calling JS runtime function.
Steve Blocka7e24c12009-10-30 11:49:00 +00004770 // Push the builtins object found in the current global object.
4771 __ ldr(r1, GlobalObject());
4772 __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
4773 frame_->EmitPush(r0);
4774 }
4775
4776 // Push the arguments ("left-to-right").
4777 int arg_count = args->length();
4778 for (int i = 0; i < arg_count; i++) {
4779 LoadAndSpill(args->at(i));
4780 }
4781
4782 if (function == NULL) {
4783 // Call the JS runtime function.
Andrei Popescu402d9372010-02-26 13:31:12 +00004784 __ mov(r2, Operand(node->name()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004785 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4786 Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
4787 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4788 __ ldr(cp, frame_->Context());
Steve Blocka7e24c12009-10-30 11:49:00 +00004789 frame_->EmitPush(r0);
4790 } else {
4791 // Call the C runtime function.
4792 frame_->CallRuntime(function, arg_count);
4793 frame_->EmitPush(r0);
4794 }
Steve Block6ded16b2010-05-10 14:33:55 +01004795 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00004796}
4797
4798
4799void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
4800#ifdef DEBUG
4801 int original_height = frame_->height();
4802#endif
Steve Block6ded16b2010-05-10 14:33:55 +01004803 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004804 Comment cmnt(masm_, "[ UnaryOperation");
4805
4806 Token::Value op = node->op();
4807
4808 if (op == Token::NOT) {
4809 LoadConditionAndSpill(node->expression(),
Steve Blocka7e24c12009-10-30 11:49:00 +00004810 false_target(),
4811 true_target(),
4812 true);
4813 // LoadCondition may (and usually does) leave a test and branch to
4814 // be emitted by the caller. In that case, negate the condition.
4815 if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
4816
4817 } else if (op == Token::DELETE) {
4818 Property* property = node->expression()->AsProperty();
4819 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
4820 if (property != NULL) {
4821 LoadAndSpill(property->obj());
4822 LoadAndSpill(property->key());
Steve Blockd0582a62009-12-15 09:54:21 +00004823 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004824
4825 } else if (variable != NULL) {
4826 Slot* slot = variable->slot();
4827 if (variable->is_global()) {
4828 LoadGlobal();
4829 __ mov(r0, Operand(variable->name()));
4830 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00004831 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004832
4833 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
4834 // lookup the context holding the named variable
4835 frame_->EmitPush(cp);
4836 __ mov(r0, Operand(variable->name()));
4837 frame_->EmitPush(r0);
4838 frame_->CallRuntime(Runtime::kLookupContext, 2);
4839 // r0: context
4840 frame_->EmitPush(r0);
4841 __ mov(r0, Operand(variable->name()));
4842 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00004843 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00004844
4845 } else {
4846 // Default: Result of deleting non-global, not dynamically
4847 // introduced variables is false.
4848 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
4849 }
4850
4851 } else {
4852 // Default: Result of deleting expressions is true.
4853 LoadAndSpill(node->expression()); // may have side-effects
4854 frame_->Drop();
4855 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
4856 }
4857 frame_->EmitPush(r0);
4858
4859 } else if (op == Token::TYPEOF) {
4860 // Special case for loading the typeof expression; see comment on
4861 // LoadTypeofExpression().
4862 LoadTypeofExpression(node->expression());
4863 frame_->CallRuntime(Runtime::kTypeof, 1);
4864 frame_->EmitPush(r0); // r0 has result
4865
4866 } else {
Leon Clarke4515c472010-02-03 11:58:03 +00004867 bool overwrite =
4868 (node->expression()->AsBinaryOperation() != NULL &&
4869 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Steve Blocka7e24c12009-10-30 11:49:00 +00004870 LoadAndSpill(node->expression());
4871 frame_->EmitPop(r0);
4872 switch (op) {
4873 case Token::NOT:
4874 case Token::DELETE:
4875 case Token::TYPEOF:
4876 UNREACHABLE(); // handled above
4877 break;
4878
4879 case Token::SUB: {
Leon Clarkee46be812010-01-19 14:06:41 +00004880 GenericUnaryOpStub stub(Token::SUB, overwrite);
Steve Blocka7e24c12009-10-30 11:49:00 +00004881 frame_->CallStub(&stub, 0);
4882 break;
4883 }
4884
4885 case Token::BIT_NOT: {
4886 // smi check
4887 JumpTarget smi_label;
4888 JumpTarget continue_label;
4889 __ tst(r0, Operand(kSmiTagMask));
4890 smi_label.Branch(eq);
4891
Leon Clarke4515c472010-02-03 11:58:03 +00004892 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
4893 frame_->CallStub(&stub, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00004894 continue_label.Jump();
Leon Clarke4515c472010-02-03 11:58:03 +00004895
Steve Blocka7e24c12009-10-30 11:49:00 +00004896 smi_label.Bind();
4897 __ mvn(r0, Operand(r0));
4898 __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
4899 continue_label.Bind();
4900 break;
4901 }
4902
4903 case Token::VOID:
4904 // since the stack top is cached in r0, popping and then
4905 // pushing a value can be done by just writing to r0.
4906 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
4907 break;
4908
4909 case Token::ADD: {
4910 // Smi check.
4911 JumpTarget continue_label;
4912 __ tst(r0, Operand(kSmiTagMask));
4913 continue_label.Branch(eq);
4914 frame_->EmitPush(r0);
Steve Blockd0582a62009-12-15 09:54:21 +00004915 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00004916 continue_label.Bind();
4917 break;
4918 }
4919 default:
4920 UNREACHABLE();
4921 }
4922 frame_->EmitPush(r0); // r0 has result
4923 }
4924 ASSERT(!has_valid_frame() ||
4925 (has_cc() && frame_->height() == original_height) ||
4926 (!has_cc() && frame_->height() == original_height + 1));
4927}
4928
4929
4930void CodeGenerator::VisitCountOperation(CountOperation* node) {
4931#ifdef DEBUG
4932 int original_height = frame_->height();
4933#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00004934 Comment cmnt(masm_, "[ CountOperation");
4935
4936 bool is_postfix = node->is_postfix();
4937 bool is_increment = node->op() == Token::INC;
4938
4939 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
4940 bool is_const = (var != NULL && var->mode() == Variable::CONST);
4941
Steve Blocka7e24c12009-10-30 11:49:00 +00004942 if (is_postfix) {
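    // Reserve a stack slot for the postfix result; the old value is stored
    // into it once it is known.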
Kristian Monsen25f61362010-05-21 11:50:48 +01004943 frame_->EmitPush(Operand(Smi::FromInt(0)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004944 }
4945
Leon Clarked91b9f72010-01-27 17:25:45 +00004946 // A constant reference is not saved to, so a constant reference is not a
4947 // compound assignment reference.
4948 { Reference target(this, node->expression(), !is_const);
Steve Blocka7e24c12009-10-30 11:49:00 +00004949 if (target.is_illegal()) {
4950 // Spoof the virtual frame to have the expected height (one higher
4951 // than on entry).
4952 if (!is_postfix) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004953 frame_->EmitPush(Operand(Smi::FromInt(0)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004954 }
Steve Block6ded16b2010-05-10 14:33:55 +01004955 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00004956 return;
4957 }
Kristian Monsen25f61362010-05-21 11:50:48 +01004958 // This pushes 0, 1 or 2 words on the stack to be used later when updating
4959 // the target. It also pushes the current value of the target.
Steve Block6ded16b2010-05-10 14:33:55 +01004960 target.GetValue();
Steve Blocka7e24c12009-10-30 11:49:00 +00004961
4962 JumpTarget slow;
4963 JumpTarget exit;
4964
Steve Blocka7e24c12009-10-30 11:49:00 +00004965 // Check for smi operand.
Kristian Monsen25f61362010-05-21 11:50:48 +01004966 Register value = frame_->PopToRegister();
4967 __ tst(value, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00004968 slow.Branch(ne);
4969
4970 // Postfix: Store the old value as the result.
4971 if (is_postfix) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004972 frame_->SetElementAt(value, target.size());
Steve Blocka7e24c12009-10-30 11:49:00 +00004973 }
4974
4975 // Perform optimistic increment/decrement.
4976 if (is_increment) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004977 __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00004978 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01004979 __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00004980 }
4981
4982 // If the increment/decrement didn't overflow, we're done.
4983 exit.Branch(vc);
4984
4985 // Revert optimistic increment/decrement.
4986 if (is_increment) {
Kristian Monsen25f61362010-05-21 11:50:48 +01004987 __ sub(value, value, Operand(Smi::FromInt(1)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004988 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01004989 __ add(value, value, Operand(Smi::FromInt(1)));
Steve Blocka7e24c12009-10-30 11:49:00 +00004990 }
4991
Kristian Monsen25f61362010-05-21 11:50:48 +01004992 // Slow case: Convert to number. At this point the
4993 // value to be incremented is in the value register.
Steve Blocka7e24c12009-10-30 11:49:00 +00004994 slow.Bind();
Kristian Monsen25f61362010-05-21 11:50:48 +01004995
4996 // Convert the operand to a number.
4997 frame_->EmitPush(value);
4998
Steve Blocka7e24c12009-10-30 11:49:00 +00004999 {
Kristian Monsen25f61362010-05-21 11:50:48 +01005000 VirtualFrame::SpilledScope spilled(frame_);
Steve Blockd0582a62009-12-15 09:54:21 +00005001 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Kristian Monsen25f61362010-05-21 11:50:48 +01005002
5003 if (is_postfix) {
5004 // Postfix: store to result (on the stack).
5005 __ str(r0, frame_->ElementAt(target.size()));
5006 }
5007
5008 // Compute the new value.
5009 frame_->EmitPush(r0);
5010 frame_->EmitPush(Operand(Smi::FromInt(1)));
5011 if (is_increment) {
5012 frame_->CallRuntime(Runtime::kNumberAdd, 2);
5013 } else {
5014 frame_->CallRuntime(Runtime::kNumberSub, 2);
5015 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005016 }
5017
Kristian Monsen25f61362010-05-21 11:50:48 +01005018 __ Move(value, r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00005019 // Store the new value in the target if not const.
Kristian Monsen25f61362010-05-21 11:50:48 +01005020 // At this point the answer is in the value register.
Steve Blocka7e24c12009-10-30 11:49:00 +00005021 exit.Bind();
Kristian Monsen25f61362010-05-21 11:50:48 +01005022 frame_->EmitPush(value);
5023 // Set the target with the result, leaving the result on
5024 // top of the stack. Removes the target from the stack if
5025 // it has a non-zero size.
Steve Blocka7e24c12009-10-30 11:49:00 +00005026 if (!is_const) target.SetValue(NOT_CONST_INIT);
5027 }
5028
5029 // Postfix: Discard the new value and use the old.
Kristian Monsen25f61362010-05-21 11:50:48 +01005030 if (is_postfix) frame_->Pop();
Steve Block6ded16b2010-05-10 14:33:55 +01005031 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00005032}
5033
5034
Steve Block6ded16b2010-05-10 14:33:55 +01005035void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005036 // According to ECMA-262 section 11.11, page 58, the binary logical
5037 // operators must yield the result of one of the two expressions
5038 // before any ToBoolean() conversions. This means that the value
5039 // produced by a && or || operator is not necessarily a boolean.
5040
5041 // NOTE: If the left hand side produces a materialized value (not in
5042 // the CC register), we force the right hand side to do the
5043 // same. This is necessary because we may have to branch to the exit
5044 // after evaluating the left hand side (due to the shortcut
5045 // semantics), but the compiler must (statically) know if the result
5046 // of compiling the binary operation is materialized or not.
Steve Block6ded16b2010-05-10 14:33:55 +01005047 if (node->op() == Token::AND) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005048 JumpTarget is_true;
5049 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00005050 &is_true,
5051 false_target(),
5052 false);
5053 if (has_valid_frame() && !has_cc()) {
5054 // The left-hand side result is on top of the virtual frame.
5055 JumpTarget pop_and_continue;
5056 JumpTarget exit;
5057
5058 __ ldr(r0, frame_->Top()); // Duplicate the stack top.
5059 frame_->EmitPush(r0);
5060 // Avoid popping the result if it converts to 'false' using the
5061 // standard ToBoolean() conversion as described in ECMA-262,
5062 // section 9.2, page 30.
5063 ToBoolean(&pop_and_continue, &exit);
5064 Branch(false, &exit);
5065
5066 // Pop the result of evaluating the first part.
5067 pop_and_continue.Bind();
5068 frame_->EmitPop(r0);
5069
5070 // Evaluate right side expression.
5071 is_true.Bind();
5072 LoadAndSpill(node->right());
5073
5074 // Exit (always with a materialized value).
5075 exit.Bind();
5076 } else if (has_cc() || is_true.is_linked()) {
5077 // The left-hand side is either (a) partially compiled to
5078 // control flow with a final branch left to emit or (b) fully
5079 // compiled to control flow and possibly true.
5080 if (has_cc()) {
5081 Branch(false, false_target());
5082 }
5083 is_true.Bind();
5084 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00005085 true_target(),
5086 false_target(),
5087 false);
5088 } else {
5089 // Nothing to do.
5090 ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
5091 }
5092
Steve Block6ded16b2010-05-10 14:33:55 +01005093 } else {
5094 ASSERT(node->op() == Token::OR);
Steve Blocka7e24c12009-10-30 11:49:00 +00005095 JumpTarget is_false;
5096 LoadConditionAndSpill(node->left(),
Steve Blocka7e24c12009-10-30 11:49:00 +00005097 true_target(),
5098 &is_false,
5099 false);
5100 if (has_valid_frame() && !has_cc()) {
5101 // The left-hand side result is on top of the virtual frame.
5102 JumpTarget pop_and_continue;
5103 JumpTarget exit;
5104
5105 __ ldr(r0, frame_->Top());
5106 frame_->EmitPush(r0);
5107 // Avoid popping the result if it converts to 'true' using the
5108 // standard ToBoolean() conversion as described in ECMA-262,
5109 // section 9.2, page 30.
5110 ToBoolean(&exit, &pop_and_continue);
5111 Branch(true, &exit);
5112
5113 // Pop the result of evaluating the first part.
5114 pop_and_continue.Bind();
5115 frame_->EmitPop(r0);
5116
5117 // Evaluate right side expression.
5118 is_false.Bind();
5119 LoadAndSpill(node->right());
5120
5121 // Exit (always with a materialized value).
5122 exit.Bind();
5123 } else if (has_cc() || is_false.is_linked()) {
5124 // The left-hand side is either (a) partially compiled to
5125 // control flow with a final branch left to emit or (b) fully
5126 // compiled to control flow and possibly false.
5127 if (has_cc()) {
5128 Branch(true, true_target());
5129 }
5130 is_false.Bind();
5131 LoadConditionAndSpill(node->right(),
Steve Blocka7e24c12009-10-30 11:49:00 +00005132 true_target(),
5133 false_target(),
5134 false);
5135 } else {
5136 // Nothing to do.
5137 ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
5138 }
Steve Block6ded16b2010-05-10 14:33:55 +01005139 }
5140}
Steve Blocka7e24c12009-10-30 11:49:00 +00005141
Steve Block6ded16b2010-05-10 14:33:55 +01005142
5143void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
5144#ifdef DEBUG
5145 int original_height = frame_->height();
5146#endif
5147 Comment cmnt(masm_, "[ BinaryOperation");
5148
5149 if (node->op() == Token::AND || node->op() == Token::OR) {
5150 VirtualFrame::SpilledScope spilled_scope(frame_);
5151 GenerateLogicalBooleanOperation(node);
Steve Blocka7e24c12009-10-30 11:49:00 +00005152 } else {
5153 // Optimize for the case where (at least) one of the expressions
5154 // is a literal small integer.
5155 Literal* lliteral = node->left()->AsLiteral();
5156 Literal* rliteral = node->right()->AsLiteral();
5157 // NOTE: The code below assumes that the slow cases (calls to runtime)
5158 // never return a constant/immutable object.
5159 bool overwrite_left =
5160 (node->left()->AsBinaryOperation() != NULL &&
5161 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
5162 bool overwrite_right =
5163 (node->right()->AsBinaryOperation() != NULL &&
5164 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
5165
5166 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
Steve Block6ded16b2010-05-10 14:33:55 +01005167 VirtualFrame::RegisterAllocationScope scope(this);
5168 Load(node->left());
Steve Blocka7e24c12009-10-30 11:49:00 +00005169 SmiOperation(node->op(),
5170 rliteral->handle(),
5171 false,
5172 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005173 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
Steve Block6ded16b2010-05-10 14:33:55 +01005174 VirtualFrame::RegisterAllocationScope scope(this);
5175 Load(node->right());
Steve Blocka7e24c12009-10-30 11:49:00 +00005176 SmiOperation(node->op(),
5177 lliteral->handle(),
5178 true,
5179 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005180 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01005181 VirtualFrame::RegisterAllocationScope scope(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00005182 OverwriteMode overwrite_mode = NO_OVERWRITE;
5183 if (overwrite_left) {
5184 overwrite_mode = OVERWRITE_LEFT;
5185 } else if (overwrite_right) {
5186 overwrite_mode = OVERWRITE_RIGHT;
5187 }
Steve Block6ded16b2010-05-10 14:33:55 +01005188 Load(node->left());
5189 Load(node->right());
5190 VirtualFrameBinaryOperation(node->op(), overwrite_mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00005191 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005192 }
5193 ASSERT(!has_valid_frame() ||
5194 (has_cc() && frame_->height() == original_height) ||
5195 (!has_cc() && frame_->height() == original_height + 1));
5196}
5197
5198
5199void CodeGenerator::VisitThisFunction(ThisFunction* node) {
5200#ifdef DEBUG
5201 int original_height = frame_->height();
5202#endif
Steve Block6ded16b2010-05-10 14:33:55 +01005203 VirtualFrame::SpilledScope spilled_scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005204 __ ldr(r0, frame_->Function());
5205 frame_->EmitPush(r0);
Steve Block6ded16b2010-05-10 14:33:55 +01005206 ASSERT_EQ(original_height + 1, frame_->height());
Steve Blocka7e24c12009-10-30 11:49:00 +00005207}
5208
5209
5210void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
5211#ifdef DEBUG
5212 int original_height = frame_->height();
5213#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005214 Comment cmnt(masm_, "[ CompareOperation");
5215
Steve Block6ded16b2010-05-10 14:33:55 +01005216 VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
5217
Steve Blocka7e24c12009-10-30 11:49:00 +00005218 // Get the expressions from the node.
5219 Expression* left = node->left();
5220 Expression* right = node->right();
5221 Token::Value op = node->op();
5222
5223 // To make null checks efficient, we check if either left or right is the
5224 // literal 'null'. If so, we optimize the code by inlining a null check
5225 // instead of calling the (very) general runtime routine for checking
5226 // equality.
5227 if (op == Token::EQ || op == Token::EQ_STRICT) {
5228 bool left_is_null =
5229 left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
5230 bool right_is_null =
5231 right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
5232 // The 'null' value can only be equal to 'null' or 'undefined'.
5233 if (left_is_null || right_is_null) {
Steve Block6ded16b2010-05-10 14:33:55 +01005234 Load(left_is_null ? right : left);
5235 Register tos = frame_->PopToRegister();
5236 // JumpTargets can't cope with register allocation yet.
5237 frame_->SpillAll();
Steve Blocka7e24c12009-10-30 11:49:00 +00005238 __ LoadRoot(ip, Heap::kNullValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005239 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005240
5241 // The 'null' value is only equal to 'undefined' if using non-strict
5242 // comparisons.
5243 if (op != Token::EQ_STRICT) {
5244 true_target()->Branch(eq);
5245
5246 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005247 __ cmp(tos, Operand(ip));
Steve Blocka7e24c12009-10-30 11:49:00 +00005248 true_target()->Branch(eq);
5249
Steve Block6ded16b2010-05-10 14:33:55 +01005250 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005251 false_target()->Branch(eq);
5252
5253 // It can be an undetectable object.
Steve Block6ded16b2010-05-10 14:33:55 +01005254 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
5255 __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
5256 __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
5257 __ cmp(tos, Operand(1 << Map::kIsUndetectable));
Steve Blocka7e24c12009-10-30 11:49:00 +00005258 }
5259
5260 cc_reg_ = eq;
5261 ASSERT(has_cc() && frame_->height() == original_height);
5262 return;
5263 }
5264 }
5265
5266 // To make typeof testing for natives implemented in JavaScript really
5267 // efficient, we generate special code for expressions of the form:
5268 // 'typeof <expression> == <string>'.
5269 UnaryOperation* operation = left->AsUnaryOperation();
5270 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
5271 (operation != NULL && operation->op() == Token::TYPEOF) &&
5272 (right->AsLiteral() != NULL &&
5273 right->AsLiteral()->handle()->IsString())) {
5274 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
5275
Steve Block6ded16b2010-05-10 14:33:55 +01005276 // Load the operand, move it to a register.
Steve Blocka7e24c12009-10-30 11:49:00 +00005277 LoadTypeofExpression(operation->expression());
Steve Block6ded16b2010-05-10 14:33:55 +01005278 Register tos = frame_->PopToRegister();
5279
5280 // JumpTargets can't cope with register allocation yet.
5281 frame_->SpillAll();
5282
5283 Register scratch = VirtualFrame::scratch0();
Steve Blocka7e24c12009-10-30 11:49:00 +00005284
5285 if (check->Equals(Heap::number_symbol())) {
Steve Block6ded16b2010-05-10 14:33:55 +01005286 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005287 true_target()->Branch(eq);
Steve Block6ded16b2010-05-10 14:33:55 +01005288 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00005289 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005290 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005291 cc_reg_ = eq;
5292
5293 } else if (check->Equals(Heap::string_symbol())) {
Steve Block6ded16b2010-05-10 14:33:55 +01005294 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005295 false_target()->Branch(eq);
5296
Steve Block6ded16b2010-05-10 14:33:55 +01005297 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00005298
5299 // It can be an undetectable string object.
Steve Block6ded16b2010-05-10 14:33:55 +01005300 __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
5301 __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
5302 __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
Steve Blocka7e24c12009-10-30 11:49:00 +00005303 false_target()->Branch(eq);
5304
Steve Block6ded16b2010-05-10 14:33:55 +01005305 __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
5306 __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00005307 cc_reg_ = lt;
5308
5309 } else if (check->Equals(Heap::boolean_symbol())) {
5310 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005311 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005312 true_target()->Branch(eq);
5313 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005314 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005315 cc_reg_ = eq;
5316
5317 } else if (check->Equals(Heap::undefined_symbol())) {
5318 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005319 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005320 true_target()->Branch(eq);
5321
Steve Block6ded16b2010-05-10 14:33:55 +01005322 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005323 false_target()->Branch(eq);
5324
5325 // It can be an undetectable object.
Steve Block6ded16b2010-05-10 14:33:55 +01005326 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
5327 __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
5328 __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
5329 __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
Steve Blocka7e24c12009-10-30 11:49:00 +00005330
5331 cc_reg_ = eq;
5332
5333 } else if (check->Equals(Heap::function_symbol())) {
Steve Block6ded16b2010-05-10 14:33:55 +01005334 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005335 false_target()->Branch(eq);
Steve Block6ded16b2010-05-10 14:33:55 +01005336 Register map_reg = scratch;
5337 __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
Steve Blockd0582a62009-12-15 09:54:21 +00005338 true_target()->Branch(eq);
5339 // Regular expressions are callable so typeof == 'function'.
Steve Block6ded16b2010-05-10 14:33:55 +01005340 __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005341 cc_reg_ = eq;
5342
5343 } else if (check->Equals(Heap::object_symbol())) {
Steve Block6ded16b2010-05-10 14:33:55 +01005344 __ tst(tos, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00005345 false_target()->Branch(eq);
5346
Steve Blocka7e24c12009-10-30 11:49:00 +00005347 __ LoadRoot(ip, Heap::kNullValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01005348 __ cmp(tos, ip);
Steve Blocka7e24c12009-10-30 11:49:00 +00005349 true_target()->Branch(eq);
5350
Steve Block6ded16b2010-05-10 14:33:55 +01005351 Register map_reg = scratch;
5352 __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
Steve Blockd0582a62009-12-15 09:54:21 +00005353 false_target()->Branch(eq);
5354
Steve Blocka7e24c12009-10-30 11:49:00 +00005355 // It can be an undetectable object.
Steve Block6ded16b2010-05-10 14:33:55 +01005356 __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
5357 __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
5358 __ cmp(tos, Operand(1 << Map::kIsUndetectable));
Steve Blocka7e24c12009-10-30 11:49:00 +00005359 false_target()->Branch(eq);
5360
Steve Block6ded16b2010-05-10 14:33:55 +01005361 __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
5362 __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00005363 false_target()->Branch(lt);
Steve Block6ded16b2010-05-10 14:33:55 +01005364 __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
Steve Blocka7e24c12009-10-30 11:49:00 +00005365 cc_reg_ = le;
5366
5367 } else {
5368 // Uncommon case: typeof testing against a string literal that is
5369 // never returned from the typeof operator.
5370 false_target()->Jump();
5371 }
5372 ASSERT(!has_valid_frame() ||
5373 (has_cc() && frame_->height() == original_height));
5374 return;
5375 }
5376
5377 switch (op) {
5378 case Token::EQ:
5379 Comparison(eq, left, right, false);
5380 break;
5381
5382 case Token::LT:
5383 Comparison(lt, left, right);
5384 break;
5385
5386 case Token::GT:
5387 Comparison(gt, left, right);
5388 break;
5389
5390 case Token::LTE:
5391 Comparison(le, left, right);
5392 break;
5393
5394 case Token::GTE:
5395 Comparison(ge, left, right);
5396 break;
5397
5398 case Token::EQ_STRICT:
5399 Comparison(eq, left, right, true);
5400 break;
5401
5402 case Token::IN: {
Steve Block6ded16b2010-05-10 14:33:55 +01005403 VirtualFrame::SpilledScope scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005404 LoadAndSpill(left);
5405 LoadAndSpill(right);
Steve Blockd0582a62009-12-15 09:54:21 +00005406 frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
Steve Blocka7e24c12009-10-30 11:49:00 +00005407 frame_->EmitPush(r0);
5408 break;
5409 }
5410
5411 case Token::INSTANCEOF: {
Steve Block6ded16b2010-05-10 14:33:55 +01005412 VirtualFrame::SpilledScope scope(frame_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005413 LoadAndSpill(left);
5414 LoadAndSpill(right);
5415 InstanceofStub stub;
5416 frame_->CallStub(&stub, 2);
5417 // At this point if instanceof succeeded then r0 == 0.
5418 __ tst(r0, Operand(r0));
5419 cc_reg_ = eq;
5420 break;
5421 }
5422
5423 default:
5424 UNREACHABLE();
5425 }
5426 ASSERT((has_cc() && frame_->height() == original_height) ||
5427 (!has_cc() && frame_->height() == original_height + 1));
5428}
5429
5430
Steve Block6ded16b2010-05-10 14:33:55 +01005431class DeferredReferenceGetNamedValue: public DeferredCode {
5432 public:
5433 explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
5434 set_comment("[ DeferredReferenceGetNamedValue");
5435 }
5436
5437 virtual void Generate();
5438
5439 private:
5440 Handle<String> name_;
5441};
5442
5443
5444void DeferredReferenceGetNamedValue::Generate() {
5445 Register scratch1 = VirtualFrame::scratch0();
5446 Register scratch2 = VirtualFrame::scratch1();
5447 __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
5448 __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
5449
5450 // Setup the registers and call load IC.
5451 // On entry to this deferred code, r0 is assumed to already contain the
5452 // receiver from the top of the stack.
5453 __ mov(r2, Operand(name_));
5454
5455 // The rest of the instructions in the deferred code must be together.
5456 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5457 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5458 __ Call(ic, RelocInfo::CODE_TARGET);
5459 // The call must be followed by a nop(1) instruction to indicate that the
5460 // in-object property load has been inlined.
5461 __ nop(PROPERTY_ACCESS_INLINED);
5462
5463 // Block the constant pool for one more instruction after leaving this
5464 // constant pool block scope to include the branch instruction ending the
5465 // deferred code.
5466 __ BlockConstPoolFor(1);
5467 }
5468}
5469
5470
5471class DeferredReferenceGetKeyedValue: public DeferredCode {
5472 public:
Kristian Monsen25f61362010-05-21 11:50:48 +01005473 DeferredReferenceGetKeyedValue(Register key, Register receiver)
5474 : key_(key), receiver_(receiver) {
Steve Block6ded16b2010-05-10 14:33:55 +01005475 set_comment("[ DeferredReferenceGetKeyedValue");
5476 }
5477
5478 virtual void Generate();
Kristian Monsen25f61362010-05-21 11:50:48 +01005479
5480 private:
5481 Register key_;
5482 Register receiver_;
Steve Block6ded16b2010-05-10 14:33:55 +01005483};
5484
5485
5486void DeferredReferenceGetKeyedValue::Generate() {
Kristian Monsen25f61362010-05-21 11:50:48 +01005487 ASSERT((key_.is(r0) && receiver_.is(r1)) ||
5488 (key_.is(r1) && receiver_.is(r0)));
5489
Steve Block6ded16b2010-05-10 14:33:55 +01005490 Register scratch1 = VirtualFrame::scratch0();
5491 Register scratch2 = VirtualFrame::scratch1();
5492 __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
5493 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
5494
Kristian Monsen25f61362010-05-21 11:50:48 +01005495 // Ensure the key is in r0 and the receiver is in r1 to match the keyed
5496 // load IC calling convention.
5497 if (key_.is(r1)) {
5498 __ Swap(r0, r1, ip);
5499 }
5500
Steve Block6ded16b2010-05-10 14:33:55 +01005501 // The rest of the instructions in the deferred code must be together.
5502 { Assembler::BlockConstPoolScope block_const_pool(masm_);
Kristian Monsen25f61362010-05-21 11:50:48 +01005503 // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
Steve Block6ded16b2010-05-10 14:33:55 +01005504 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
5505 __ Call(ic, RelocInfo::CODE_TARGET);
5506 // The call must be followed by a nop instruction to indicate that the
5507 // keyed load has been inlined.
5508 __ nop(PROPERTY_ACCESS_INLINED);
5509
5510 // Block the constant pool for one more instruction after leaving this
5511 // constant pool block scope to include the branch instruction ending the
5512 // deferred code.
5513 __ BlockConstPoolFor(1);
5514 }
5515}
5516
5517
5518class DeferredReferenceSetKeyedValue: public DeferredCode {
5519 public:
5520 DeferredReferenceSetKeyedValue() {
5521 set_comment("[ DeferredReferenceSetKeyedValue");
5522 }
5523
5524 virtual void Generate();
5525};
5526
5527
5528void DeferredReferenceSetKeyedValue::Generate() {
5529 Register scratch1 = VirtualFrame::scratch0();
5530 Register scratch2 = VirtualFrame::scratch1();
5531 __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
5532 __ IncrementCounter(
5533 &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
5534
5535 // The rest of the instructions in the deferred code must be together.
5536 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5537 // Call keyed store IC. It has receiver and key on the stack and the value to
5538 // store in r0.
5539 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
5540 __ Call(ic, RelocInfo::CODE_TARGET);
5541 // The call must be followed by a nop instruction to indicate that the
5542 // keyed store has been inlined.
5543 __ nop(PROPERTY_ACCESS_INLINED);
5544
5545 // Block the constant pool for one more instruction after leaving this
5546 // constant pool block scope to include the branch instruction ending the
5547 // deferred code.
5548 __ BlockConstPoolFor(1);
5549 }
5550}
5551
5552
5553void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
5554 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
5555 Comment cmnt(masm(), "[ Load from named Property");
5556 // Set up the name register and call load IC.
5557 frame_->CallLoadIC(name,
5558 is_contextual
5559 ? RelocInfo::CODE_TARGET_CONTEXT
5560 : RelocInfo::CODE_TARGET);
5561 } else {
5562 // Inline the in-object property case.
5563 Comment cmnt(masm(), "[ Inlined named property load");
5564
5565 // Counter will be decremented in the deferred code. Placed here to avoid
5566 // having it in the instruction stream below where patching will occur.
5567 __ IncrementCounter(&Counters::named_load_inline, 1,
5568 frame_->scratch0(), frame_->scratch1());
5569
5570 // The following instructions are the inlined load of an in-object property.
5571 // Parts of this code are patched, so the exact instructions generated
5572 // need to be fixed. Therefore the constant pool is blocked when generating
5573 // this code.
5574
5575 // Load the receiver from the stack.
5576 frame_->SpillAllButCopyTOSToR0();
5577
5578 DeferredReferenceGetNamedValue* deferred =
5579 new DeferredReferenceGetNamedValue(name);
5580
5581#ifdef DEBUG
5582 int kInlinedNamedLoadInstructions = 7;
5583 Label check_inlined_codesize;
5584 masm_->bind(&check_inlined_codesize);
5585#endif
5586
5587 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5588 // Check that the receiver is a heap object.
5589 __ tst(r0, Operand(kSmiTagMask));
5590 deferred->Branch(eq);
5591
5592 // Check the map. The null map used below is patched by the inline cache
5593 // code.
5594 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
5595 __ mov(r3, Operand(Factory::null_value()));
5596 __ cmp(r2, r3);
5597 deferred->Branch(ne);
5598
5599 // Initially use an invalid index. The index will be patched by the
5600 // inline cache code.
5601 __ ldr(r0, MemOperand(r0, 0));
5602
5603 // Make sure that the expected number of instructions is generated.
5604 ASSERT_EQ(kInlinedNamedLoadInstructions,
5605 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
5606 }
5607
5608 deferred->BindExit();
5609 }
5610}
5611
5612
5613void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
5614#ifdef DEBUG
5615 int expected_height = frame_->height() - (is_contextual ? 1 : 2);
5616#endif
5617 frame_->CallStoreIC(name, is_contextual);
5618
5619 ASSERT_EQ(expected_height, frame_->height());
5620}
5621
5622
5623void CodeGenerator::EmitKeyedLoad() {
5624 if (loop_nesting() == 0) {
5625 Comment cmnt(masm_, "[ Load from keyed property");
5626 frame_->CallKeyedLoadIC();
5627 } else {
5628 // Inline the keyed load.
5629 Comment cmnt(masm_, "[ Inlined load from keyed property");
5630
5631 // Counter will be decremented in the deferred code. Placed here to avoid
5632 // having it in the instruction stream below where patching will occur.
5633 __ IncrementCounter(&Counters::keyed_load_inline, 1,
5634 frame_->scratch0(), frame_->scratch1());
5635
Kristian Monsen25f61362010-05-21 11:50:48 +01005636 // Load the key and receiver from the stack.
5637 Register key = frame_->PopToRegister();
5638 Register receiver = frame_->PopToRegister(key);
Steve Block6ded16b2010-05-10 14:33:55 +01005639 VirtualFrame::SpilledScope spilled(frame_);
5640
Kristian Monsen25f61362010-05-21 11:50:48 +01005641 // The deferred code expects key and receiver in registers.
Steve Block6ded16b2010-05-10 14:33:55 +01005642 DeferredReferenceGetKeyedValue* deferred =
Kristian Monsen25f61362010-05-21 11:50:48 +01005643 new DeferredReferenceGetKeyedValue(key, receiver);
Steve Block6ded16b2010-05-10 14:33:55 +01005644
5645 // Check that the receiver is a heap object.
5646 __ tst(receiver, Operand(kSmiTagMask));
5647 deferred->Branch(eq);
5648
5649 // The following instructions are part of the inlined load keyed
5650 // property code which can be patched. Therefore the exact number of
5651 // instructions generated needs to be fixed, so the constant pool is blocked
5652 // while generating this code.
Steve Block6ded16b2010-05-10 14:33:55 +01005653 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5654 Register scratch1 = VirtualFrame::scratch0();
5655 Register scratch2 = VirtualFrame::scratch1();
5656 // Check the map. The null map used below is patched by the inline cache
5657 // code.
5658 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
Kristian Monsen25f61362010-05-21 11:50:48 +01005659#ifdef DEBUG
5660 Label check_inlined_codesize;
5661 masm_->bind(&check_inlined_codesize);
5662#endif
Steve Block6ded16b2010-05-10 14:33:55 +01005663 __ mov(scratch2, Operand(Factory::null_value()));
5664 __ cmp(scratch1, scratch2);
5665 deferred->Branch(ne);
5666
5667 // Check that the key is a smi.
5668 __ tst(key, Operand(kSmiTagMask));
5669 deferred->Branch(ne);
5670
5671 // Get the elements array from the receiver and check that it
5672 // is not a dictionary.
5673 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
5674 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
5675 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
5676 __ cmp(scratch2, ip);
5677 deferred->Branch(ne);
5678
5679 // Check that key is within bounds. Use unsigned comparison to handle
5680 // negative keys.
5681 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
5682 __ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
5683 deferred->Branch(ls); // Unsigned less equal.
5684
5685 // Load and check that the result is not the hole (key is a smi).
5686 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
5687 __ add(scratch1,
5688 scratch1,
5689 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
Kristian Monsen25f61362010-05-21 11:50:48 +01005690 __ ldr(scratch1,
Steve Block6ded16b2010-05-10 14:33:55 +01005691 MemOperand(scratch1, key, LSL,
5692 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
Kristian Monsen25f61362010-05-21 11:50:48 +01005693 __ cmp(scratch1, scratch2);
Steve Block6ded16b2010-05-10 14:33:55 +01005694 deferred->Branch(eq);
5695
Kristian Monsen25f61362010-05-21 11:50:48 +01005696 __ mov(r0, scratch1);
Steve Block6ded16b2010-05-10 14:33:55 +01005697 // Make sure that the expected number of instructions is generated.
Kristian Monsen25f61362010-05-21 11:50:48 +01005698 ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatchSize,
Steve Block6ded16b2010-05-10 14:33:55 +01005699 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
5700 }
5701
5702 deferred->BindExit();
5703 }
5704}
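// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the generated code above): the bounds
// check in the inlined keyed load relies on a single unsigned comparison to
// reject both negative keys and keys that are >= the array length. A minimal
// host-side C++ model of that trick, assuming an already untagged 32-bit key:
static inline bool ExampleKeyInBounds(int32_t key, int32_t length) {
  // Casting to unsigned maps negative keys to very large values, so one
  // "unsigned less than" test covers both the lower and the upper bound.
  // ExampleKeyInBounds(-1, 10) == false, ExampleKeyInBounds(3, 10) == true,
  // ExampleKeyInBounds(10, 10) == false.
  return static_cast<uint32_t>(key) < static_cast<uint32_t>(length);
}
// ---------------------------------------------------------------------------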
5705
5706
5707void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
5708 VirtualFrame::SpilledScope scope(frame_);
5709 // Generate inlined version of the keyed store if the code is in a loop
5710 // and the key is likely to be a smi.
5711 if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
5712 // Inline the keyed store.
5713 Comment cmnt(masm_, "[ Inlined store to keyed property");
5714
5715 DeferredReferenceSetKeyedValue* deferred =
5716 new DeferredReferenceSetKeyedValue();
5717
5718 // Counter will be decremented in the deferred code. Placed here to avoid
5719 // having it in the instruction stream below where patching will occur.
5720 __ IncrementCounter(&Counters::keyed_store_inline, 1,
5721 frame_->scratch0(), frame_->scratch1());
5722
5723 // Check that the value is a smi. As this inlined code does not set the
5724 // write barrier it is only possible to store smi values.
5725 __ tst(r0, Operand(kSmiTagMask));
5726 deferred->Branch(ne);
5727
5728 // Load the key and receiver from the stack.
5729 __ ldr(r1, MemOperand(sp, 0));
5730 __ ldr(r2, MemOperand(sp, kPointerSize));
5731
5732 // Check that the key is a smi.
5733 __ tst(r1, Operand(kSmiTagMask));
5734 deferred->Branch(ne);
5735
5736 // Check that the receiver is a heap object.
5737 __ tst(r2, Operand(kSmiTagMask));
5738 deferred->Branch(eq);
5739
5740 // Check that the receiver is a JSArray.
5741 __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
5742 deferred->Branch(ne);
5743
5744 // Check that the key is within bounds. Both the key and the length of
5745 // the JSArray are smis. Use unsigned comparison to handle negative keys.
5746 __ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset));
5747 __ cmp(r3, r1);
5748 deferred->Branch(ls); // Unsigned less equal.
5749
5750 // The following instructions are part of the inlined store keyed
5751 // property code which can be patched. Therefore the exact number of
5752 // instructions generated needs to be fixed, so the constant pool is blocked
5753 // while generating this code.
5754#ifdef DEBUG
5755 int kInlinedKeyedStoreInstructions = 7;
5756 Label check_inlined_codesize;
5757 masm_->bind(&check_inlined_codesize);
5758#endif
5759 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5760 // Get the elements array from the receiver and check that it
5761 // is not a dictionary.
5762 __ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
5763 __ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset));
5764 // Read the fixed array map from the constant pool (not from the root
5765 // array) so that the value can be patched. When debugging, we patch this
5766 // comparison to always fail so that we will hit the IC call in the
5767 // deferred code which will allow the debugger to break for fast case
5768 // stores.
5769 __ mov(r5, Operand(Factory::fixed_array_map()));
5770 __ cmp(r4, r5);
5771 deferred->Branch(ne);
5772
5773 // Store the value.
5774 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5775 __ str(r0, MemOperand(r3, r1, LSL,
5776 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
5777
5778 // Make sure that the expected number of instructions is generated.
5779 ASSERT_EQ(kInlinedKeyedStoreInstructions,
5780 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
5781 }
5782
5783 deferred->BindExit();
5784 } else {
5785 frame()->CallKeyedStoreIC();
5786 }
Leon Clarked91b9f72010-01-27 17:25:45 +00005787}
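// ---------------------------------------------------------------------------
// Illustrative sketch only (hypothetical helpers, not V8's actual API): the
// inlined keyed store above only accepts smi values because storing a smi
// never creates a heap pointer that the write barrier would have to record.
// With the 32-bit smi encoding assumed here (kSmiTag == 0, kSmiTagSize == 1),
// a word is a smi when its low tag bit is clear:
static inline bool ExampleIsSmi(int32_t word) { return (word & 1) == 0; }
static inline int32_t ExampleSmiTag(int32_t value) {
  // The value must fit in 31 bits, as all smis on this port do.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
static inline int32_t ExampleSmiUntag(int32_t word) { return word >> 1; }
// ExampleIsSmi(ExampleSmiTag(7)) == true, ExampleSmiUntag(ExampleSmiTag(7)) == 7.
// ---------------------------------------------------------------------------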
5788
5789
Steve Blocka7e24c12009-10-30 11:49:00 +00005790#ifdef DEBUG
5791bool CodeGenerator::HasValidEntryRegisters() { return true; }
5792#endif
5793
5794
5795#undef __
5796#define __ ACCESS_MASM(masm)
5797
5798
5799Handle<String> Reference::GetName() {
5800 ASSERT(type_ == NAMED);
5801 Property* property = expression_->AsProperty();
5802 if (property == NULL) {
5803 // Global variable reference treated as a named property reference.
5804 VariableProxy* proxy = expression_->AsVariableProxy();
5805 ASSERT(proxy->AsVariable() != NULL);
5806 ASSERT(proxy->AsVariable()->is_global());
5807 return proxy->name();
5808 } else {
5809 Literal* raw_name = property->key()->AsLiteral();
5810 ASSERT(raw_name != NULL);
5811 return Handle<String>(String::cast(*raw_name->handle()));
5812 }
5813}
5814
5815
Steve Blockd0582a62009-12-15 09:54:21 +00005816void Reference::GetValue() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005817 ASSERT(cgen_->HasValidEntryRegisters());
5818 ASSERT(!is_illegal());
5819 ASSERT(!cgen_->has_cc());
5820 MacroAssembler* masm = cgen_->masm();
5821 Property* property = expression_->AsProperty();
5822 if (property != NULL) {
5823 cgen_->CodeForSourcePosition(property->position());
5824 }
5825
5826 switch (type_) {
5827 case SLOT: {
5828 Comment cmnt(masm, "[ Load from Slot");
5829 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
5830 ASSERT(slot != NULL);
Steve Block6ded16b2010-05-10 14:33:55 +01005831 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
Kristian Monsen25f61362010-05-21 11:50:48 +01005832 if (!persist_after_get_) {
5833 cgen_->UnloadReference(this);
5834 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005835 break;
5836 }
5837
5838 case NAMED: {
Steve Blocka7e24c12009-10-30 11:49:00 +00005839 Variable* var = expression_->AsVariableProxy()->AsVariable();
Steve Block6ded16b2010-05-10 14:33:55 +01005840 bool is_global = var != NULL;
5841 ASSERT(!is_global || var->is_global());
5842 cgen_->EmitNamedLoad(GetName(), is_global);
5843 cgen_->frame()->EmitPush(r0);
Kristian Monsen25f61362010-05-21 11:50:48 +01005844 if (!persist_after_get_) {
5845 cgen_->UnloadReference(this);
5846 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005847 break;
5848 }
5849
5850 case KEYED: {
Kristian Monsen25f61362010-05-21 11:50:48 +01005851 if (persist_after_get_) {
5852 cgen_->frame()->Dup2();
5853 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005854 ASSERT(property != NULL);
Steve Block6ded16b2010-05-10 14:33:55 +01005855 cgen_->EmitKeyedLoad();
Leon Clarked91b9f72010-01-27 17:25:45 +00005856 cgen_->frame()->EmitPush(r0);
Kristian Monsen25f61362010-05-21 11:50:48 +01005857 if (!persist_after_get_) set_unloaded();
Steve Blocka7e24c12009-10-30 11:49:00 +00005858 break;
5859 }
5860
5861 default:
5862 UNREACHABLE();
5863 }
5864}
5865
5866
5867void Reference::SetValue(InitState init_state) {
5868 ASSERT(!is_illegal());
5869 ASSERT(!cgen_->has_cc());
5870 MacroAssembler* masm = cgen_->masm();
5871 VirtualFrame* frame = cgen_->frame();
5872 Property* property = expression_->AsProperty();
5873 if (property != NULL) {
5874 cgen_->CodeForSourcePosition(property->position());
5875 }
5876
5877 switch (type_) {
5878 case SLOT: {
5879 Comment cmnt(masm, "[ Store to Slot");
5880 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
Leon Clarkee46be812010-01-19 14:06:41 +00005881 cgen_->StoreToSlot(slot, init_state);
Steve Block6ded16b2010-05-10 14:33:55 +01005882 set_unloaded();
Steve Blocka7e24c12009-10-30 11:49:00 +00005883 break;
5884 }
5885
5886 case NAMED: {
5887 Comment cmnt(masm, "[ Store to named Property");
Steve Block6ded16b2010-05-10 14:33:55 +01005888 cgen_->EmitNamedStore(GetName(), false);
Steve Blocka7e24c12009-10-30 11:49:00 +00005889 frame->EmitPush(r0);
Andrei Popescu402d9372010-02-26 13:31:12 +00005890 set_unloaded();
Steve Blocka7e24c12009-10-30 11:49:00 +00005891 break;
5892 }
5893
5894 case KEYED: {
Steve Block6ded16b2010-05-10 14:33:55 +01005895 VirtualFrame::SpilledScope scope(frame);
Steve Blocka7e24c12009-10-30 11:49:00 +00005896 Comment cmnt(masm, "[ Store to keyed Property");
5897 Property* property = expression_->AsProperty();
5898 ASSERT(property != NULL);
5899 cgen_->CodeForSourcePosition(property->position());
5900
Steve Block6ded16b2010-05-10 14:33:55 +01005901 frame->EmitPop(r0); // Value.
5902 cgen_->EmitKeyedStore(property->key()->type());
Steve Blocka7e24c12009-10-30 11:49:00 +00005903 frame->EmitPush(r0);
Leon Clarke4515c472010-02-03 11:58:03 +00005904 cgen_->UnloadReference(this);
Steve Blocka7e24c12009-10-30 11:49:00 +00005905 break;
5906 }
5907
5908 default:
5909 UNREACHABLE();
5910 }
5911}
5912
5913
Leon Clarkee46be812010-01-19 14:06:41 +00005914void FastNewClosureStub::Generate(MacroAssembler* masm) {
Steve Block6ded16b2010-05-10 14:33:55 +01005915 // Create a new closure from the given function info in new
5916 // space. Set the context to the current context in cp.
Leon Clarkee46be812010-01-19 14:06:41 +00005917 Label gc;
5918
Steve Block6ded16b2010-05-10 14:33:55 +01005919 // Pop the function info from the stack.
Leon Clarkee46be812010-01-19 14:06:41 +00005920 __ pop(r3);
5921
5922 // Attempt to allocate new JSFunction in new space.
Kristian Monsen25f61362010-05-21 11:50:48 +01005923 __ AllocateInNewSpace(JSFunction::kSize,
Leon Clarkee46be812010-01-19 14:06:41 +00005924 r0,
5925 r1,
5926 r2,
5927 &gc,
5928 TAG_OBJECT);
5929
5930 // Compute the function map in the current global context and set that
5931 // as the map of the allocated object.
5932 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5933 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
5934 __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
5935 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
5936
Steve Block6ded16b2010-05-10 14:33:55 +01005937 // Initialize the rest of the function. We don't have to update the
5938 // write barrier because the allocated object is in new space.
5939 __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
5940 __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
5941 __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
5942 __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
5943 __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
5944 __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
5945 __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
5946 __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00005947
Steve Block6ded16b2010-05-10 14:33:55 +01005948 // Return result. The argument function info has been popped already.
Leon Clarkee46be812010-01-19 14:06:41 +00005949 __ Ret();
5950
5951 // Create a new closure through the slower runtime call.
5952 __ bind(&gc);
Steve Block6ded16b2010-05-10 14:33:55 +01005953 __ Push(cp, r3);
5954 __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
Leon Clarkee46be812010-01-19 14:06:41 +00005955}
5956
5957
5958void FastNewContextStub::Generate(MacroAssembler* masm) {
5959 // Try to allocate the context in new space.
5960 Label gc;
5961 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
5962
5963 // Attempt to allocate the context in new space.
Kristian Monsen25f61362010-05-21 11:50:48 +01005964 __ AllocateInNewSpace(FixedArray::SizeFor(length),
Leon Clarkee46be812010-01-19 14:06:41 +00005965 r0,
5966 r1,
5967 r2,
5968 &gc,
5969 TAG_OBJECT);
5970
5971 // Load the function from the stack.
Andrei Popescu402d9372010-02-26 13:31:12 +00005972 __ ldr(r3, MemOperand(sp, 0));
Leon Clarkee46be812010-01-19 14:06:41 +00005973
5974 // Set up the object header.
5975 __ LoadRoot(r2, Heap::kContextMapRootIndex);
5976 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
5977 __ mov(r2, Operand(length));
5978 __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
5979
5980 // Set up the fixed slots.
5981 __ mov(r1, Operand(Smi::FromInt(0)));
5982 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
5983 __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
5984 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5985 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
5986
5987 // Copy the global object from the surrounding context.
5988 __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5989 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
5990
5991 // Initialize the rest of the slots to undefined.
5992 __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
5993 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
5994 __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
5995 }
5996
5997 // Remove the on-stack argument and return.
5998 __ mov(cp, r0);
5999 __ pop();
6000 __ Ret();
6001
6002 // Need to collect. Call into runtime system.
6003 __ bind(&gc);
Steve Block6ded16b2010-05-10 14:33:55 +01006004 __ TailCallRuntime(Runtime::kNewContext, 1, 1);
Leon Clarkee46be812010-01-19 14:06:41 +00006005}
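// ---------------------------------------------------------------------------
// Rough sketch of the size computation behind FixedArray::SizeFor as used
// above (illustrative only; assumes the 32-bit layout of this port, i.e. a
// 4-byte pointer and a two-word FixedArray header holding map and length):
static inline int ExampleFixedArraySizeFor(int length) {
  const int kExamplePointerSize = 4;
  const int kExampleHeaderSize = 2 * kExamplePointerSize;  // Map + length.
  return kExampleHeaderSize + length * kExamplePointerSize;
}
// A context with 5 slots would therefore occupy 8 + 5 * 4 = 28 bytes.
// ---------------------------------------------------------------------------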
6006
6007
Andrei Popescu402d9372010-02-26 13:31:12 +00006008void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
6009 // Stack layout on entry:
6010 //
6011 // [sp]: constant elements.
6012 // [sp + kPointerSize]: literal index.
6013 // [sp + (2 * kPointerSize)]: literals array.
6014
6015 // All sizes here are multiples of kPointerSize.
6016 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
6017 int size = JSArray::kSize + elements_size;
6018
6019 // Load boilerplate object into r3 and check if we need to create a
6020 // boilerplate.
6021 Label slow_case;
6022 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
6023 __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6024 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6025 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
6026 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6027 __ cmp(r3, ip);
6028 __ b(eq, &slow_case);
6029
6030 // Allocate both the JS array and the elements array in one big
6031 // allocation. This avoids multiple limit checks.
Kristian Monsen25f61362010-05-21 11:50:48 +01006032 __ AllocateInNewSpace(size,
Andrei Popescu402d9372010-02-26 13:31:12 +00006033 r0,
6034 r1,
6035 r2,
6036 &slow_case,
6037 TAG_OBJECT);
6038
6039 // Copy the JS array part.
6040 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
6041 if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
6042 __ ldr(r1, FieldMemOperand(r3, i));
6043 __ str(r1, FieldMemOperand(r0, i));
6044 }
6045 }
6046
6047 if (length_ > 0) {
6048 // Get hold of the elements array of the boilerplate and set up the
6049 // elements pointer in the resulting object.
6050 __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
6051 __ add(r2, r0, Operand(JSArray::kSize));
6052 __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
6053
6054 // Copy the elements array.
6055 for (int i = 0; i < elements_size; i += kPointerSize) {
6056 __ ldr(r1, FieldMemOperand(r3, i));
6057 __ str(r1, FieldMemOperand(r2, i));
6058 }
6059 }
6060
6061 // Return and remove the on-stack parameters.
6062 __ add(sp, sp, Operand(3 * kPointerSize));
6063 __ Ret();
6064
6065 __ bind(&slow_case);
Steve Block6ded16b2010-05-10 14:33:55 +01006066 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00006067}
6068
6069
6070// Takes a Smi and converts to an IEEE 64 bit floating point value in two
6071// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
6072// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
6073// scratch register. Destroys the source register. No GC occurs during this
6074// stub so you don't have to set up the frame.
6075class ConvertToDoubleStub : public CodeStub {
6076 public:
6077 ConvertToDoubleStub(Register result_reg_1,
6078 Register result_reg_2,
6079 Register source_reg,
6080 Register scratch_reg)
6081 : result1_(result_reg_1),
6082 result2_(result_reg_2),
6083 source_(source_reg),
6084 zeros_(scratch_reg) { }
6085
6086 private:
6087 Register result1_;
6088 Register result2_;
6089 Register source_;
6090 Register zeros_;
6091
6092 // Minor key encoding in 16 bits.
6093 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
6094 class OpBits: public BitField<Token::Value, 2, 14> {};
6095
6096 Major MajorKey() { return ConvertToDouble; }
6097 int MinorKey() {
6098 // Encode the parameters in a unique 16 bit value.
6099 return result1_.code() +
6100 (result2_.code() << 4) +
6101 (source_.code() << 8) +
6102 (zeros_.code() << 12);
6103 }
6104
6105 void Generate(MacroAssembler* masm);
6106
6107 const char* GetName() { return "ConvertToDoubleStub"; }
6108
6109#ifdef DEBUG
6110 void Print() { PrintF("ConvertToDoubleStub\n"); }
6111#endif
6112};
6113
6114
6115void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
6116#ifndef BIG_ENDIAN_FLOATING_POINT
6117 Register exponent = result1_;
6118 Register mantissa = result2_;
6119#else
6120 Register exponent = result2_;
6121 Register mantissa = result1_;
6122#endif
6123 Label not_special;
6124 // Convert from Smi to integer.
6125 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
6126 // Move sign bit from source to destination. This works because the sign bit
6127 // in the exponent word of the double has the same position and polarity as
6128 // the 2's complement sign bit in a Smi.
6129 ASSERT(HeapNumber::kSignMask == 0x80000000u);
6130 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
6131 // Subtract from 0 if source was negative.
6132 __ rsb(source_, source_, Operand(0), LeaveCC, ne);
Steve Block6ded16b2010-05-10 14:33:55 +01006133
6134 // We have -1, 0 or 1, which we treat specially. Register source_ contains
6135 // absolute value: it is either equal to 1 (special case of -1 and 1),
6136 // greater than 1 (not a special case) or less than 1 (special case of 0).
Steve Blocka7e24c12009-10-30 11:49:00 +00006137 __ cmp(source_, Operand(1));
6138 __ b(gt, &not_special);
6139
Steve Blocka7e24c12009-10-30 11:49:00 +00006140 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
6141 static const uint32_t exponent_word_for_1 =
6142 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
Steve Block6ded16b2010-05-10 14:33:55 +01006143 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
Steve Blocka7e24c12009-10-30 11:49:00 +00006144 // 1, 0 and -1 all have 0 for the second word.
6145 __ mov(mantissa, Operand(0));
6146 __ Ret();
6147
6148 __ bind(&not_special);
Steve Block6ded16b2010-05-10 14:33:55 +01006149 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
Steve Blocka7e24c12009-10-30 11:49:00 +00006150 // Gets the wrong answer for 0, but we already checked for that case above.
Steve Block6ded16b2010-05-10 14:33:55 +01006151 __ CountLeadingZeros(source_, mantissa, zeros_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006152 // Compute exponent and or it into the exponent register.
Steve Block6ded16b2010-05-10 14:33:55 +01006153 // We use mantissa as a scratch register here.
Steve Blocka7e24c12009-10-30 11:49:00 +00006154 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
6155 __ orr(exponent,
6156 exponent,
6157 Operand(mantissa, LSL, HeapNumber::kExponentShift));
6158 // Shift up the source chopping the top bit off.
6159 __ add(zeros_, zeros_, Operand(1));
6160 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
6161 __ mov(source_, Operand(source_, LSL, zeros_));
6162 // Compute lower part of fraction (last 12 bits).
6163 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
6164 // And the top (top 20 bits).
6165 __ orr(exponent,
6166 exponent,
6167 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
6168 __ Ret();
6169}
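// ---------------------------------------------------------------------------
// Host-side sketch of the conversion performed by ConvertToDoubleStub above
// (illustrative only, not the generated code). It builds the two words of an
// IEEE 754 double from an untagged integer the same way the stub does:
// special-case 0, 1 and -1, otherwise count leading zeros to find the
// exponent and shift the magnitude into the mantissa fields. The value is
// assumed to fit in a smi, as it does for the stub.
static inline void ExampleIntToDoubleWords(int32_t value,
                                           uint32_t* exponent_word,
                                           uint32_t* mantissa_word) {
  uint32_t sign = 0;
  uint32_t magnitude = static_cast<uint32_t>(value);
  if (value < 0) {
    sign = 0x80000000u;
    magnitude = static_cast<uint32_t>(-value);
  }
  if (magnitude == 0) {  // +0.0 or -0.0: everything but the sign bit is zero.
    *exponent_word = sign;
    *mantissa_word = 0;
    return;
  }
  if (magnitude == 1) {  // 1.0 or -1.0: exponent is the bias, mantissa is zero.
    *exponent_word = sign | (1023u << 20);
    *mantissa_word = 0;
    return;
  }
  int zeros = 0;  // Count leading zeros while normalizing the magnitude.
  while ((magnitude & 0x80000000u) == 0) {
    magnitude <<= 1;
    zeros++;
  }
  uint32_t exponent = (31 - zeros) + 1023;  // Biased exponent.
  uint32_t fraction = magnitude << 1;       // Drop the implicit leading 1.
  *exponent_word = sign | (exponent << 20) | (fraction >> 12);  // Top 20 bits.
  *mantissa_word = fraction << 20;                              // Low 32 bits.
}
// For example, ExampleIntToDoubleWords(5, ...) yields 0x40140000 / 0x00000000,
// the bit pattern of 5.0.
// ---------------------------------------------------------------------------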
6170
6171
Steve Blocka7e24c12009-10-30 11:49:00 +00006172// See comment for class.
Steve Blockd0582a62009-12-15 09:54:21 +00006173void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006174 Label max_negative_int;
6175 // the_int_ has the answer which is a signed int32 but not a Smi.
6176 // We test for the special value that has a different exponent. This test
6177 // has the neat side effect of setting the flags according to the sign.
6178 ASSERT(HeapNumber::kSignMask == 0x80000000u);
6179 __ cmp(the_int_, Operand(0x80000000u));
6180 __ b(eq, &max_negative_int);
6181 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
6182 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
6183 uint32_t non_smi_exponent =
6184 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
6185 __ mov(scratch_, Operand(non_smi_exponent));
6186 // Set the sign bit in scratch_ if the value was negative.
6187 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
6188 // Subtract from 0 if the value was negative.
6189 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
6190 // We should be masking the implicit first digit of the mantissa away here,
6191 // but it just ends up combining harmlessly with the last digit of the
6192 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
6193 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
6194 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
6195 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
6196 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
6197 __ str(scratch_, FieldMemOperand(the_heap_number_,
6198 HeapNumber::kExponentOffset));
6199 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
6200 __ str(scratch_, FieldMemOperand(the_heap_number_,
6201 HeapNumber::kMantissaOffset));
6202 __ Ret();
6203
6204 __ bind(&max_negative_int);
6205 // The max negative int32 is stored as a positive number in the mantissa of
6206 // a double because it uses a sign bit instead of using two's complement.
6207 // The actual mantissa bits stored are all 0 because the implicit most
6208 // significant 1 bit is not stored.
6209 non_smi_exponent += 1 << HeapNumber::kExponentShift;
6210 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
6211 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
6212 __ mov(ip, Operand(0));
6213 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
6214 __ Ret();
6215}
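// ---------------------------------------------------------------------------
// Worked example for the special case above (illustrative only): the most
// negative int32 is -1.0 * 2^31, so its heap number has a sign bit, a biased
// exponent of 1023 + 31 = 1054, and an all-zero stored mantissa. Its exponent
// word is therefore 0x80000000 | (1054 << 20) == 0xC1E00000 and its mantissa
// word is 0.
static inline uint32_t ExampleMaxNegativeInt32ExponentWord() {
  const uint32_t kSignBit = 0x80000000u;
  const uint32_t kBiasedExponent = 1023u + 31u;  // == 1054.
  return kSignBit | (kBiasedExponent << 20);     // == 0xC1E00000.
}
// ---------------------------------------------------------------------------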
6216
6217
6218// Handle the case where the lhs and rhs are the same object.
6219// Equality is almost reflexive (everything but NaN), so this is a test
6220// for "identity and not NaN".
6221static void EmitIdenticalObjectComparison(MacroAssembler* masm,
6222 Label* slow,
Leon Clarkee46be812010-01-19 14:06:41 +00006223 Condition cc,
6224 bool never_nan_nan) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006225 Label not_identical;
Leon Clarkee46be812010-01-19 14:06:41 +00006226 Label heap_number, return_equal;
6227 Register exp_mask_reg = r5;
Steve Block6ded16b2010-05-10 14:33:55 +01006228 __ cmp(r0, r1);
Steve Blocka7e24c12009-10-30 11:49:00 +00006229 __ b(ne, &not_identical);
6230
Leon Clarkee46be812010-01-19 14:06:41 +00006231 // The two objects are identical. If we know that one of them isn't NaN then
6232 // we now know they test equal.
6233 if (cc != eq || !never_nan_nan) {
6234 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00006235
Leon Clarkee46be812010-01-19 14:06:41 +00006236 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
6237 // so we do the second best thing - test it ourselves.
6238 // They are both equal and they are not both Smis, so neither of them is
6239 // a Smi. If it's not a heap number, then return equal.
6240 if (cc == lt || cc == gt) {
6241 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00006242 __ b(ge, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00006243 } else {
6244 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6245 __ b(eq, &heap_number);
6246 // Comparing JS objects with <=, >= is complicated.
6247 if (cc != eq) {
6248 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
6249 __ b(ge, slow);
6250 // Normally here we fall through to return_equal, but undefined is
6251 // special: (undefined == undefined) == true, but
6252 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
6253 if (cc == le || cc == ge) {
6254 __ cmp(r4, Operand(ODDBALL_TYPE));
6255 __ b(ne, &return_equal);
6256 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01006257 __ cmp(r0, r2);
Leon Clarkee46be812010-01-19 14:06:41 +00006258 __ b(ne, &return_equal);
6259 if (cc == le) {
6260 // undefined <= undefined should fail.
6261 __ mov(r0, Operand(GREATER));
6262 } else {
6263 // undefined >= undefined should fail.
6264 __ mov(r0, Operand(LESS));
6265 }
6266 __ mov(pc, Operand(lr)); // Return.
Steve Blockd0582a62009-12-15 09:54:21 +00006267 }
Steve Blockd0582a62009-12-15 09:54:21 +00006268 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006269 }
6270 }
Leon Clarkee46be812010-01-19 14:06:41 +00006271
Steve Blocka7e24c12009-10-30 11:49:00 +00006272 __ bind(&return_equal);
6273 if (cc == lt) {
6274 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
6275 } else if (cc == gt) {
6276 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
6277 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00006278 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
Steve Blocka7e24c12009-10-30 11:49:00 +00006279 }
6280 __ mov(pc, Operand(lr)); // Return.
6281
Leon Clarkee46be812010-01-19 14:06:41 +00006282 if (cc != eq || !never_nan_nan) {
6283 // For less and greater we don't have to check for NaN since the result of
6284 // x < x is false regardless. For the others here is some code to check
6285 // for NaN.
6286 if (cc != lt && cc != gt) {
6287 __ bind(&heap_number);
6288 // It is a heap number, so return non-equal if it's NaN and equal if it's
6289 // not NaN.
Steve Blocka7e24c12009-10-30 11:49:00 +00006290
Leon Clarkee46be812010-01-19 14:06:41 +00006291 // The representation of NaN values has all exponent bits (52..62) set,
6292 // and not all mantissa bits (0..51) clear.
6293 // Read top bits of double representation (second word of value).
6294 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6295 // Test that exponent bits are all set.
6296 __ and_(r3, r2, Operand(exp_mask_reg));
6297 __ cmp(r3, Operand(exp_mask_reg));
6298 __ b(ne, &return_equal);
6299
6300 // Shift out flag and all exponent bits, retaining only mantissa.
6301 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
6302 // Or with all low-bits of mantissa.
6303 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
6304 __ orr(r0, r3, Operand(r2), SetCC);
6305 // For equal we already have the right value in r0: Return zero (equal)
6306 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
6307 // not (it's a NaN). For <= and >= we need to load r0 with the failing
6308 // value if it's a NaN.
6309 if (cc != eq) {
6310 // All-zero means Infinity means equal.
6311 __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
6312 if (cc == le) {
6313 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
6314 } else {
6315 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
6316 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006317 }
Leon Clarkee46be812010-01-19 14:06:41 +00006318 __ mov(pc, Operand(lr)); // Return.
Steve Blocka7e24c12009-10-30 11:49:00 +00006319 }
Leon Clarkee46be812010-01-19 14:06:41 +00006320 // No fall through here.
Steve Blocka7e24c12009-10-30 11:49:00 +00006321 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006322
6323 __ bind(&not_identical);
6324}
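// ---------------------------------------------------------------------------
// Host-side sketch of the NaN test used above (illustrative only): a double
// is a NaN exactly when all eleven exponent bits are set and at least one
// mantissa bit is set; an all-one exponent with an all-zero mantissa is an
// infinity instead. The two arguments are the exponent/sign word and the low
// mantissa word of the heap number.
static inline bool ExampleDoubleWordsAreNaN(uint32_t exponent_word,
                                            uint32_t mantissa_word) {
  const uint32_t kExampleExponentMask = 0x7FF00000u;      // Bits 52..62.
  const uint32_t kExampleMantissaHighMask = 0x000FFFFFu;  // Bits 32..51.
  if ((exponent_word & kExampleExponentMask) != kExampleExponentMask) {
    return false;  // Exponent not all ones: an ordinary finite number.
  }
  return ((exponent_word & kExampleMantissaHighMask) | mantissa_word) != 0;
}
// ---------------------------------------------------------------------------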
6325
6326
6327// See comment at call site.
6328static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Leon Clarkee46be812010-01-19 14:06:41 +00006329 Label* lhs_not_nan,
Steve Blocka7e24c12009-10-30 11:49:00 +00006330 Label* slow,
6331 bool strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006332 Label rhs_is_smi;
Steve Blocka7e24c12009-10-30 11:49:00 +00006333 __ tst(r0, Operand(kSmiTagMask));
Leon Clarked91b9f72010-01-27 17:25:45 +00006334 __ b(eq, &rhs_is_smi);
Steve Blocka7e24c12009-10-30 11:49:00 +00006335
Leon Clarked91b9f72010-01-27 17:25:45 +00006336 // Lhs is a Smi. Check whether the rhs is a heap number.
Steve Blocka7e24c12009-10-30 11:49:00 +00006337 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6338 if (strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006339 // If rhs is not a number and lhs is a Smi then strict equality cannot
Steve Blocka7e24c12009-10-30 11:49:00 +00006340 // succeed. Return non-equal (r0 is already not zero).
6341 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
6342 } else {
6343 // Smi compared non-strictly with a non-Smi non-heap-number. Call
6344 // the runtime.
6345 __ b(ne, slow);
6346 }
6347
Leon Clarked91b9f72010-01-27 17:25:45 +00006348 // Lhs (r1) is a smi, rhs (r0) is a number.
Steve Blockd0582a62009-12-15 09:54:21 +00006349 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006350 // Convert lhs to a double in d7.
Steve Blockd0582a62009-12-15 09:54:21 +00006351 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00006352 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
6353 __ vmov(s15, r7);
Steve Block6ded16b2010-05-10 14:33:55 +01006354 __ vcvt_f64_s32(d7, s15);
Leon Clarked91b9f72010-01-27 17:25:45 +00006355 // Load the double from rhs, tagged HeapNumber r0, to d6.
6356 __ sub(r7, r0, Operand(kHeapObjectTag));
6357 __ vldr(d6, r7, HeapNumber::kValueOffset);
Steve Blockd0582a62009-12-15 09:54:21 +00006358 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00006359 __ push(lr);
6360 // Convert lhs to a double in r2, r3.
Steve Blockd0582a62009-12-15 09:54:21 +00006361 __ mov(r7, Operand(r1));
6362 ConvertToDoubleStub stub1(r3, r2, r7, r6);
6363 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
Leon Clarked91b9f72010-01-27 17:25:45 +00006364 // Load rhs to a double in r0, r1.
Kristian Monsen25f61362010-05-21 11:50:48 +01006365 __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
Leon Clarked91b9f72010-01-27 17:25:45 +00006366 __ pop(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00006367 }
6368
Steve Blocka7e24c12009-10-30 11:49:00 +00006369 // We now have both loaded as doubles but we can skip the lhs nan check
Leon Clarked91b9f72010-01-27 17:25:45 +00006370 // since it's a smi.
Leon Clarkee46be812010-01-19 14:06:41 +00006371 __ jmp(lhs_not_nan);
Steve Blocka7e24c12009-10-30 11:49:00 +00006372
Leon Clarked91b9f72010-01-27 17:25:45 +00006373 __ bind(&rhs_is_smi);
6374 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
Steve Blocka7e24c12009-10-30 11:49:00 +00006375 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
6376 if (strict) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006377 // If lhs is not a number and rhs is a smi then strict equality cannot
Steve Blocka7e24c12009-10-30 11:49:00 +00006378 // succeed. Return non-equal.
6379 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
6380 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
6381 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00006382 // Smi compared non-strictly with a non-smi non-heap-number. Call
Steve Blocka7e24c12009-10-30 11:49:00 +00006383 // the runtime.
6384 __ b(ne, slow);
6385 }
6386
Leon Clarked91b9f72010-01-27 17:25:45 +00006387 // Rhs (r0) is a smi, lhs (r1) is a heap number.
Steve Blockd0582a62009-12-15 09:54:21 +00006388 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarked91b9f72010-01-27 17:25:45 +00006389 // Convert rhs to a double in d6.
Steve Blockd0582a62009-12-15 09:54:21 +00006390 CpuFeatures::Scope scope(VFP3);
Leon Clarked91b9f72010-01-27 17:25:45 +00006391 // Load the double from lhs, tagged HeapNumber r1, to d7.
6392 __ sub(r7, r1, Operand(kHeapObjectTag));
6393 __ vldr(d7, r7, HeapNumber::kValueOffset);
6394 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
6395 __ vmov(s13, r7);
Steve Block6ded16b2010-05-10 14:33:55 +01006396 __ vcvt_f64_s32(d6, s13);
Steve Blockd0582a62009-12-15 09:54:21 +00006397 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00006398 __ push(lr);
6399 // Load lhs to a double in r2, r3.
Kristian Monsen25f61362010-05-21 11:50:48 +01006400 __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
Leon Clarked91b9f72010-01-27 17:25:45 +00006401 // Convert rhs to a double in r0, r1.
Steve Blockd0582a62009-12-15 09:54:21 +00006402 __ mov(r7, Operand(r0));
6403 ConvertToDoubleStub stub2(r1, r0, r7, r6);
6404 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
Leon Clarked91b9f72010-01-27 17:25:45 +00006405 __ pop(lr);
Steve Blockd0582a62009-12-15 09:54:21 +00006406 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006407 // Fall through to both_loaded_as_doubles.
6408}
6409
6410
Leon Clarkee46be812010-01-19 14:06:41 +00006411void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006412 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00006413 Register rhs_exponent = exp_first ? r0 : r1;
6414 Register lhs_exponent = exp_first ? r2 : r3;
6415 Register rhs_mantissa = exp_first ? r1 : r0;
6416 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00006417 Label one_is_nan, neither_is_nan;
Leon Clarkee46be812010-01-19 14:06:41 +00006418 Label lhs_not_nan_exp_mask_is_loaded;
Steve Blocka7e24c12009-10-30 11:49:00 +00006419
6420 Register exp_mask_reg = r5;
6421
6422 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00006423 __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
6424 __ cmp(r4, Operand(exp_mask_reg));
Leon Clarkee46be812010-01-19 14:06:41 +00006425 __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
Steve Blocka7e24c12009-10-30 11:49:00 +00006426 __ mov(r4,
6427 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
6428 SetCC);
6429 __ b(ne, &one_is_nan);
6430 __ cmp(lhs_mantissa, Operand(0));
Leon Clarkee46be812010-01-19 14:06:41 +00006431 __ b(ne, &one_is_nan);
6432
6433 __ bind(lhs_not_nan);
6434 __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
6435 __ bind(&lhs_not_nan_exp_mask_is_loaded);
6436 __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
6437 __ cmp(r4, Operand(exp_mask_reg));
6438 __ b(ne, &neither_is_nan);
6439 __ mov(r4,
6440 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
6441 SetCC);
6442 __ b(ne, &one_is_nan);
6443 __ cmp(rhs_mantissa, Operand(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00006444 __ b(eq, &neither_is_nan);
6445
6446 __ bind(&one_is_nan);
6447 // NaN comparisons always fail.
6448 // Load whatever we need in r0 to make the comparison fail.
6449 if (cc == lt || cc == le) {
6450 __ mov(r0, Operand(GREATER));
6451 } else {
6452 __ mov(r0, Operand(LESS));
6453 }
6454 __ mov(pc, Operand(lr)); // Return.
6455
6456 __ bind(&neither_is_nan);
6457}
6458
6459
6460// See comment at call site.
6461static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
6462 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Leon Clarkee46be812010-01-19 14:06:41 +00006463 Register rhs_exponent = exp_first ? r0 : r1;
6464 Register lhs_exponent = exp_first ? r2 : r3;
6465 Register rhs_mantissa = exp_first ? r1 : r0;
6466 Register lhs_mantissa = exp_first ? r3 : r2;
Steve Blocka7e24c12009-10-30 11:49:00 +00006467
6468 // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
6469 if (cc == eq) {
6470 // Doubles are not equal unless they have the same bit pattern.
6471 // Exception: 0 and -0.
Leon Clarkee46be812010-01-19 14:06:41 +00006472 __ cmp(rhs_mantissa, Operand(lhs_mantissa));
6473 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
Steve Blocka7e24c12009-10-30 11:49:00 +00006474 // Return non-zero if the numbers are unequal.
6475 __ mov(pc, Operand(lr), LeaveCC, ne);
6476
Leon Clarkee46be812010-01-19 14:06:41 +00006477 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00006478 // If exponents are equal then return 0.
6479 __ mov(pc, Operand(lr), LeaveCC, eq);
6480
6481 // Exponents are unequal. The only way we can return that the numbers
6482 // are equal is if one is -0 and the other is 0. We already dealt
6483 // with the case where both are -0 or both are 0.
6484 // We start by seeing if the mantissas (that are equal) or the bottom
6485 // 31 bits of the lhs exponent are non-zero. If so we return not
6486 // equal.
Leon Clarkee46be812010-01-19 14:06:41 +00006487 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00006488 __ mov(r0, Operand(r4), LeaveCC, ne);
6489 __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
6490 // Now they are equal if and only if the rhs exponent is zero in its
6491 // low 31 bits.
Leon Clarkee46be812010-01-19 14:06:41 +00006492 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00006493 __ mov(pc, Operand(lr));
6494 } else {
6495 // Call a native function to do a comparison between two non-NaNs.
6496 // Call C routine that may not cause GC or other trouble.
Steve Block6ded16b2010-05-10 14:33:55 +01006497 __ push(lr);
6498 __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
6499 __ CallCFunction(ExternalReference::compare_doubles(), 4);
6500 __ pop(pc); // Return.
Steve Blocka7e24c12009-10-30 11:49:00 +00006501 }
6502}
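// ---------------------------------------------------------------------------
// Host-side sketch of the equality rule implemented above for cc == eq
// (illustrative only): two non-NaN doubles are equal exactly when their bit
// patterns are identical, with the single exception of +0 and -0, which
// differ only in the sign bit but compare equal.
static inline bool ExampleNonNanDoublesEqual(uint32_t lhs_exponent_word,
                                             uint32_t lhs_mantissa_word,
                                             uint32_t rhs_exponent_word,
                                             uint32_t rhs_mantissa_word) {
  if (lhs_exponent_word == rhs_exponent_word &&
      lhs_mantissa_word == rhs_mantissa_word) {
    return true;  // Identical bit patterns.
  }
  // Remaining equal case: one is +0 and the other -0, i.e. both values are
  // zero apart from their sign bits.
  return lhs_mantissa_word == 0 && rhs_mantissa_word == 0 &&
         ((lhs_exponent_word | rhs_exponent_word) & 0x7FFFFFFFu) == 0;
}
// ---------------------------------------------------------------------------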
6503
6504
6505// See comment at call site.
6506static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
6507 // If either operand is a JSObject or an oddball value, then they are
6508 // not equal since their pointers are different.
6509 // There is no test for undetectability in strict equality.
6510 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
6511 Label first_non_object;
6512 // Get the type of the first operand into r2 and compare it with
6513 // FIRST_JS_OBJECT_TYPE.
6514 __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
6515 __ b(lt, &first_non_object);
6516
6517 // Return non-zero (r0 is not zero)
6518 Label return_not_equal;
6519 __ bind(&return_not_equal);
6520 __ mov(pc, Operand(lr)); // Return.
6521
6522 __ bind(&first_non_object);
6523 // Check for oddballs: true, false, null, undefined.
6524 __ cmp(r2, Operand(ODDBALL_TYPE));
6525 __ b(eq, &return_not_equal);
6526
6527 __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
6528 __ b(ge, &return_not_equal);
6529
6530 // Check for oddballs: true, false, null, undefined.
6531 __ cmp(r3, Operand(ODDBALL_TYPE));
6532 __ b(eq, &return_not_equal);
Leon Clarkee46be812010-01-19 14:06:41 +00006533
6534 // Now that we have the types we might as well check for symbol-symbol.
6535 // Ensure that no non-strings have the symbol bit set.
6536 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
6537 ASSERT(kSymbolTag != 0);
6538 __ and_(r2, r2, Operand(r3));
6539 __ tst(r2, Operand(kIsSymbolMask));
6540 __ b(ne, &return_not_equal);
Steve Blocka7e24c12009-10-30 11:49:00 +00006541}
6542
6543
6544// See comment at call site.
6545static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
6546 Label* both_loaded_as_doubles,
6547 Label* not_heap_numbers,
6548 Label* slow) {
Leon Clarkee46be812010-01-19 14:06:41 +00006549 __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00006550 __ b(ne, not_heap_numbers);
Leon Clarkee46be812010-01-19 14:06:41 +00006551 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
6552 __ cmp(r2, r3);
Steve Blocka7e24c12009-10-30 11:49:00 +00006553 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
6554
6555 // Both are heap numbers. Load them up then jump to the code we have
6556 // for that.
Leon Clarked91b9f72010-01-27 17:25:45 +00006557 if (CpuFeatures::IsSupported(VFP3)) {
6558 CpuFeatures::Scope scope(VFP3);
6559 __ sub(r7, r0, Operand(kHeapObjectTag));
6560 __ vldr(d6, r7, HeapNumber::kValueOffset);
6561 __ sub(r7, r1, Operand(kHeapObjectTag));
6562 __ vldr(d7, r7, HeapNumber::kValueOffset);
6563 } else {
Kristian Monsen25f61362010-05-21 11:50:48 +01006564 __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
6565 __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
Leon Clarked91b9f72010-01-27 17:25:45 +00006566 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006567 __ jmp(both_loaded_as_doubles);
6568}
6569
6570
6571// Fast negative check for symbol-to-symbol equality.
6572static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
6573 // r2 is object type of r0.
Leon Clarkee46be812010-01-19 14:06:41 +00006574 // Ensure that no non-strings have the symbol bit set.
6575 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
6576 ASSERT(kSymbolTag != 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00006577 __ tst(r2, Operand(kIsSymbolMask));
6578 __ b(eq, slow);
Leon Clarkee46be812010-01-19 14:06:41 +00006579 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
6580 __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00006581 __ tst(r3, Operand(kIsSymbolMask));
6582 __ b(eq, slow);
6583
6584 // Both are symbols. We already checked they weren't the same pointer
6585 // so they are not equal.
6586 __ mov(r0, Operand(1)); // Non-zero indicates not equal.
6587 __ mov(pc, Operand(lr)); // Return.
6588}
6589
6590
Steve Block6ded16b2010-05-10 14:33:55 +01006591void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
6592 Register object,
6593 Register result,
6594 Register scratch1,
6595 Register scratch2,
6596 Register scratch3,
6597 bool object_is_smi,
6598 Label* not_found) {
6599 // Use of registers. Register result is used as a temporary.
6600 Register number_string_cache = result;
6601 Register mask = scratch3;
6602
6603 // Load the number string cache.
6604 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
6605
6606 // Make the hash mask from the length of the number string cache. It
6607 // contains two elements (number and string) for each cache entry.
6608 __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
6609 // Divide length by two (length is not a smi).
6610 __ mov(mask, Operand(mask, ASR, 1));
6611 __ sub(mask, mask, Operand(1)); // Make mask.
6612
6613 // Calculate the entry in the number string cache. The hash value in the
6614 // number string cache for smis is just the smi value, and the hash for
6615 // doubles is the xor of the upper and lower words. See
6616 // Heap::GetNumberStringCache.
6617 Label is_smi;
6618 Label load_result_from_cache;
6619 if (!object_is_smi) {
6620 __ BranchOnSmi(object, &is_smi);
6621 if (CpuFeatures::IsSupported(VFP3)) {
6622 CpuFeatures::Scope scope(VFP3);
6623 __ CheckMap(object,
6624 scratch1,
6625 Factory::heap_number_map(),
6626 not_found,
6627 true);
6628
6629 ASSERT_EQ(8, kDoubleSize);
6630 __ add(scratch1,
6631 object,
6632 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
6633 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
6634 __ eor(scratch1, scratch1, Operand(scratch2));
6635 __ and_(scratch1, scratch1, Operand(mask));
6636
6637 // Calculate address of entry in string cache: each entry consists
6638 // of two pointer sized fields.
6639 __ add(scratch1,
6640 number_string_cache,
6641 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
6642
6643 Register probe = mask;
6644 __ ldr(probe,
6645 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
6646 __ BranchOnSmi(probe, not_found);
6647 __ sub(scratch2, object, Operand(kHeapObjectTag));
6648 __ vldr(d0, scratch2, HeapNumber::kValueOffset);
6649 __ sub(probe, probe, Operand(kHeapObjectTag));
6650 __ vldr(d1, probe, HeapNumber::kValueOffset);
6651 __ vcmp(d0, d1);
6652 __ vmrs(pc);
6653 __ b(ne, not_found); // The cache did not contain this value.
6654 __ b(&load_result_from_cache);
6655 } else {
6656 __ b(not_found);
6657 }
6658 }
6659
6660 __ bind(&is_smi);
6661 Register scratch = scratch1;
6662 __ and_(scratch, mask, Operand(object, ASR, 1));
6663 // Calculate address of entry in string cache: each entry consists
6664 // of two pointer sized fields.
6665 __ add(scratch,
6666 number_string_cache,
6667 Operand(scratch, LSL, kPointerSizeLog2 + 1));
6668
6669 // Check if the entry is the smi we are looking for.
6670 Register probe = mask;
6671 __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
6672 __ cmp(object, probe);
6673 __ b(ne, not_found);
6674
6675 // Get the result from the cache.
6676 __ bind(&load_result_from_cache);
6677 __ ldr(result,
6678 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
6679 __ IncrementCounter(&Counters::number_to_string_native,
6680 1,
6681 scratch1,
6682 scratch2);
6683}
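// ---------------------------------------------------------------------------
// Sketch of the indexing scheme used by the lookup above (illustrative only):
// the hash of a smi is its untagged value and the hash of a heap number is
// the xor of the two words of its double value; the mask is half the cache
// length minus one because every entry occupies two consecutive slots, the
// number followed by its string.
static inline int ExampleNumberStringCacheEntry(uint32_t hash,
                                                int cache_length) {
  int mask = (cache_length / 2) - 1;
  int entry = static_cast<int>(hash) & mask;
  return entry * 2;  // Slot of the number; the string is at entry * 2 + 1.
}
// ---------------------------------------------------------------------------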
6684
6685
6686void NumberToStringStub::Generate(MacroAssembler* masm) {
6687 Label runtime;
6688
6689 __ ldr(r1, MemOperand(sp, 0));
6690
6691 // Generate code to lookup number in the number string cache.
6692 GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
6693 __ add(sp, sp, Operand(1 * kPointerSize));
6694 __ Ret();
6695
6696 __ bind(&runtime);
6697 // Handle number to string in the runtime system if not found in the cache.
6698 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
6699}
6700
6701
6702void RecordWriteStub::Generate(MacroAssembler* masm) {
6703 __ RecordWriteHelper(object_, offset_, scratch_);
6704 __ Ret();
6705}
6706
6707
Leon Clarked91b9f72010-01-27 17:25:45 +00006708// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
6709// On exit r0 is 0, positive or negative to indicate the result of
6710// the comparison.
Steve Blocka7e24c12009-10-30 11:49:00 +00006711void CompareStub::Generate(MacroAssembler* masm) {
6712 Label slow; // Call builtin.
Leon Clarkee46be812010-01-19 14:06:41 +00006713 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
Steve Blocka7e24c12009-10-30 11:49:00 +00006714
6715 // NOTICE! This code is only reached after a smi-fast-case check, so
6716 // it is certain that at least one operand isn't a smi.
6717
6718 // Handle the case where the objects are identical. Either returns the answer
6719 // or goes to slow. Only falls through if the objects were not identical.
Leon Clarkee46be812010-01-19 14:06:41 +00006720 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006721
6722 // If either is a Smi (we know that not both are), then they can only
6723 // be strictly equal if the other is a HeapNumber.
6724 ASSERT_EQ(0, kSmiTag);
6725 ASSERT_EQ(0, Smi::FromInt(0));
6726 __ and_(r2, r0, Operand(r1));
6727 __ tst(r2, Operand(kSmiTagMask));
6728 __ b(ne, &not_smis);
6729 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
6730 // 1) Return the answer.
6731 // 2) Go to slow.
6732 // 3) Fall through to both_loaded_as_doubles.
Leon Clarkee46be812010-01-19 14:06:41 +00006733 // 4) Jump to lhs_not_nan.
Steve Blocka7e24c12009-10-30 11:49:00 +00006734 // In cases 3 and 4 we have found out we were dealing with a number-number
Leon Clarked91b9f72010-01-27 17:25:45 +00006735 // comparison. If VFP3 is supported the double values of the numbers have
6736 // been loaded into d7 and d6. Otherwise, the double values have been loaded
6737 // into r0, r1, r2, and r3.
Leon Clarkee46be812010-01-19 14:06:41 +00006738 EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
Steve Blocka7e24c12009-10-30 11:49:00 +00006739
6740 __ bind(&both_loaded_as_doubles);
Leon Clarked91b9f72010-01-27 17:25:45 +00006741 // The arguments have been converted to doubles and stored in d6 and d7, if
6742 // VFP3 is supported, or in r0, r1, r2, and r3.
Steve Blockd0582a62009-12-15 09:54:21 +00006743 if (CpuFeatures::IsSupported(VFP3)) {
Leon Clarkee46be812010-01-19 14:06:41 +00006744 __ bind(&lhs_not_nan);
Steve Blockd0582a62009-12-15 09:54:21 +00006745 CpuFeatures::Scope scope(VFP3);
Leon Clarkee46be812010-01-19 14:06:41 +00006746 Label no_nan;
Steve Blockd0582a62009-12-15 09:54:21 +00006747 // ARMv7 VFP3 instructions to implement double precision comparison.
Leon Clarkee46be812010-01-19 14:06:41 +00006748 __ vcmp(d7, d6);
6749 __ vmrs(pc); // Move vector status bits to normal status bits.
6750 Label nan;
6751 __ b(vs, &nan);
6752 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
6753 __ mov(r0, Operand(LESS), LeaveCC, lt);
6754 __ mov(r0, Operand(GREATER), LeaveCC, gt);
6755 __ mov(pc, Operand(lr));
6756
6757 __ bind(&nan);
6758 // If one of the sides was a NaN then the v flag is set. Load r0 with
6759 // whatever it takes to make the comparison fail, since comparisons with NaN
6760 // always fail.
6761 if (cc_ == lt || cc_ == le) {
6762 __ mov(r0, Operand(GREATER));
6763 } else {
6764 __ mov(r0, Operand(LESS));
6765 }
Steve Blockd0582a62009-12-15 09:54:21 +00006766 __ mov(pc, Operand(lr));
6767 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00006768 // Checks for NaN in the doubles we have loaded. Can return the answer or
6769 // fall through if neither is a NaN. Also binds lhs_not_nan.
6770 EmitNanCheck(masm, &lhs_not_nan, cc_);
Steve Blockd0582a62009-12-15 09:54:21 +00006771 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
6772 // answer. Never falls through.
6773 EmitTwoNonNanDoubleComparison(masm, cc_);
6774 }
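  // Illustrative note (not part of the stub): the NaN result values above are
  // chosen so that the caller's subsequent conditional branch always fails.
  // A hypothetical C++ helper expressing the same choice would be:
  //
  //   static int NanCompareResult(Condition cc) {
  //     // "a < b" / "a <= b" test for a negative result, so return GREATER;
  //     // "a > b" / "a >= b" test for a positive result, so return LESS.
  //     return (cc == lt || cc == le) ? GREATER : LESS;
  //   }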
Steve Blocka7e24c12009-10-30 11:49:00 +00006775
6776 __ bind(&not_smis);
6777 // At this point we know we are dealing with two different objects,
6778 // and neither of them is a Smi. The objects are in r0 and r1.
6779 if (strict_) {
6780 // This returns non-equal for some object types, or falls through if it
6781 // was not lucky.
6782 EmitStrictTwoHeapObjectCompare(masm);
6783 }
6784
6785 Label check_for_symbols;
Leon Clarked91b9f72010-01-27 17:25:45 +00006786 Label flat_string_check;
Steve Blocka7e24c12009-10-30 11:49:00 +00006787 // Check for heap-number-heap-number comparison. Can jump to slow case,
6788 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
6789 // that case. If the inputs are not doubles then jumps to check_for_symbols.
Leon Clarkee46be812010-01-19 14:06:41 +00006790 // In this case r2 will contain the type of r0. Never falls through.
Steve Blocka7e24c12009-10-30 11:49:00 +00006791 EmitCheckForTwoHeapNumbers(masm,
6792 &both_loaded_as_doubles,
6793 &check_for_symbols,
Leon Clarked91b9f72010-01-27 17:25:45 +00006794 &flat_string_check);
Steve Blocka7e24c12009-10-30 11:49:00 +00006795
6796 __ bind(&check_for_symbols);
Leon Clarkee46be812010-01-19 14:06:41 +00006797 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
6798 // symbols.
6799 if (cc_ == eq && !strict_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006800 // Either jumps to slow or returns the answer. Assumes that r2 is the type
6801 // of r0 on entry.
Leon Clarked91b9f72010-01-27 17:25:45 +00006802 EmitCheckForSymbols(masm, &flat_string_check);
Steve Blocka7e24c12009-10-30 11:49:00 +00006803 }
6804
Leon Clarked91b9f72010-01-27 17:25:45 +00006805 // Check for both being sequential ASCII strings, and inline if that is the
6806 // case.
6807 __ bind(&flat_string_check);
6808
6809 __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
6810
6811 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
6812 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
6813 r1,
6814 r0,
6815 r2,
6816 r3,
6817 r4,
6818 r5);
6819 // Never falls through to here.
6820
Steve Blocka7e24c12009-10-30 11:49:00 +00006821 __ bind(&slow);
Leon Clarked91b9f72010-01-27 17:25:45 +00006822
Steve Block6ded16b2010-05-10 14:33:55 +01006823 __ Push(r1, r0);
Steve Blocka7e24c12009-10-30 11:49:00 +00006824 // Figure out which native to call and set up the arguments.
6825 Builtins::JavaScript native;
Steve Blocka7e24c12009-10-30 11:49:00 +00006826 if (cc_ == eq) {
6827 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
6828 } else {
6829 native = Builtins::COMPARE;
6830 int ncr; // NaN compare result
6831 if (cc_ == lt || cc_ == le) {
6832 ncr = GREATER;
6833 } else {
6834 ASSERT(cc_ == gt || cc_ == ge); // remaining cases
6835 ncr = LESS;
6836 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006837 __ mov(r0, Operand(Smi::FromInt(ncr)));
6838 __ push(r0);
6839 }
6840
6841 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
6842 // tagged as a small integer.
Leon Clarkee46be812010-01-19 14:06:41 +00006843 __ InvokeBuiltin(native, JUMP_JS);
Steve Blocka7e24c12009-10-30 11:49:00 +00006844}
6845
6846
Steve Blocka7e24c12009-10-30 11:49:00 +00006847// We fall into this code if the operands were Smis, but the result was
6848// not (e.g. overflow). We branch into this code (to the not_smi label) if
6849// the operands were not both Smi. The operands are in r0 and r1. In order
6850// to call the C-implemented binary fp operation routines we need to end up
6851// with the double precision floating point operands in r0 and r1 (for the
6852// value in r1) and r2 and r3 (for the value in r0).
Steve Block6ded16b2010-05-10 14:33:55 +01006853void GenericBinaryOpStub::HandleBinaryOpSlowCases(
6854 MacroAssembler* masm,
6855 Label* not_smi,
6856 Register lhs,
6857 Register rhs,
6858 const Builtins::JavaScript& builtin) {
6859 Label slow, slow_reverse, do_the_call;
6860 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
Steve Blockd0582a62009-12-15 09:54:21 +00006861
Steve Block6ded16b2010-05-10 14:33:55 +01006862 ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
6863
6864 if (ShouldGenerateSmiCode()) {
6865 // Smi-smi case (overflow).
6866 // Since both are Smis there is no heap number to overwrite, so allocate.
6867 // The new heap number is in r5. r6 and r7 are scratch.
6868 __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
6869
6870 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
6871 // using registers d7 and d6 for the double values.
6872 if (use_fp_registers) {
6873 CpuFeatures::Scope scope(VFP3);
6874 __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
6875 __ vmov(s15, r7);
6876 __ vcvt_f64_s32(d7, s15);
6877 __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
6878 __ vmov(s13, r7);
6879 __ vcvt_f64_s32(d6, s13);
6880 } else {
6881 // Write Smi from rhs to r3 and r2 in double format. r6 is scratch.
6882 __ mov(r7, Operand(rhs));
6883 ConvertToDoubleStub stub1(r3, r2, r7, r6);
6884 __ push(lr);
6885 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
6886 // Write Smi from lhs to r1 and r0 in double format. r6 is scratch.
6887 __ mov(r7, Operand(lhs));
6888 ConvertToDoubleStub stub2(r1, r0, r7, r6);
6889 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
6890 __ pop(lr);
6891 }
6892 __ jmp(&do_the_call); // Tail call. No return.
Steve Blockd0582a62009-12-15 09:54:21 +00006893 }
6894
Steve Block6ded16b2010-05-10 14:33:55 +01006895 // We branch here if at least one of r0 and r1 is not a Smi.
6896 __ bind(not_smi);
6897
6898 // After this point we have the left hand side in r1 and the right hand side
6899 // in r0.
6900 if (lhs.is(r0)) {
6901 __ Swap(r0, r1, ip);
6902 }
6903
6904 if (ShouldGenerateFPCode()) {
6905 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
6906
6907 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
6908 switch (op_) {
6909 case Token::ADD:
6910 case Token::SUB:
6911 case Token::MUL:
6912 case Token::DIV:
6913 GenerateTypeTransition(masm);
6914 break;
6915
6916 default:
6917 break;
6918 }
6919 }
6920
6921 if (mode_ == NO_OVERWRITE) {
6922 // In the case where there is no chance of an overwritable float we may as
6923 // well do the allocation immediately while r0 and r1 are untouched.
6924 __ AllocateHeapNumber(r5, r6, r7, &slow);
6925 }
6926
6927 // Move r0 to a double in r2-r3.
6928 __ tst(r0, Operand(kSmiTagMask));
6929 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
6930 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6931 __ b(ne, &slow);
6932 if (mode_ == OVERWRITE_RIGHT) {
6933 __ mov(r5, Operand(r0)); // Overwrite this heap number.
6934 }
6935 if (use_fp_registers) {
6936 CpuFeatures::Scope scope(VFP3);
6937 // Load the double from tagged HeapNumber r0 to d7.
6938 __ sub(r7, r0, Operand(kHeapObjectTag));
6939 __ vldr(d7, r7, HeapNumber::kValueOffset);
6940 } else {
6941 // Calling convention says that second double is in r2 and r3.
Kristian Monsen25f61362010-05-21 11:50:48 +01006942 __ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01006943 }
6944 __ jmp(&finished_loading_r0);
6945 __ bind(&r0_is_smi);
6946 if (mode_ == OVERWRITE_RIGHT) {
6947 // We can't overwrite a Smi so get address of new heap number into r5.
6948 __ AllocateHeapNumber(r5, r6, r7, &slow);
6949 }
6950
6951 if (use_fp_registers) {
6952 CpuFeatures::Scope scope(VFP3);
6953 // Convert smi in r0 to double in d7.
6954 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
6955 __ vmov(s15, r7);
6956 __ vcvt_f64_s32(d7, s15);
6957 } else {
6958 // Write Smi from r0 to r3 and r2 in double format.
6959 __ mov(r7, Operand(r0));
6960 ConvertToDoubleStub stub3(r3, r2, r7, r6);
6961 __ push(lr);
6962 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
6963 __ pop(lr);
6964 }
6965
6966 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
6967 // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
6968 Label r1_is_not_smi;
6969 if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
6970 __ tst(r1, Operand(kSmiTagMask));
6971 __ b(ne, &r1_is_not_smi);
6972 GenerateTypeTransition(masm);
6973 __ jmp(&r1_is_smi);
6974 }
6975
6976 __ bind(&finished_loading_r0);
6977
6978 // Move r1 to a double in r0-r1.
6979 __ tst(r1, Operand(kSmiTagMask));
6980 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
6981 __ bind(&r1_is_not_smi);
6982 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
6983 __ b(ne, &slow);
6984 if (mode_ == OVERWRITE_LEFT) {
6985 __ mov(r5, Operand(r1)); // Overwrite this heap number.
6986 }
6987 if (use_fp_registers) {
6988 CpuFeatures::Scope scope(VFP3);
6989 // Load the double from tagged HeapNumber r1 to d6.
6990 __ sub(r7, r1, Operand(kHeapObjectTag));
6991 __ vldr(d6, r7, HeapNumber::kValueOffset);
6992 } else {
6993 // Calling convention says that first double is in r0 and r1.
Kristian Monsen25f61362010-05-21 11:50:48 +01006994 __ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01006995 }
6996 __ jmp(&finished_loading_r1);
6997 __ bind(&r1_is_smi);
6998 if (mode_ == OVERWRITE_LEFT) {
6999 // We can't overwrite a Smi so get address of new heap number into r5.
7000 __ AllocateHeapNumber(r5, r6, r7, &slow);
7001 }
7002
7003 if (use_fp_registers) {
7004 CpuFeatures::Scope scope(VFP3);
7005 // Convert smi in r1 to double in d6.
7006 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
7007 __ vmov(s13, r7);
7008 __ vcvt_f64_s32(d6, s13);
7009 } else {
7010 // Write Smi from r1 to r1 and r0 in double format.
7011 __ mov(r7, Operand(r1));
7012 ConvertToDoubleStub stub4(r1, r0, r7, r6);
7013 __ push(lr);
7014 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
7015 __ pop(lr);
7016 }
7017
7018 __ bind(&finished_loading_r1);
7019
7020 __ bind(&do_the_call);
7021 // If we are inlining the operation using VFP3 instructions for
7022 // add, subtract, multiply, or divide, the arguments are in d6 and d7.
7023 if (use_fp_registers) {
7024 CpuFeatures::Scope scope(VFP3);
7025 // ARMv7 VFP3 instructions to implement
7026 // double precision, add, subtract, multiply, divide.
7027
7028 if (Token::MUL == op_) {
7029 __ vmul(d5, d6, d7);
7030 } else if (Token::DIV == op_) {
7031 __ vdiv(d5, d6, d7);
7032 } else if (Token::ADD == op_) {
7033 __ vadd(d5, d6, d7);
7034 } else if (Token::SUB == op_) {
7035 __ vsub(d5, d6, d7);
7036 } else {
7037 UNREACHABLE();
7038 }
7039 __ sub(r0, r5, Operand(kHeapObjectTag));
7040 __ vstr(d5, r0, HeapNumber::kValueOffset);
7041 __ add(r0, r0, Operand(kHeapObjectTag));
7042 __ mov(pc, lr);
7043 } else {
7044 // If we did not inline the operation, then the arguments are in:
7045 // r0: Left value (least significant part of mantissa).
7046 // r1: Left value (sign, exponent, top of mantissa).
7047 // r2: Right value (least significant part of mantissa).
7048 // r3: Right value (sign, exponent, top of mantissa).
7049 // r5: Address of heap number for result.
7050
7051 __ push(lr); // For later.
7052 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
7053 // Call C routine that may not cause GC or other trouble. r5 is callee
7054 // save.
7055 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
7056 // Store answer in the overwritable heap number.
7057 #if !defined(USE_ARM_EABI)
7058 // Double returned in fp coprocessor register 0 and 1, encoded as register
7059 // cr8. Offsets must be divisible by 4 for coprocessor so we need to
7060 // subtract the tag from r5.
7061 __ sub(r4, r5, Operand(kHeapObjectTag));
7062 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
7063 #else
7064 // Double returned in registers 0 and 1.
Kristian Monsen25f61362010-05-21 11:50:48 +01007065 __ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01007066 #endif
7067 __ mov(r0, Operand(r5));
7068 // And we are done.
7069 __ pop(pc);
7070 }
7071 }
7072
7073
7074 if (lhs.is(r0)) {
7075 __ b(&slow);
7076 __ bind(&slow_reverse);
7077 __ Swap(r0, r1, ip);
7078 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007079
7080 // We jump here if something goes wrong (one operand is not a number of any
7081 // sort, or new-space allocation fails).
7082 __ bind(&slow);
Steve Blockd0582a62009-12-15 09:54:21 +00007083
7084 // Push arguments to the stack
Steve Block6ded16b2010-05-10 14:33:55 +01007085 __ Push(r1, r0);
Steve Blockd0582a62009-12-15 09:54:21 +00007086
Steve Block6ded16b2010-05-10 14:33:55 +01007087 if (Token::ADD == op_) {
Steve Blockd0582a62009-12-15 09:54:21 +00007088 // Test for string arguments before calling runtime.
7089 // r1 : first argument
7090 // r0 : second argument
7091 // sp[0] : second argument
Andrei Popescu31002712010-02-23 13:46:05 +00007092 // sp[4] : first argument
Steve Blockd0582a62009-12-15 09:54:21 +00007093
Steve Block6ded16b2010-05-10 14:33:55 +01007094 Label not_strings, not_string1, string1, string1_smi2;
Steve Blockd0582a62009-12-15 09:54:21 +00007095 __ tst(r1, Operand(kSmiTagMask));
7096 __ b(eq, &not_string1);
7097 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
7098 __ b(ge, &not_string1);
7099
7100 // First argument is a string, test second.
7101 __ tst(r0, Operand(kSmiTagMask));
Steve Block6ded16b2010-05-10 14:33:55 +01007102 __ b(eq, &string1_smi2);
Steve Blockd0582a62009-12-15 09:54:21 +00007103 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
7104 __ b(ge, &string1);
7105
7106 // First and second argument are strings.
Steve Block6ded16b2010-05-10 14:33:55 +01007107 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
7108 __ TailCallStub(&string_add_stub);
7109
7110 __ bind(&string1_smi2);
7111 // First argument is a string, second is a smi. Try to lookup the number
7112 // string for the smi in the number string cache.
7113 NumberToStringStub::GenerateLookupNumberStringCache(
7114 masm, r0, r2, r4, r5, r6, true, &string1);
7115
7116 // Replace second argument on stack and tailcall string add stub to make
7117 // the result.
7118 __ str(r2, MemOperand(sp, 0));
7119 __ TailCallStub(&string_add_stub);
Steve Blockd0582a62009-12-15 09:54:21 +00007120
7121 // Only first argument is a string.
7122 __ bind(&string1);
7123 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
7124
7125 // First argument was not a string, test second.
7126 __ bind(&not_string1);
7127 __ tst(r0, Operand(kSmiTagMask));
7128 __ b(eq, &not_strings);
7129 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
7130 __ b(ge, &not_strings);
7131
7132 // Only second argument is a string.
Steve Blockd0582a62009-12-15 09:54:21 +00007133 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
7134
7135 __ bind(&not_strings);
7136 }
7137
Steve Blocka7e24c12009-10-30 11:49:00 +00007138 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
Steve Blocka7e24c12009-10-30 11:49:00 +00007139}
7140
7141
7142// Tries to get a signed int32 out of a double precision floating point heap
7143// number. Rounds towards 0. Fastest for doubles that are in the ranges
7144// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
7145// almost to the range of signed int32 values that are not Smis. Jumps to the
7146// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
7147// (excluding the endpoints).
7148static void GetInt32(MacroAssembler* masm,
7149 Register source,
7150 Register dest,
7151 Register scratch,
7152 Register scratch2,
7153 Label* slow) {
7154 Label right_exponent, done;
7155 // Get exponent word.
7156 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
7157 // Get exponent alone in scratch2.
7158 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
7159 // Load dest with zero. We use this either for the final shift or
7160 // for the answer.
7161 __ mov(dest, Operand(0));
7162 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
7163 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
7164 // the exponent that we are fastest at and also the highest exponent we can
7165 // handle here.
7166 const uint32_t non_smi_exponent =
7167 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
7168 __ cmp(scratch2, Operand(non_smi_exponent));
7169 // If we have a match of the int32-but-not-Smi exponent then skip some logic.
7170 __ b(eq, &right_exponent);
7171 // If the exponent is higher than that then go to slow case. This catches
7172 // numbers that don't fit in a signed int32, infinities and NaNs.
7173 __ b(gt, slow);
7174
7175 // We know the exponent is smaller than 30 (biased). If it is less than
7176 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
7177 // it rounds to zero.
7178 const uint32_t zero_exponent =
7179 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
7180 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
7181 // Dest already has a Smi zero.
7182 __ b(lt, &done);
Steve Blockd0582a62009-12-15 09:54:21 +00007183 if (!CpuFeatures::IsSupported(VFP3)) {
7184 // We have a shifted exponent between 0 and 30 in scratch2.
7185 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
7186 // We now have the exponent in dest. Subtract from 30 to get
7187 // how much to shift down.
7188 __ rsb(dest, dest, Operand(30));
7189 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007190 __ bind(&right_exponent);
Steve Blockd0582a62009-12-15 09:54:21 +00007191 if (CpuFeatures::IsSupported(VFP3)) {
7192 CpuFeatures::Scope scope(VFP3);
7193 // ARMv7 VFP3 instructions implementing double precision to integer
7194 // conversion using round to zero.
7195 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
Leon Clarkee46be812010-01-19 14:06:41 +00007196 __ vmov(d7, scratch2, scratch);
Steve Block6ded16b2010-05-10 14:33:55 +01007197 __ vcvt_s32_f64(s15, d7);
Leon Clarkee46be812010-01-19 14:06:41 +00007198 __ vmov(dest, s15);
Steve Blockd0582a62009-12-15 09:54:21 +00007199 } else {
7200 // Get the top bits of the mantissa.
7201 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
7202 // Put back the implicit 1.
7203 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
7204 // Shift up the mantissa bits to take up the space the exponent used to
7205 // take. We just ORed in the implicit bit, which took care of one bit, and
7206 // we want to leave the sign bit 0, so we subtract 2 bits from the shift
7207 // distance.
7208 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
7209 __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
7210 // Put sign in zero flag.
7211 __ tst(scratch, Operand(HeapNumber::kSignMask));
7212 // Get the second half of the double. For some exponents we don't
7213 // actually need this because the bits get shifted out again, but
7214 // it's probably slower to test than just to do it.
7215 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
7216 // Shift down 22 bits to get the last 10 bits.
7217 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
7218 // Move down according to the exponent.
7219 __ mov(dest, Operand(scratch, LSR, dest));
7220 // Fix sign if sign bit was set.
7221 __ rsb(dest, dest, Operand(0), LeaveCC, ne);
7222 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007223 __ bind(&done);
7224}
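
// Illustrative sketch (not part of the original file): under the assumptions
// GetInt32 has already checked (finite value, magnitude below 2^31), the
// manual, non-VFP3 path above is a truncating double->int32 conversion.  The
// same computation on the two 32-bit words of the double in plain C++
// (TruncateToInt32 is a hypothetical name used only for this sketch):
//
//   int32_t TruncateToInt32(uint32_t hi, uint32_t lo) {  // hi = sign/exponent
//     int exponent = ((hi >> 20) & 0x7ff) - 1023;        // unbias
//     if (exponent < 0) return 0;                        // |value| < 1.0
//     uint64_t significand =                             // put back implicit 1
//         (static_cast<uint64_t>((hi & 0xfffff) | 0x100000) << 32) | lo;
//     int32_t magnitude =
//         static_cast<int32_t>(significand >> (52 - exponent));
//     return (hi & 0x80000000u) ? -magnitude : magnitude;
//   }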
7225
Steve Blocka7e24c12009-10-30 11:49:00 +00007226// For bitwise ops where the inputs are not both Smis we try here to determine
7227// whether both inputs are either Smis or at least heap numbers that can be
7228// represented by a 32 bit signed value. We truncate towards zero as required
7229// by the ES spec. If this is the case we do the bitwise op and see if the
7230// result is a Smi. If so, great, otherwise we try to find a heap number to
7231// write the answer into (either by allocating or by overwriting).
Steve Block6ded16b2010-05-10 14:33:55 +01007232// On entry the operands are in lhs and rhs. On exit the answer is in r0.
7233void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
7234 Register lhs,
7235 Register rhs) {
Steve Blocka7e24c12009-10-30 11:49:00 +00007236 Label slow, result_not_a_smi;
Steve Block6ded16b2010-05-10 14:33:55 +01007237 Label rhs_is_smi, lhs_is_smi;
7238 Label done_checking_rhs, done_checking_lhs;
Steve Blocka7e24c12009-10-30 11:49:00 +00007239
Steve Block6ded16b2010-05-10 14:33:55 +01007240 __ tst(lhs, Operand(kSmiTagMask));
7241 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
7242 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00007243 __ b(ne, &slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007244 GetInt32(masm, lhs, r3, r5, r4, &slow);
7245 __ jmp(&done_checking_lhs);
7246 __ bind(&lhs_is_smi);
7247 __ mov(r3, Operand(lhs, ASR, 1));
7248 __ bind(&done_checking_lhs);
Steve Blocka7e24c12009-10-30 11:49:00 +00007249
Steve Block6ded16b2010-05-10 14:33:55 +01007250 __ tst(rhs, Operand(kSmiTagMask));
7251 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
7252 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
Steve Blocka7e24c12009-10-30 11:49:00 +00007253 __ b(ne, &slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007254 GetInt32(masm, rhs, r2, r5, r4, &slow);
7255 __ jmp(&done_checking_rhs);
7256 __ bind(&rhs_is_smi);
7257 __ mov(r2, Operand(rhs, ASR, 1));
7258 __ bind(&done_checking_rhs);
7259
7260 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
Steve Blocka7e24c12009-10-30 11:49:00 +00007261
7262 // r0 and r1: Original operands (Smi or heap numbers).
7263 // r2 and r3: Signed int32 operands.
7264 switch (op_) {
7265 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
7266 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
7267 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
7268 case Token::SAR:
7269 // Use only the 5 least significant bits of the shift count.
7270 __ and_(r2, r2, Operand(0x1f));
7271 __ mov(r2, Operand(r3, ASR, r2));
7272 break;
7273 case Token::SHR:
7274 // Use only the 5 least significant bits of the shift count.
7275 __ and_(r2, r2, Operand(0x1f));
7276 __ mov(r2, Operand(r3, LSR, r2), SetCC);
7277 // SHR is special because it is required to produce a positive answer.
7278 // The code below for writing into heap numbers isn't capable of writing
7279 // the register as an unsigned int so we go to slow case if we hit this
7280 // case.
7281 __ b(mi, &slow);
7282 break;
7283 case Token::SHL:
7284 // Use only the 5 least significant bits of the shift count.
7285 __ and_(r2, r2, Operand(0x1f));
7286 __ mov(r2, Operand(r3, LSL, r2));
7287 break;
7288 default: UNREACHABLE();
7289 }
7290 // Check that the *signed* result fits in a Smi (i.e. lies in [-2^30, 2^30)).
7291 __ add(r3, r2, Operand(0x40000000), SetCC);
7292 __ b(mi, &result_not_a_smi);
7293 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
7294 __ Ret();
7295
7296 Label have_to_allocate, got_a_heap_number;
7297 __ bind(&result_not_a_smi);
7298 switch (mode_) {
7299 case OVERWRITE_RIGHT: {
Steve Block6ded16b2010-05-10 14:33:55 +01007300 __ tst(rhs, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00007301 __ b(eq, &have_to_allocate);
Steve Block6ded16b2010-05-10 14:33:55 +01007302 __ mov(r5, Operand(rhs));
Steve Blocka7e24c12009-10-30 11:49:00 +00007303 break;
7304 }
7305 case OVERWRITE_LEFT: {
Steve Block6ded16b2010-05-10 14:33:55 +01007306 __ tst(lhs, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00007307 __ b(eq, &have_to_allocate);
Steve Block6ded16b2010-05-10 14:33:55 +01007308 __ mov(r5, Operand(lhs));
Steve Blocka7e24c12009-10-30 11:49:00 +00007309 break;
7310 }
7311 case NO_OVERWRITE: {
7312 // Get a new heap number in r5. r6 and r7 are scratch.
Steve Block6ded16b2010-05-10 14:33:55 +01007313 __ AllocateHeapNumber(r5, r6, r7, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007314 }
7315 default: break;
7316 }
7317 __ bind(&got_a_heap_number);
7318 // r2: Answer as signed int32.
7319 // r5: Heap number to write answer into.
7320
7321 // Nothing can go wrong now, so move the heap number to r0, which is the
7322 // result.
7323 __ mov(r0, Operand(r5));
7324
7325 // Tail call that writes the int32 in r2 to the heap number in r0, using
7326 // r3 as scratch. r0 is preserved and returned.
7327 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
7328 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
7329
7330 if (mode_ != NO_OVERWRITE) {
7331 __ bind(&have_to_allocate);
7332 // Get a new heap number in r5. r6 and r7 are scratch.
Steve Block6ded16b2010-05-10 14:33:55 +01007333 __ AllocateHeapNumber(r5, r6, r7, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007334 __ jmp(&got_a_heap_number);
7335 }
7336
7337 // If all else failed then we go to the runtime system.
7338 __ bind(&slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007339 __ Push(lhs, rhs); // Restore stack.
Steve Blocka7e24c12009-10-30 11:49:00 +00007340 switch (op_) {
7341 case Token::BIT_OR:
7342 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
7343 break;
7344 case Token::BIT_AND:
7345 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
7346 break;
7347 case Token::BIT_XOR:
7348 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
7349 break;
7350 case Token::SAR:
7351 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
7352 break;
7353 case Token::SHR:
7354 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
7355 break;
7356 case Token::SHL:
7357 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
7358 break;
7359 default:
7360 UNREACHABLE();
7361 }
7362}
7363
7364
7365// Can we multiply by x with at most two shifts and an add?
7366// This answers yes to all integers from 2 to 10.
7367static bool IsEasyToMultiplyBy(int x) {
7368 if (x < 2) return false; // Avoid special cases.
7369 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
7370 if (IsPowerOf2(x)) return true; // Simple shift.
7371 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
7372 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
7373 return false;
7374}
7375
7376
7377// Can multiply by anything that IsEasyToMultiplyBy returns true for.
7378// Source and destination may be the same register. This routine does
7379// not set carry and overflow the way a mul instruction would.
7380static void MultiplyByKnownInt(MacroAssembler* masm,
7381 Register source,
7382 Register destination,
7383 int known_int) {
7384 if (IsPowerOf2(known_int)) {
7385 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
7386 } else if (PopCountLessThanEqual2(known_int)) {
7387 int first_bit = BitPosition(known_int);
7388 int second_bit = BitPosition(known_int ^ (1 << first_bit));
7389 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
7390 if (first_bit != 0) {
7391 __ mov(destination, Operand(destination, LSL, first_bit));
7392 }
7393 } else {
7394 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
7395 int the_bit = BitPosition(known_int + 1);
7396 __ rsb(destination, source, Operand(source, LSL, the_bit));
7397 }
7398}
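
// Illustrative examples (not part of the original file) of the three shapes
// MultiplyByKnownInt handles above:
//   x * 8  : mov(dest, src LSL 3)                        // power of two
//   x * 10 : add(dest, src, src LSL 2); then dest LSL 1  // 10 = (1 + 4) * 2
//   x * 7  : rsb(dest, src, src LSL 3)                   // 7 = 8 - 1 pattern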
7399
7400
7401// This function (as opposed to MultiplyByKnownInt) takes the known int in
7402// a register for the cases where it doesn't know a good trick, and may deliver
7403// a result that needs shifting.
7404static void MultiplyByKnownInt2(
7405 MacroAssembler* masm,
7406 Register result,
7407 Register source,
7408 Register known_int_register, // Smi tagged.
7409 int known_int,
7410 int* required_shift) { // Including Smi tag shift
7411 switch (known_int) {
7412 case 3:
7413 __ add(result, source, Operand(source, LSL, 1));
7414 *required_shift = 1;
7415 break;
7416 case 5:
7417 __ add(result, source, Operand(source, LSL, 2));
7418 *required_shift = 1;
7419 break;
7420 case 6:
7421 __ add(result, source, Operand(source, LSL, 1));
7422 *required_shift = 2;
7423 break;
7424 case 7:
7425 __ rsb(result, source, Operand(source, LSL, 3));
7426 *required_shift = 1;
7427 break;
7428 case 9:
7429 __ add(result, source, Operand(source, LSL, 3));
7430 *required_shift = 1;
7431 break;
7432 case 10:
7433 __ add(result, source, Operand(source, LSL, 2));
7434 *required_shift = 2;
7435 break;
7436 default:
7437 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
7438 __ mul(result, source, known_int_register);
7439 *required_shift = 0;
7440 }
7441}
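
// Example (illustrative, not part of the original file): for known_int == 6
// the code above produces result = source * 3 with *required_shift == 2, so
// the caller's "result << required_shift" equals source * 12, i.e. source
// multiplied by the Smi-tagged constant 6.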
7442
7443
Leon Clarkee46be812010-01-19 14:06:41 +00007444const char* GenericBinaryOpStub::GetName() {
7445 if (name_ != NULL) return name_;
7446 const int len = 100;
7447 name_ = Bootstrapper::AllocateAutoDeletedArray(len);
7448 if (name_ == NULL) return "OOM";
7449 const char* op_name = Token::Name(op_);
7450 const char* overwrite_name;
7451 switch (mode_) {
7452 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
7453 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
7454 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
7455 default: overwrite_name = "UnknownOverwrite"; break;
7456 }
7457
7458 OS::SNPrintF(Vector<char>(name_, len),
7459 "GenericBinaryOpStub_%s_%s%s",
7460 op_name,
7461 overwrite_name,
7462 specialized_on_rhs_ ? "_ConstantRhs" : "");
7463 return name_;
7464}
7465
7466
Andrei Popescu31002712010-02-23 13:46:05 +00007467
Steve Blocka7e24c12009-10-30 11:49:00 +00007468void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Steve Block6ded16b2010-05-10 14:33:55 +01007469 // lhs_ : x
7470 // rhs_ : y
7471 // r0 : result
Steve Blocka7e24c12009-10-30 11:49:00 +00007472
Steve Block6ded16b2010-05-10 14:33:55 +01007473 Register result = r0;
7474 Register lhs = lhs_;
7475 Register rhs = rhs_;
7476
7477 // This code can't cope with other register allocations yet.
7478 ASSERT(result.is(r0) &&
7479 ((lhs.is(r0) && rhs.is(r1)) ||
7480 (lhs.is(r1) && rhs.is(r0))));
7481
7482 Register smi_test_reg = VirtualFrame::scratch0();
7483 Register scratch = VirtualFrame::scratch1();
7484
7485 // All ops need to know whether we are dealing with two Smis. Set up
7486 // smi_test_reg to tell us that.
7487 if (ShouldGenerateSmiCode()) {
7488 __ orr(smi_test_reg, lhs, Operand(rhs));
7489 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007490
7491 switch (op_) {
7492 case Token::ADD: {
7493 Label not_smi;
7494 // Fast path.
Steve Block6ded16b2010-05-10 14:33:55 +01007495 if (ShouldGenerateSmiCode()) {
7496 ASSERT(kSmiTag == 0); // Adjust code below.
7497 __ tst(smi_test_reg, Operand(kSmiTagMask));
7498 __ b(ne, &not_smi);
7499 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
7500 // Return if no overflow.
7501 __ Ret(vc);
7502 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
7503 }
7504 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
Steve Blocka7e24c12009-10-30 11:49:00 +00007505 break;
7506 }
7507
7508 case Token::SUB: {
7509 Label not_smi;
7510 // Fast path.
Steve Block6ded16b2010-05-10 14:33:55 +01007511 if (ShouldGenerateSmiCode()) {
7512 ASSERT(kSmiTag == 0); // Adjust code below.
7513 __ tst(smi_test_reg, Operand(kSmiTagMask));
7514 __ b(ne, &not_smi);
7515 if (lhs.is(r1)) {
7516 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
7517 // Return if no overflow.
7518 __ Ret(vc);
7519 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
7520 } else {
7521 __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
7522 // Return if no overflow.
7523 __ Ret(vc);
7524 __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
7525 }
7526 }
7527 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
Steve Blocka7e24c12009-10-30 11:49:00 +00007528 break;
7529 }
7530
7531 case Token::MUL: {
7532 Label not_smi, slow;
Steve Block6ded16b2010-05-10 14:33:55 +01007533 if (ShouldGenerateSmiCode()) {
7534 ASSERT(kSmiTag == 0); // adjust code below
7535 __ tst(smi_test_reg, Operand(kSmiTagMask));
7536 Register scratch2 = smi_test_reg;
7537 smi_test_reg = no_reg;
7538 __ b(ne, &not_smi);
7539 // Remove tag from one operand (but keep sign), so that result is Smi.
7540 __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
7541 // Do multiplication
7542 // scratch = lower 32 bits of ip * lhs.
7543 __ smull(scratch, scratch2, lhs, ip);
7544 // Go slow on overflows (overflow bit is not set).
7545 __ mov(ip, Operand(scratch, ASR, 31));
7546 // No overflow if higher 33 bits are identical.
7547 __ cmp(ip, Operand(scratch2));
7548 __ b(ne, &slow);
7549 // Go slow on zero result to handle -0.
7550 __ tst(scratch, Operand(scratch));
7551 __ mov(result, Operand(scratch), LeaveCC, ne);
7552 __ Ret(ne);
7553 // We need -0 if we multiplied a negative number by 0 to get 0.
7554 // We know one of them was zero.
7555 __ add(scratch2, rhs, Operand(lhs), SetCC);
7556 __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
7557 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
7558 // Slow case. We fall through here if we multiplied a negative number
7559 // with 0, because that would mean we should produce -0.
7560 __ bind(&slow);
7561 }
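      // Illustrative note (not part of the stub): the smull-based overflow
      // check above is equivalent to the following C++ (variable names are
      // only for this sketch):
      //
      //   int64_t product = static_cast<int64_t>(tagged_lhs) * untagged_rhs;
      //   int32_t lo = static_cast<int32_t>(product);
      //   int32_t hi = static_cast<int32_t>(product >> 32);
      //   bool fits = (hi == (lo >> 31));  // high word == sign extension of lo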
7562 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
Steve Blocka7e24c12009-10-30 11:49:00 +00007563 break;
7564 }
7565
7566 case Token::DIV:
7567 case Token::MOD: {
7568 Label not_smi;
Steve Block6ded16b2010-05-10 14:33:55 +01007569 if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00007570 Label smi_is_unsuitable;
Steve Block6ded16b2010-05-10 14:33:55 +01007571 __ BranchOnNotSmi(lhs, &not_smi);
Steve Blocka7e24c12009-10-30 11:49:00 +00007572 if (IsPowerOf2(constant_rhs_)) {
7573 if (op_ == Token::MOD) {
Steve Block6ded16b2010-05-10 14:33:55 +01007574 __ and_(rhs,
7575 lhs,
Steve Blocka7e24c12009-10-30 11:49:00 +00007576 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
7577 SetCC);
7578 // We now have the answer, but if the input was negative we also
7579 // have the sign bit. Our work is done if the result is
7580 // positive or zero:
Steve Block6ded16b2010-05-10 14:33:55 +01007581 if (!rhs.is(r0)) {
7582 __ mov(r0, rhs, LeaveCC, pl);
7583 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007584 __ Ret(pl);
7585 // A mod of a negative left hand side must return a negative number.
7586 // Unfortunately if the answer is 0 then we must return -0. And we
Steve Block6ded16b2010-05-10 14:33:55 +01007587 // already optimistically trashed rhs so we may need to restore it.
7588 __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00007589 // Next two instructions are conditional on the answer being -0.
Steve Block6ded16b2010-05-10 14:33:55 +01007590 __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
Steve Blocka7e24c12009-10-30 11:49:00 +00007591 __ b(eq, &smi_is_unsuitable);
7592 // We need to subtract the divisor. E.g. -3 % 4 == -3.
Steve Block6ded16b2010-05-10 14:33:55 +01007593 __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
Steve Blocka7e24c12009-10-30 11:49:00 +00007594 } else {
7595 ASSERT(op_ == Token::DIV);
Steve Block6ded16b2010-05-10 14:33:55 +01007596 __ tst(lhs,
Steve Blocka7e24c12009-10-30 11:49:00 +00007597 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
7598 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
7599 int shift = 0;
7600 int d = constant_rhs_;
7601 while ((d & 1) == 0) {
7602 d >>= 1;
7603 shift++;
7604 }
Steve Block6ded16b2010-05-10 14:33:55 +01007605 __ mov(r0, Operand(lhs, LSR, shift));
Steve Blocka7e24c12009-10-30 11:49:00 +00007606 __ bic(r0, r0, Operand(kSmiTagMask));
7607 }
7608 } else {
7609 // Not a power of 2.
Steve Block6ded16b2010-05-10 14:33:55 +01007610 __ tst(lhs, Operand(0x80000000u));
Steve Blocka7e24c12009-10-30 11:49:00 +00007611 __ b(ne, &smi_is_unsuitable);
7612 // Find a fixed point reciprocal of the divisor so we can divide by
7613 // multiplying.
7614 double divisor = 1.0 / constant_rhs_;
7615 int shift = 32;
7616 double scale = 4294967296.0; // 1 << 32.
7617 uint32_t mul;
7618 // Maximise the precision of the fixed point reciprocal.
7619 while (true) {
7620 mul = static_cast<uint32_t>(scale * divisor);
7621 if (mul >= 0x7fffffff) break;
7622 scale *= 2.0;
7623 shift++;
7624 }
7625 mul++;
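        // Illustrative sketch (not part of the stub): the reciprocal trick,
        // written as plain C++ for an untagged non-negative lhs and a small
        // constant divisor (DivideByConstant is a hypothetical name):
        //
        //   uint32_t DivideByConstant(uint32_t lhs, int divisor) {
        //     double scale = 4294967296.0;  // 1 << 32
        //     int shift = 32;
        //     uint32_t mul;
        //     while (true) {                // maximise reciprocal precision
        //       mul = static_cast<uint32_t>(scale * (1.0 / divisor));
        //       if (mul >= 0x7fffffff) break;
        //       scale *= 2.0;
        //       shift++;
        //     }
        //     mul++;
        //     uint64_t product = static_cast<uint64_t>(mul) * lhs;  // umull
        //     return static_cast<uint32_t>(product >> shift);  // lhs / divisor
        //   }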
Steve Block6ded16b2010-05-10 14:33:55 +01007626 Register scratch2 = smi_test_reg;
7627 smi_test_reg = no_reg;
7628 __ mov(scratch2, Operand(mul));
7629 __ umull(scratch, scratch2, scratch2, lhs);
7630 __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
7631 // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
7632 // rhs is still the known rhs. rhs is Smi tagged.
7633 // lhs is still the unknown lhs. lhs is Smi tagged.
7634 int required_scratch_shift = 0; // Including the Smi tag shift of 1.
7635 // scratch = scratch2 * rhs.
Steve Blocka7e24c12009-10-30 11:49:00 +00007636 MultiplyByKnownInt2(masm,
Steve Block6ded16b2010-05-10 14:33:55 +01007637 scratch,
7638 scratch2,
7639 rhs,
Steve Blocka7e24c12009-10-30 11:49:00 +00007640 constant_rhs_,
Steve Block6ded16b2010-05-10 14:33:55 +01007641 &required_scratch_shift);
7642 // scratch << required_scratch_shift is now the Smi tagged rhs *
7643 // (lhs / rhs) where / indicates integer division.
Steve Blocka7e24c12009-10-30 11:49:00 +00007644 if (op_ == Token::DIV) {
Steve Block6ded16b2010-05-10 14:33:55 +01007645 __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
Steve Blocka7e24c12009-10-30 11:49:00 +00007646 __ b(ne, &smi_is_unsuitable); // There was a remainder.
Steve Block6ded16b2010-05-10 14:33:55 +01007647 __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00007648 } else {
7649 ASSERT(op_ == Token::MOD);
Steve Block6ded16b2010-05-10 14:33:55 +01007650 __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
Steve Blocka7e24c12009-10-30 11:49:00 +00007651 }
7652 }
7653 __ Ret();
7654 __ bind(&smi_is_unsuitable);
Steve Blocka7e24c12009-10-30 11:49:00 +00007655 }
Steve Block6ded16b2010-05-10 14:33:55 +01007656 HandleBinaryOpSlowCases(
7657 masm,
7658 &not_smi,
7659 lhs,
7660 rhs,
7661 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
Steve Blocka7e24c12009-10-30 11:49:00 +00007662 break;
7663 }
7664
7665 case Token::BIT_OR:
7666 case Token::BIT_AND:
7667 case Token::BIT_XOR:
7668 case Token::SAR:
7669 case Token::SHR:
7670 case Token::SHL: {
7671 Label slow;
7672 ASSERT(kSmiTag == 0); // adjust code below
Steve Block6ded16b2010-05-10 14:33:55 +01007673 __ tst(smi_test_reg, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00007674 __ b(ne, &slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007675 Register scratch2 = smi_test_reg;
7676 smi_test_reg = no_reg;
Steve Blocka7e24c12009-10-30 11:49:00 +00007677 switch (op_) {
Steve Block6ded16b2010-05-10 14:33:55 +01007678 case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
7679 case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
7680 case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
Steve Blocka7e24c12009-10-30 11:49:00 +00007681 case Token::SAR:
7682 // Remove tags from right operand.
Steve Block6ded16b2010-05-10 14:33:55 +01007683 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
7684 __ mov(result, Operand(lhs, ASR, scratch2));
Steve Blocka7e24c12009-10-30 11:49:00 +00007685 // Smi tag result.
Steve Block6ded16b2010-05-10 14:33:55 +01007686 __ bic(result, result, Operand(kSmiTagMask));
Steve Blocka7e24c12009-10-30 11:49:00 +00007687 break;
7688 case Token::SHR:
7689 // Remove tags from operands. We can't do this on a 31 bit number
7690 // because then the 0s get shifted into bit 30 instead of bit 31.
Steve Block6ded16b2010-05-10 14:33:55 +01007691 __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
7692 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
7693 __ mov(scratch, Operand(scratch, LSR, scratch2));
Steve Blocka7e24c12009-10-30 11:49:00 +00007694 // Unsigned shift is not allowed to produce a negative number, so
7695 // check the sign bit and the sign bit after Smi tagging.
Steve Block6ded16b2010-05-10 14:33:55 +01007696 __ tst(scratch, Operand(0xc0000000));
Steve Blocka7e24c12009-10-30 11:49:00 +00007697 __ b(ne, &slow);
7698 // Smi tag result.
Steve Block6ded16b2010-05-10 14:33:55 +01007699 __ mov(result, Operand(scratch, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00007700 break;
7701 case Token::SHL:
7702 // Remove tags from operands.
Steve Block6ded16b2010-05-10 14:33:55 +01007703 __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
7704 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
7705 __ mov(scratch, Operand(scratch, LSL, scratch2));
Steve Blocka7e24c12009-10-30 11:49:00 +00007706 // Check that the signed result fits in a Smi.
Steve Block6ded16b2010-05-10 14:33:55 +01007707 __ add(scratch2, scratch, Operand(0x40000000), SetCC);
Steve Blocka7e24c12009-10-30 11:49:00 +00007708 __ b(mi, &slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007709 __ mov(result, Operand(scratch, LSL, kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00007710 break;
7711 default: UNREACHABLE();
7712 }
7713 __ Ret();
7714 __ bind(&slow);
Steve Block6ded16b2010-05-10 14:33:55 +01007715 HandleNonSmiBitwiseOp(masm, lhs, rhs);
Steve Blocka7e24c12009-10-30 11:49:00 +00007716 break;
7717 }
7718
7719 default: UNREACHABLE();
7720 }
7721 // This code should be unreachable.
7722 __ stop("Unreachable");
Steve Block6ded16b2010-05-10 14:33:55 +01007723
7724 // Generate an unreachable reference to the DEFAULT stub so that it can be
7725 // found at the end of this stub when clearing ICs at GC.
7726 // TODO(kaznacheev): Check performance impact and get rid of this.
7727 if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
7728 GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
7729 __ CallStub(&uninit);
7730 }
7731}
7732
7733
7734void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
7735 Label get_result;
7736
7737 __ Push(r1, r0);
7738
7739 // Internal frame is necessary to handle exceptions properly.
7740 __ EnterInternalFrame();
7741 // Call the stub proper to get the result in r0.
7742 __ Call(&get_result);
7743 __ LeaveInternalFrame();
7744
7745 __ push(r0);
7746
7747 __ mov(r0, Operand(Smi::FromInt(MinorKey())));
7748 __ push(r0);
7749 __ mov(r0, Operand(Smi::FromInt(op_)));
7750 __ push(r0);
7751 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
7752 __ push(r0);
7753
7754 __ TailCallExternalReference(
7755 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
7756 6,
7757 1);
7758
7759 // The entry point for the result calculation is assumed to be immediately
7760 // after this sequence.
7761 __ bind(&get_result);
7762}
7763
7764
7765Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
7766 GenericBinaryOpStub stub(key, type_info);
7767 return stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00007768}
7769
7770
7771void StackCheckStub::Generate(MacroAssembler* masm) {
7772 // Do tail-call to runtime routine. Runtime routines expect at least one
7773 // argument, so give it a Smi.
7774 __ mov(r0, Operand(Smi::FromInt(0)));
7775 __ push(r0);
Steve Block6ded16b2010-05-10 14:33:55 +01007776 __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00007777
7778 __ StubReturn(1);
7779}
7780
7781
Leon Clarkee46be812010-01-19 14:06:41 +00007782void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Leon Clarke4515c472010-02-03 11:58:03 +00007783 Label slow, done;
Leon Clarkee46be812010-01-19 14:06:41 +00007784
Leon Clarke4515c472010-02-03 11:58:03 +00007785 if (op_ == Token::SUB) {
7786 // Check whether the value is a smi.
7787 Label try_float;
7788 __ tst(r0, Operand(kSmiTagMask));
7789 __ b(ne, &try_float);
Steve Blocka7e24c12009-10-30 11:49:00 +00007790
Leon Clarke4515c472010-02-03 11:58:03 +00007791 // Go slow case if the value of the expression is zero
7792 // to make sure that we switch between 0 and -0.
7793 __ cmp(r0, Operand(0));
7794 __ b(eq, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007795
Leon Clarke4515c472010-02-03 11:58:03 +00007796 // The value of the expression is a smi that is not zero. Try
7797 // optimistic subtraction '0 - value'.
7798 __ rsb(r1, r0, Operand(0), SetCC);
7799 __ b(vs, &slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00007800
Leon Clarke4515c472010-02-03 11:58:03 +00007801 __ mov(r0, Operand(r1)); // Set r0 to result.
7802 __ b(&done);
Steve Blocka7e24c12009-10-30 11:49:00 +00007803
Leon Clarke4515c472010-02-03 11:58:03 +00007804 __ bind(&try_float);
7805 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
7806 __ b(ne, &slow);
7807 // r0 is a heap number. Get a new heap number in r1.
7808 if (overwrite_) {
7809 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
7810 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
7811 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
7812 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01007813 __ AllocateHeapNumber(r1, r2, r3, &slow);
Leon Clarke4515c472010-02-03 11:58:03 +00007814 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
7815 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
7816 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
7817 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
7818 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
7819 __ mov(r0, Operand(r1));
7820 }
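    // (Illustrative, not part of the stub: negating an IEEE-754 double only
    //  requires flipping the sign bit of its upper word, e.g. in C++
    //    exponent_word ^= 0x80000000u;  // HeapNumber::kSignMask
    //  which is exactly what the eor instructions above do.)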
7821 } else if (op_ == Token::BIT_NOT) {
7822 // Check if the operand is a heap number.
7823 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
7824 __ b(ne, &slow);
7825
7826 // Convert the heap number in r0 to an untagged integer in r1.
7827 GetInt32(masm, r0, r1, r2, r3, &slow);
7828
7829 // Do the bitwise operation (move negated) and check if the result
7830 // fits in a smi.
7831 Label try_float;
7832 __ mvn(r1, Operand(r1));
7833 __ add(r2, r1, Operand(0x40000000), SetCC);
7834 __ b(mi, &try_float);
7835 __ mov(r0, Operand(r1, LSL, kSmiTagSize));
7836 __ b(&done);
7837
7838 __ bind(&try_float);
7839 if (!overwrite_) {
7840 // Allocate a fresh heap number, but don't overwrite r0 until
7841 // we're sure we can do it without going through the slow case
7842 // that needs the value in r0.
Steve Block6ded16b2010-05-10 14:33:55 +01007843 __ AllocateHeapNumber(r2, r3, r4, &slow);
Leon Clarke4515c472010-02-03 11:58:03 +00007844 __ mov(r0, Operand(r2));
7845 }
7846
7847 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
7848 // have to set up a frame.
7849 WriteInt32ToHeapNumberStub stub(r1, r0, r2);
7850 __ push(lr);
7851 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
7852 __ pop(lr);
7853 } else {
7854 UNIMPLEMENTED();
7855 }
7856
7857 __ bind(&done);
Steve Blocka7e24c12009-10-30 11:49:00 +00007858 __ StubReturn(1);
7859
Leon Clarke4515c472010-02-03 11:58:03 +00007860 // Handle the slow case by jumping to the JavaScript builtin.
Steve Blocka7e24c12009-10-30 11:49:00 +00007861 __ bind(&slow);
7862 __ push(r0);
Leon Clarke4515c472010-02-03 11:58:03 +00007863 switch (op_) {
7864 case Token::SUB:
7865 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
7866 break;
7867 case Token::BIT_NOT:
7868 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
7869 break;
7870 default:
7871 UNREACHABLE();
Steve Blocka7e24c12009-10-30 11:49:00 +00007872 }
Steve Blocka7e24c12009-10-30 11:49:00 +00007873}
7874
7875
7876void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
7877 // r0 holds the exception.
7878
7879 // Adjust this code if not the case.
7880 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
7881
7882 // Drop the sp to the top of the handler.
7883 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
7884 __ ldr(sp, MemOperand(r3));
7885
7886 // Restore the next handler and frame pointer, discard handler state.
7887 ASSERT(StackHandlerConstants::kNextOffset == 0);
7888 __ pop(r2);
7889 __ str(r2, MemOperand(r3));
7890 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
7891 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
7892
7893 // Before returning we restore the context from the frame pointer if
7894 // not NULL. The frame pointer is NULL in the exception handler of a
7895 // JS entry frame.
7896 __ cmp(fp, Operand(0));
7897 // Set cp to NULL if fp is NULL.
7898 __ mov(cp, Operand(0), LeaveCC, eq);
7899 // Restore cp otherwise.
7900 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
7901#ifdef DEBUG
7902 if (FLAG_debug_code) {
7903 __ mov(lr, Operand(pc));
7904 }
7905#endif
7906 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
7907 __ pop(pc);
7908}
7909
7910
7911void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
7912 UncatchableExceptionType type) {
7913 // Adjust this code if not the case.
7914 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
7915
7916 // Drop sp to the top stack handler.
7917 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
7918 __ ldr(sp, MemOperand(r3));
7919
7920 // Unwind the handlers until the ENTRY handler is found.
7921 Label loop, done;
7922 __ bind(&loop);
7923 // Load the type of the current stack handler.
7924 const int kStateOffset = StackHandlerConstants::kStateOffset;
7925 __ ldr(r2, MemOperand(sp, kStateOffset));
7926 __ cmp(r2, Operand(StackHandler::ENTRY));
7927 __ b(eq, &done);
7928 // Fetch the next handler in the list.
7929 const int kNextOffset = StackHandlerConstants::kNextOffset;
7930 __ ldr(sp, MemOperand(sp, kNextOffset));
7931 __ jmp(&loop);
7932 __ bind(&done);
7933
7934 // Set the top handler address to the next handler past the current ENTRY one.
7935 ASSERT(StackHandlerConstants::kNextOffset == 0);
7936 __ pop(r2);
7937 __ str(r2, MemOperand(r3));
7938
7939 if (type == OUT_OF_MEMORY) {
7940 // Set external caught exception to false.
7941 ExternalReference external_caught(Top::k_external_caught_exception_address);
7942 __ mov(r0, Operand(false));
7943 __ mov(r2, Operand(external_caught));
7944 __ str(r0, MemOperand(r2));
7945
7946 // Set pending exception and r0 to out of memory exception.
7947 Failure* out_of_memory = Failure::OutOfMemoryException();
7948 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
7949 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
7950 __ str(r0, MemOperand(r2));
7951 }
7952
7953 // Stack layout at this point. See also StackHandlerConstants.
7954 // sp -> state (ENTRY)
7955 // fp
7956 // lr
7957
7958 // Discard handler state (r2 is not used) and restore frame pointer.
7959 ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
7960 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
7961 // Before returning we restore the context from the frame pointer if
7962 // not NULL. The frame pointer is NULL in the exception handler of a
7963 // JS entry frame.
7964 __ cmp(fp, Operand(0));
7965 // Set cp to NULL if fp is NULL.
7966 __ mov(cp, Operand(0), LeaveCC, eq);
7967 // Restore cp otherwise.
7968 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
7969#ifdef DEBUG
7970 if (FLAG_debug_code) {
7971 __ mov(lr, Operand(pc));
7972 }
7973#endif
7974 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
7975 __ pop(pc);
7976}
7977
7978
7979void CEntryStub::GenerateCore(MacroAssembler* masm,
7980 Label* throw_normal_exception,
7981 Label* throw_termination_exception,
7982 Label* throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00007983 bool do_gc,
Steve Block6ded16b2010-05-10 14:33:55 +01007984 bool always_allocate,
7985 int frame_alignment_skew) {
Steve Blocka7e24c12009-10-30 11:49:00 +00007986 // r0: result parameter for PerformGC, if any
7987 // r4: number of arguments including receiver (C callee-saved)
7988 // r5: pointer to builtin function (C callee-saved)
7989 // r6: pointer to the first argument (C callee-saved)
7990
7991 if (do_gc) {
7992 // Passing r0.
Steve Block6ded16b2010-05-10 14:33:55 +01007993 __ PrepareCallCFunction(1, r1);
7994 __ CallCFunction(ExternalReference::perform_gc_function(), 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00007995 }
7996
7997 ExternalReference scope_depth =
7998 ExternalReference::heap_always_allocate_scope_depth();
7999 if (always_allocate) {
8000 __ mov(r0, Operand(scope_depth));
8001 __ ldr(r1, MemOperand(r0));
8002 __ add(r1, r1, Operand(1));
8003 __ str(r1, MemOperand(r0));
8004 }
8005
8006 // Call C built-in.
8007 // r0 = argc, r1 = argv
8008 __ mov(r0, Operand(r4));
8009 __ mov(r1, Operand(r6));
8010
Steve Block6ded16b2010-05-10 14:33:55 +01008011 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
8012 int frame_alignment_mask = frame_alignment - 1;
8013#if defined(V8_HOST_ARCH_ARM)
8014 if (FLAG_debug_code) {
8015 if (frame_alignment > kPointerSize) {
8016 Label alignment_as_expected;
8017 ASSERT(IsPowerOf2(frame_alignment));
8018 __ sub(r2, sp, Operand(frame_alignment_skew));
8019 __ tst(r2, Operand(frame_alignment_mask));
8020 __ b(eq, &alignment_as_expected);
8021 // Don't use Check here, as it would call Runtime_Abort and re-enter this code.
8022 __ stop("Unexpected alignment");
8023 __ bind(&alignment_as_expected);
8024 }
8025 }
8026#endif
8027
8028 // Just before the call (jump) below lr is pushed, so the actual alignment is
8029 // adding one to the current skew.
8030 int alignment_before_call =
8031 (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
8032 if (alignment_before_call > 0) {
8033 // Push until the alignment before the call is met.
8034 __ mov(r2, Operand(0));
8035 for (int i = alignment_before_call;
8036 (i & frame_alignment_mask) != 0;
8037 i += kPointerSize) {
8038 __ push(r2);
8039 }
8040 }
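  // (Example, illustrative only: with the 8-byte EABI frame alignment and a
  //  skew of 0, alignment_before_call is 4, so one filler word is pushed;
  //  assuming sp was 8-byte aligned here, it is aligned again once lr is
  //  pushed just before the call below.)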
8041
Steve Blocka7e24c12009-10-30 11:49:00 +00008042 // TODO(1242173): To let the GC traverse the return address of the exit
8043 // frames, we need to know where the return address is. Right now,
8044 // we push it on the stack to be able to find it again, but we never
8045 // restore from it in case of changes, which makes it impossible to
8046 // support moving the C entry code stub. This should be fixed, but currently
8047 // this is OK because the CEntryStub gets generated so early in the V8 boot
8048 // sequence that it is not moving ever.
Steve Block6ded16b2010-05-10 14:33:55 +01008049 masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
Steve Blocka7e24c12009-10-30 11:49:00 +00008050 masm->push(lr);
8051 masm->Jump(r5);
8052
Steve Block6ded16b2010-05-10 14:33:55 +01008053 // Restore sp back to before aligning the stack.
8054 if (alignment_before_call > 0) {
8055 __ add(sp, sp, Operand(alignment_before_call));
8056 }
8057
Steve Blocka7e24c12009-10-30 11:49:00 +00008058 if (always_allocate) {
8059 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
8060 // though (contain the result).
8061 __ mov(r2, Operand(scope_depth));
8062 __ ldr(r3, MemOperand(r2));
8063 __ sub(r3, r3, Operand(1));
8064 __ str(r3, MemOperand(r2));
8065 }
8066
8067 // check for failure result
8068 Label failure_returned;
8069 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
8070 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
8071 __ add(r2, r0, Operand(1));
8072 __ tst(r2, Operand(kFailureTagMask));
8073 __ b(eq, &failure_returned);
8074
8075 // Exit C frame and return.
8076 // r0:r1: result
8077 // sp: stack pointer
8078 // fp: frame pointer
Leon Clarke4515c472010-02-03 11:58:03 +00008079 __ LeaveExitFrame(mode_);
Steve Blocka7e24c12009-10-30 11:49:00 +00008080
8081 // check if we should retry or throw exception
8082 Label retry;
8083 __ bind(&failure_returned);
8084 ASSERT(Failure::RETRY_AFTER_GC == 0);
8085 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
8086 __ b(eq, &retry);
8087
8088 // Special handling of out of memory exceptions.
8089 Failure* out_of_memory = Failure::OutOfMemoryException();
8090 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
8091 __ b(eq, throw_out_of_memory_exception);
8092
8093 // Retrieve the pending exception and clear the variable.
8094 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
8095 __ ldr(r3, MemOperand(ip));
8096 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8097 __ ldr(r0, MemOperand(ip));
8098 __ str(r3, MemOperand(ip));
8099
8100 // Special handling of termination exceptions which are uncatchable
8101 // by javascript code.
8102 __ cmp(r0, Operand(Factory::termination_exception()));
8103 __ b(eq, throw_termination_exception);
8104
8105 // Handle normal exception.
8106 __ jmp(throw_normal_exception);
8107
8108 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
8109}
8110
8111
Leon Clarke4515c472010-02-03 11:58:03 +00008112void CEntryStub::Generate(MacroAssembler* masm) {
Steve Blocka7e24c12009-10-30 11:49:00 +00008113 // Called from JavaScript; parameters are on stack as if calling JS function
8114 // r0: number of arguments including receiver
8115 // r1: pointer to builtin function
8116 // fp: frame pointer (restored after C call)
8117 // sp: stack pointer (restored as callee's sp after C call)
8118 // cp: current context (C callee-saved)
8119
Leon Clarke4515c472010-02-03 11:58:03 +00008120 // Result returned in r0 or r0+r1 by default.
8121
Steve Blocka7e24c12009-10-30 11:49:00 +00008122 // NOTE: Invocations of builtins may return failure objects
8123 // instead of a proper result. The builtin entry handles
8124 // this by performing a garbage collection and retrying the
8125 // builtin once.
8126
Steve Blocka7e24c12009-10-30 11:49:00 +00008127 // Enter the exit frame that transitions from JavaScript to C++.
Leon Clarke4515c472010-02-03 11:58:03 +00008128 __ EnterExitFrame(mode_);
Steve Blocka7e24c12009-10-30 11:49:00 +00008129
8130 // r4: number of arguments (C callee-saved)
8131 // r5: pointer to builtin function (C callee-saved)
8132 // r6: pointer to first argument (C callee-saved)
8133
8134 Label throw_normal_exception;
8135 Label throw_termination_exception;
8136 Label throw_out_of_memory_exception;
8137
8138 // Call into the runtime system.
8139 GenerateCore(masm,
8140 &throw_normal_exception,
8141 &throw_termination_exception,
8142 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00008143 false,
Steve Block6ded16b2010-05-10 14:33:55 +01008144 false,
8145 -kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00008146
8147 // Do space-specific GC and retry runtime call.
8148 GenerateCore(masm,
8149 &throw_normal_exception,
8150 &throw_termination_exception,
8151 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00008152 true,
Steve Block6ded16b2010-05-10 14:33:55 +01008153 false,
8154 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00008155
8156 // Do full GC and retry runtime call one final time.
8157 Failure* failure = Failure::InternalError();
8158 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
8159 GenerateCore(masm,
8160 &throw_normal_exception,
8161 &throw_termination_exception,
8162 &throw_out_of_memory_exception,
Steve Blocka7e24c12009-10-30 11:49:00 +00008163 true,
Steve Block6ded16b2010-05-10 14:33:55 +01008164 true,
8165 kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00008166
8167 __ bind(&throw_out_of_memory_exception);
8168 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
8169
8170 __ bind(&throw_termination_exception);
8171 GenerateThrowUncatchable(masm, TERMINATION);
8172
8173 __ bind(&throw_normal_exception);
8174 GenerateThrowTOS(masm);
8175}
8176
8177
8178void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
8179 // r0: code entry
8180 // r1: function
8181 // r2: receiver
8182 // r3: argc
8183 // [sp+0]: argv
8184
8185 Label invoke, exit;
8186
8187 // Called from C, so do not pop argc and args on exit (preserve sp)
8188 // No need to save register-passed args
8189 // Save callee-saved registers (incl. cp and fp), sp, and lr
8190 __ stm(db_w, sp, kCalleeSaved | lr.bit());
8191
8192 // Get address of argv, see stm above.
8193 // r0: code entry
8194 // r1: function
8195 // r2: receiver
8196 // r3: argc
Leon Clarke4515c472010-02-03 11:58:03 +00008197 __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
Steve Blocka7e24c12009-10-30 11:49:00 +00008198
8199 // Push a frame with special values setup to mark it as an entry frame.
8200 // r0: code entry
8201 // r1: function
8202 // r2: receiver
8203 // r3: argc
8204 // r4: argv
8205 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
8206 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
8207 __ mov(r7, Operand(Smi::FromInt(marker)));
8208 __ mov(r6, Operand(Smi::FromInt(marker)));
8209 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
8210 __ ldr(r5, MemOperand(r5));
Steve Block6ded16b2010-05-10 14:33:55 +01008211 __ Push(r8, r7, r6, r5);
Steve Blocka7e24c12009-10-30 11:49:00 +00008212
8213 // Set up the frame pointer for the frame to be pushed.
8214 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
8215
8216 // Call a faked try-block that does the invoke.
8217 __ bl(&invoke);
8218
8219 // Caught exception: Store result (exception) in the pending
8220 // exception field in the JSEnv and return a failure sentinel.
8221 // Coming in here, the fp will be invalid because the PushTryHandler below
8222 // sets it to 0 to signal the existence of the JSEntry frame.
8223 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8224 __ str(r0, MemOperand(ip));
8225 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
8226 __ b(&exit);
8227
8228 // Invoke: Link this frame into the handler chain.
8229 __ bind(&invoke);
8230 // Must preserve r0-r4, r5-r7 are available.
8231 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
8232 // If an exception not caught by another handler occurs, this handler
8233 // returns control to the code after the bl(&invoke) above, which
8234 // restores all kCalleeSaved registers (including cp and fp) to their
8235 // saved values before returning a failure to C.
8236
8237 // Clear any pending exceptions.
8238 __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
8239 __ ldr(r5, MemOperand(ip));
8240 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8241 __ str(r5, MemOperand(ip));
8242
8243 // Invoke the function by calling through JS entry trampoline builtin.
8244 // Notice that we cannot store a reference to the trampoline code directly in
8245 // this stub, because runtime stubs are not traversed when doing GC.
8246
8247 // Expected registers by Builtins::JSEntryTrampoline
8248 // r0: code entry
8249 // r1: function
8250 // r2: receiver
8251 // r3: argc
8252 // r4: argv
8253 if (is_construct) {
8254 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
8255 __ mov(ip, Operand(construct_entry));
8256 } else {
8257 ExternalReference entry(Builtins::JSEntryTrampoline);
8258 __ mov(ip, Operand(entry));
8259 }
8260 __ ldr(ip, MemOperand(ip)); // deref address
8261
8262 // Branch and link to JSEntryTrampoline. We don't use the double underscore
8263 // macro for the add instruction because we don't want the coverage tool
8264 // inserting instructions here after we read the pc.
8265 __ mov(lr, Operand(pc));
8266 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
8267
8268 // Unlink this frame from the handler chain. When reading the
8269 // address of the next handler, there is no need to use the address
8270 // displacement since the current stack pointer (sp) points directly
8271 // to the stack handler.
8272 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
8273 __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
8274 __ str(r3, MemOperand(ip));
8275 // No need to restore registers
8276 __ add(sp, sp, Operand(StackHandlerConstants::kSize));
8277
8278
8279 __ bind(&exit); // r0 holds result
8280 // Restore the top frame descriptors from the stack.
8281 __ pop(r3);
8282 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
8283 __ str(r3, MemOperand(ip));
8284
8285 // Reset the stack to the callee saved registers.
8286 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
8287
8288 // Restore callee-saved registers and return.
8289#ifdef DEBUG
8290 if (FLAG_debug_code) {
8291 __ mov(lr, Operand(pc));
8292 }
8293#endif
8294 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
8295}
8296
8297
8298// This stub performs an instanceof, calling the builtin function if
8299// necessary. Uses r0 for the object, r1 for the function that it may
8300// be an instance of (these are fetched from the stack).
8301void InstanceofStub::Generate(MacroAssembler* masm) {
8302 // Get the object - slow case for smis (we may need to throw an exception
8303 // depending on the rhs).
8304 Label slow, loop, is_instance, is_not_instance;
8305 __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
8306 __ BranchOnSmi(r0, &slow);
8307
8308 // Check that the left hand is a JS object and put map in r3.
8309 __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
8310 __ b(lt, &slow);
8311 __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
8312 __ b(gt, &slow);
8313
8314 // Get the prototype of the function (r4 is result, r2 is scratch).
Andrei Popescu402d9372010-02-26 13:31:12 +00008315 __ ldr(r1, MemOperand(sp, 0));
Kristian Monsen25f61362010-05-21 11:50:48 +01008316 // r1 is function, r3 is map.
8317
8318 // Look up the function and the map in the instanceof cache.
8319 Label miss;
8320 __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
8321 __ cmp(r1, ip);
8322 __ b(ne, &miss);
8323 __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
8324 __ cmp(r3, ip);
8325 __ b(ne, &miss);
8326 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
8327 __ pop();
8328 __ pop();
8329 __ mov(pc, Operand(lr));
8330
8331 __ bind(&miss);
Steve Blocka7e24c12009-10-30 11:49:00 +00008332 __ TryGetFunctionPrototype(r1, r4, r2, &slow);
8333
8334 // Check that the function prototype is a JS object.
8335 __ BranchOnSmi(r4, &slow);
8336 __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
8337 __ b(lt, &slow);
8338 __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
8339 __ b(gt, &slow);
8340
Kristian Monsen25f61362010-05-21 11:50:48 +01008341 __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
8342 __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
8343
Steve Blocka7e24c12009-10-30 11:49:00 +00008344 // Register mapping: r3 is object map and r4 is function prototype.
8345 // Get prototype of object into r2.
8346 __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
8347
8348 // Loop through the prototype chain looking for the function prototype.
8349 __ bind(&loop);
8350 __ cmp(r2, Operand(r4));
8351 __ b(eq, &is_instance);
8352 __ LoadRoot(ip, Heap::kNullValueRootIndex);
8353 __ cmp(r2, ip);
8354 __ b(eq, &is_not_instance);
8355 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
8356 __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
8357 __ jmp(&loop);
8358
8359 __ bind(&is_instance);
8360 __ mov(r0, Operand(Smi::FromInt(0)));
Kristian Monsen25f61362010-05-21 11:50:48 +01008361 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
Steve Blocka7e24c12009-10-30 11:49:00 +00008362 __ pop();
8363 __ pop();
8364 __ mov(pc, Operand(lr)); // Return.
8365
8366 __ bind(&is_not_instance);
8367 __ mov(r0, Operand(Smi::FromInt(1)));
Kristian Monsen25f61362010-05-21 11:50:48 +01008368 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
Steve Blocka7e24c12009-10-30 11:49:00 +00008369 __ pop();
8370 __ pop();
8371 __ mov(pc, Operand(lr)); // Return.
8372
8373 // Slow-case. Tail call builtin.
8374 __ bind(&slow);
Steve Blocka7e24c12009-10-30 11:49:00 +00008375 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
8376}
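// In JS terms, the prototype-chain loop above roughly implements the
// following sketch (illustrative only, not generated code):
//   for (var p = Object.getPrototypeOf(obj); p !== null; p = Object.getPrototypeOf(p)) {
//     if (p === func.prototype) return true;
//   }
//   return false;
// The stub returns Smi 0 for an instance and Smi 1 otherwise, which is also
// the value stored in the instanceof cache checked at the top of the stub.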
8377
8378
Steve Blocka7e24c12009-10-30 11:49:00 +00008379void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
8380 // The displacement is the offset of the last parameter (if any)
8381 // relative to the frame pointer.
8382 static const int kDisplacement =
8383 StandardFrameConstants::kCallerSPOffset - kPointerSize;
8384
8385 // Check that the key is a smi.
8386 Label slow;
8387 __ BranchOnNotSmi(r1, &slow);
8388
8389 // Check if the calling frame is an arguments adaptor frame.
8390 Label adaptor;
8391 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
8392 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
8393 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
8394 __ b(eq, &adaptor);
8395
8396 // Check index against formal parameters count limit passed in
Steve Blockd0582a62009-12-15 09:54:21 +00008397 // through register r0. Use unsigned comparison to get negative
Steve Blocka7e24c12009-10-30 11:49:00 +00008398 // check for free.
8399 __ cmp(r1, r0);
8400 __ b(cs, &slow);
8401
8402 // Read the argument from the stack and return it.
8403 __ sub(r3, r0, r1);
8404 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
8405 __ ldr(r0, MemOperand(r3, kDisplacement));
8406 __ Jump(lr);
8407
8408 // Arguments adaptor case: Check index against actual arguments
8409 // limit found in the arguments adaptor frame. Use unsigned
8410 // comparison to get negative check for free.
8411 __ bind(&adaptor);
8412 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
8413 __ cmp(r1, r0);
8414 __ b(cs, &slow);
8415
8416 // Read the argument from the adaptor frame and return it.
8417 __ sub(r3, r0, r1);
8418 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
8419 __ ldr(r0, MemOperand(r3, kDisplacement));
8420 __ Jump(lr);
8421
8422 // Slow-case: Handle non-smi or out-of-bounds access to arguments
8423 // by calling the runtime system.
8424 __ bind(&slow);
8425 __ push(r1);
Steve Block6ded16b2010-05-10 14:33:55 +01008426 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
Steve Blocka7e24c12009-10-30 11:49:00 +00008427}
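// A sketch of the address computation above (assuming 4-byte pointers and a
// 1-bit smi tag, not generated code): in the non-adaptor case the element
// loaded is
//   *(fp + kCallerSPOffset + (argc - 1 - key) * kPointerSize)
// i.e. the key'th argument counted from the caller's stack pointer; the
// adaptor case is identical but uses the adaptor frame pointer and its
// argument count.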
8428
8429
8430void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
Andrei Popescu402d9372010-02-26 13:31:12 +00008431 // sp[0] : number of parameters
8432 // sp[4] : receiver displacement
8433 // sp[8] : function
8434
Steve Blocka7e24c12009-10-30 11:49:00 +00008435 // Check if the calling frame is an arguments adaptor frame.
Andrei Popescu402d9372010-02-26 13:31:12 +00008436 Label adaptor_frame, try_allocate, runtime;
Steve Blocka7e24c12009-10-30 11:49:00 +00008437 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
8438 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
8439 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
Andrei Popescu402d9372010-02-26 13:31:12 +00008440 __ b(eq, &adaptor_frame);
8441
8442 // Get the length from the frame.
8443 __ ldr(r1, MemOperand(sp, 0));
8444 __ b(&try_allocate);
Steve Blocka7e24c12009-10-30 11:49:00 +00008445
8446 // Patch the arguments.length and the parameters pointer.
Andrei Popescu402d9372010-02-26 13:31:12 +00008447 __ bind(&adaptor_frame);
8448 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
8449 __ str(r1, MemOperand(sp, 0));
8450 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00008451 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
8452 __ str(r3, MemOperand(sp, 1 * kPointerSize));
8453
Andrei Popescu402d9372010-02-26 13:31:12 +00008454 // Try the new space allocation. Start out with computing the size
Kristian Monsen25f61362010-05-21 11:50:48 +01008455 // of the arguments object and the elements array in words.
Andrei Popescu402d9372010-02-26 13:31:12 +00008456 Label add_arguments_object;
8457 __ bind(&try_allocate);
8458 __ cmp(r1, Operand(0));
8459 __ b(eq, &add_arguments_object);
8460 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
8461 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
8462 __ bind(&add_arguments_object);
8463 __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
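  // In words, the size computed in r1 is (a sketch of the logic above):
  //   size = Heap::kArgumentsObjectSize / kPointerSize
  //        + (argc == 0 ? 0 : argc + FixedArray::kHeaderSize / kPointerSize);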
8464
8465 // Do the allocation of both objects in one go.
Kristian Monsen25f61362010-05-21 11:50:48 +01008466 __ AllocateInNewSpace(
8467 r1,
8468 r0,
8469 r2,
8470 r3,
8471 &runtime,
8472 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
Andrei Popescu402d9372010-02-26 13:31:12 +00008473
8474 // Get the arguments boilerplate from the current (global) context.
8475 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
8476 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
8477 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
8478 __ ldr(r4, MemOperand(r4, offset));
8479
8480 // Copy the JS object part.
8481 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
8482 __ ldr(r3, FieldMemOperand(r4, i));
8483 __ str(r3, FieldMemOperand(r0, i));
8484 }
8485
8486 // Set up the callee in-object property.
8487 ASSERT(Heap::arguments_callee_index == 0);
8488 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
8489 __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
8490
8491 // Get the length (smi tagged) and set that as an in-object property too.
8492 ASSERT(Heap::arguments_length_index == 1);
8493 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
8494 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
8495
8496 // If there are no actual arguments, we're done.
8497 Label done;
8498 __ cmp(r1, Operand(0));
8499 __ b(eq, &done);
8500
8501 // Get the parameters pointer from the stack and untag the length.
8502 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
8503 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
8504
8505 // Set up the elements pointer in the allocated arguments object and
8506 // initialize the header in the elements fixed array.
8507 __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
8508 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
8509 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
8510 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
8511 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
8512
8513 // Copy the fixed array slots.
8514 Label loop;
8515 // Set up r4 to point to the first array slot.
8516 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
8517 __ bind(&loop);
8518 // Pre-decrement r2 with kPointerSize on each iteration.
8519 // Pre-decrement in order to skip receiver.
8520 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
8521 // Post-increment r4 with kPointerSize on each iteration.
8522 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
8523 __ sub(r1, r1, Operand(1));
8524 __ cmp(r1, Operand(0));
8525 __ b(ne, &loop);
8526
8527 // Return and remove the on-stack parameters.
8528 __ bind(&done);
8529 __ add(sp, sp, Operand(3 * kPointerSize));
8530 __ Ret();
8531
Steve Blocka7e24c12009-10-30 11:49:00 +00008532 // Do the runtime call to allocate the arguments object.
8533 __ bind(&runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01008534 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
8535}
8536
8537
8538void RegExpExecStub::Generate(MacroAssembler* masm) {
8539 // Just jump directly to runtime if native RegExp is not selected at compile
8540 // time or if regexp entry in generated code is turned off runtime switch or
8541 // at compilation.
Kristian Monsen25f61362010-05-21 11:50:48 +01008542#ifdef V8_INTERPRETED_REGEXP
Steve Block6ded16b2010-05-10 14:33:55 +01008543 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
Kristian Monsen25f61362010-05-21 11:50:48 +01008544#else // V8_INTERPRETED_REGEXP
Steve Block6ded16b2010-05-10 14:33:55 +01008545 if (!FLAG_regexp_entry_native) {
8546 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
8547 return;
8548 }
8549
8550 // Stack frame on entry.
8551 // sp[0]: last_match_info (expected JSArray)
8552 // sp[4]: previous index
8553 // sp[8]: subject string
8554 // sp[12]: JSRegExp object
8555
8556 static const int kLastMatchInfoOffset = 0 * kPointerSize;
8557 static const int kPreviousIndexOffset = 1 * kPointerSize;
8558 static const int kSubjectOffset = 2 * kPointerSize;
8559 static const int kJSRegExpOffset = 3 * kPointerSize;
8560
8561 Label runtime, invoke_regexp;
8562
8563 // Allocation of registers for this function. These are in callee save
8564 // registers and will be preserved by the call to the native RegExp code, as
8565 // this code is called using the normal C calling convention. When calling
8566 // directly from generated code the native RegExp code will not do a GC and
8567 // therefore the content of these registers are safe to use after the call.
8568 Register subject = r4;
8569 Register regexp_data = r5;
8570 Register last_match_info_elements = r6;
8571
8572 // Ensure that a RegExp stack is allocated.
8573 ExternalReference address_of_regexp_stack_memory_address =
8574 ExternalReference::address_of_regexp_stack_memory_address();
8575 ExternalReference address_of_regexp_stack_memory_size =
8576 ExternalReference::address_of_regexp_stack_memory_size();
8577 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
8578 __ ldr(r0, MemOperand(r0, 0));
8579 __ tst(r0, Operand(r0));
8580 __ b(eq, &runtime);
8581
8582 // Check that the first argument is a JSRegExp object.
8583 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
8584 ASSERT_EQ(0, kSmiTag);
8585 __ tst(r0, Operand(kSmiTagMask));
8586 __ b(eq, &runtime);
8587 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
8588 __ b(ne, &runtime);
8589
8590 // Check that the RegExp has been compiled (data contains a fixed array).
8591 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
8592 if (FLAG_debug_code) {
8593 __ tst(regexp_data, Operand(kSmiTagMask));
8594 __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
8595 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
8596 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
8597 }
8598
8599 // regexp_data: RegExp data (FixedArray)
8600 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
8601 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
8602 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
8603 __ b(ne, &runtime);
8604
8605 // regexp_data: RegExp data (FixedArray)
8606 // Check that the number of captures fit in the static offsets vector buffer.
8607 __ ldr(r2,
8608 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
8609 // Calculate number of capture registers (number_of_captures + 1) * 2. This
8610 // uses the assumption that smis are 2 * their untagged value.
8611 ASSERT_EQ(0, kSmiTag);
8612 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
8613 __ add(r2, r2, Operand(2)); // r2 was a smi.
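  // For example, a regexp with 3 capture groups arrives here with the smi
  // value 6 in r2; adding 2 gives 8, i.e. (3 + 1) * 2 capture registers.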
8614 // Check that the static offsets vector buffer is large enough.
8615 __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
8616 __ b(hi, &runtime);
8617
8618 // r2: Number of capture registers
8619 // regexp_data: RegExp data (FixedArray)
8620 // Check that the second argument is a string.
8621 __ ldr(subject, MemOperand(sp, kSubjectOffset));
8622 __ tst(subject, Operand(kSmiTagMask));
8623 __ b(eq, &runtime);
8624 Condition is_string = masm->IsObjectStringType(subject, r0);
8625 __ b(NegateCondition(is_string), &runtime);
8626 // Get the length of the string to r3.
8627 __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
8628
8629 // r2: Number of capture registers
8630 // r3: Length of subject string as a smi
8631 // subject: Subject string
8632 // regexp_data: RegExp data (FixedArray)
8633 // Check that the third argument is a positive smi less than the subject
8634 // string length. A negative value will be greater (unsigned comparison).
8635 __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
8636 __ tst(r0, Operand(kSmiTagMask));
Kristian Monsen25f61362010-05-21 11:50:48 +01008637 __ b(ne, &runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01008638 __ cmp(r3, Operand(r0));
Kristian Monsen25f61362010-05-21 11:50:48 +01008639 __ b(ls, &runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01008640
8641 // r2: Number of capture registers
8642 // subject: Subject string
8643 // regexp_data: RegExp data (FixedArray)
8644 // Check that the fourth object is a JSArray object.
8645 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
8646 __ tst(r0, Operand(kSmiTagMask));
8647 __ b(eq, &runtime);
8648 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
8649 __ b(ne, &runtime);
8650 // Check that the JSArray is in fast case.
8651 __ ldr(last_match_info_elements,
8652 FieldMemOperand(r0, JSArray::kElementsOffset));
8653 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01008654 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
Steve Block6ded16b2010-05-10 14:33:55 +01008655 __ cmp(r0, ip);
8656 __ b(ne, &runtime);
8657 // Check that the last match info has space for the capture registers and the
8658 // additional information.
8659 __ ldr(r0,
8660 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
8661 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
8662 __ cmp(r2, r0);
8663 __ b(gt, &runtime);
8664
8665 // subject: Subject string
8666 // regexp_data: RegExp data (FixedArray)
8667 // Check the representation and encoding of the subject string.
8668 Label seq_string;
8669 const int kStringRepresentationEncodingMask =
8670 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
8671 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
8672 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
8673 __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
8674 // First check for sequential string.
8675 ASSERT_EQ(0, kStringTag);
8676 ASSERT_EQ(0, kSeqStringTag);
8677 __ tst(r1, Operand(kIsNotStringMask | kStringRepresentationMask));
8678 __ b(eq, &seq_string);
8679
8680 // subject: Subject string
8681 // regexp_data: RegExp data (FixedArray)
8682 // Check for flat cons string.
8683 // A flat cons string is a cons string where the second part is the empty
8684 // string. In that case the subject string is just the first part of the cons
8685 // string. Also in this case the first part of the cons string is known to be
8686 // a sequential string or an external string.
8687 __ and_(r0, r0, Operand(kStringRepresentationMask));
8688 __ cmp(r0, Operand(kConsStringTag));
8689 __ b(ne, &runtime);
8690 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
8691 __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
8692 __ cmp(r0, r1);
8693 __ b(ne, &runtime);
8694 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
8695 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
8696 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
8697 ASSERT_EQ(0, kSeqStringTag);
8698 __ tst(r0, Operand(kStringRepresentationMask));
8699 __ b(nz, &runtime);
8700 __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
8701
8702 __ bind(&seq_string);
8703 // r1: subject string type & kStringRepresentationEncodingMask
8704 // subject: Subject string
8705 // regexp_data: RegExp data (FixedArray)
8706 // Check that the irregexp code has been generated for an ascii string. If
8707 // it has, the field contains a code object; otherwise it contains the hole.
8708#ifdef DEBUG
8709 const int kSeqAsciiString = kStringTag | kSeqStringTag | kAsciiStringTag;
8710 const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
8711 CHECK_EQ(4, kSeqAsciiString);
8712 CHECK_EQ(0, kSeqTwoByteString);
8713#endif
8714 // Find the code object based on the assumptions above.
8715 __ mov(r3, Operand(r1, ASR, 2), SetCC);
8716 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
8717 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
8718
8719 // Check that the irregexp code has been generated for the actual string
8720 // encoding. If it has, the field contains a code object; otherwise it contains
8721 // the hole.
8722 __ CompareObjectType(r7, r0, r0, CODE_TYPE);
8723 __ b(ne, &runtime);
8724
8725 // r3: encoding of subject string (1 if ascii, 0 if two_byte);
8726 // r7: code
8727 // subject: Subject string
8728 // regexp_data: RegExp data (FixedArray)
8729 // Load used arguments before starting to push arguments for call to native
8730 // RegExp code to avoid handling changing stack height.
8731 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
8732 __ mov(r1, Operand(r1, ASR, kSmiTagSize));
8733
8734 // r1: previous index
8735 // r3: encoding of subject string (1 if ascii, 0 if two_byte);
8736 // r7: code
8737 // subject: Subject string
8738 // regexp_data: RegExp data (FixedArray)
8739 // All checks done. Now push arguments for native regexp code.
8740 __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
8741
8742 static const int kRegExpExecuteArguments = 7;
8743 __ push(lr);
8744 __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
8745
8746 // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
8747 __ mov(r0, Operand(1));
8748 __ str(r0, MemOperand(sp, 2 * kPointerSize));
8749
8750 // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
8751 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
8752 __ ldr(r0, MemOperand(r0, 0));
8753 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
8754 __ ldr(r2, MemOperand(r2, 0));
8755 __ add(r0, r0, Operand(r2));
8756 __ str(r0, MemOperand(sp, 1 * kPointerSize));
8757
8758 // Argument 5 (sp[0]): static offsets vector buffer.
8759 __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
8760 __ str(r0, MemOperand(sp, 0 * kPointerSize));
8761
8762 // For arguments 4 and 3 get string length, calculate start of string data and
8763 // calculate the shift of the index (0 for ASCII and 1 for two byte).
8764 __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
8765 __ mov(r0, Operand(r0, ASR, kSmiTagSize));
8766 ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
8767 __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
8768 __ eor(r3, r3, Operand(1));
8769 // Argument 4 (r3): End of string data
8770 // Argument 3 (r2): Start of string data
8771 __ add(r2, r9, Operand(r1, LSL, r3));
8772 __ add(r3, r9, Operand(r0, LSL, r3));
8773
8774 // Argument 2 (r1): Previous index.
8775 // Already there
8776
8777 // Argument 1 (r0): Subject string.
8778 __ mov(r0, subject);
8779
8780 // Locate the code entry and call it.
8781 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
8782 __ CallCFunction(r7, kRegExpExecuteArguments);
8783 __ pop(lr);
8784
8785 // r0: result
8786 // subject: subject string (callee saved)
8787 // regexp_data: RegExp data (callee saved)
8788 // last_match_info_elements: Last match info elements (callee saved)
8789
8790 // Check the result.
8791 Label success;
8792 __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
8793 __ b(eq, &success);
8794 Label failure;
8795 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
8796 __ b(eq, &failure);
8797 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
8798 // If not exception it can only be retry. Handle that in the runtime system.
8799 __ b(ne, &runtime);
8800 // Result must now be exception. If there is no pending exception already, a
8801 // stack overflow (on the backtrack stack) was detected in RegExp code but the
8802 // exception has not been created yet. Handle that in the runtime system.
8803 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
8804 __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
8805 __ ldr(r0, MemOperand(r0, 0));
8806 __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
8807 __ ldr(r1, MemOperand(r1, 0));
8808 __ cmp(r0, r1);
8809 __ b(eq, &runtime);
8810 __ bind(&failure);
8811 // For failure and exception return null.
8812 __ mov(r0, Operand(Factory::null_value()));
8813 __ add(sp, sp, Operand(4 * kPointerSize));
8814 __ Ret();
8815
8816 // Process the result from the native regexp code.
8817 __ bind(&success);
8818 __ ldr(r1,
8819 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
8820 // Calculate number of capture registers (number_of_captures + 1) * 2.
8821 ASSERT_EQ(0, kSmiTag);
8822 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
8823 __ add(r1, r1, Operand(2)); // r1 was a smi.
8824
8825 // r1: number of capture registers
8826 // r4: subject string
8827 // Store the capture count.
8828 __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
8829 __ str(r2, FieldMemOperand(last_match_info_elements,
8830 RegExpImpl::kLastCaptureCountOffset));
8831 // Store last subject and last input.
8832 __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
8833 __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset)); // Ditto.
8834 __ str(subject,
8835 FieldMemOperand(last_match_info_elements,
8836 RegExpImpl::kLastSubjectOffset));
8837 __ RecordWrite(r3, r2, r7);
8838 __ str(subject,
8839 FieldMemOperand(last_match_info_elements,
8840 RegExpImpl::kLastInputOffset));
8841 __ mov(r3, last_match_info_elements);
8842 __ mov(r2, Operand(RegExpImpl::kLastInputOffset));
8843 __ RecordWrite(r3, r2, r7);
8844
8845 // Get the static offsets vector filled by the native regexp code.
8846 ExternalReference address_of_static_offsets_vector =
8847 ExternalReference::address_of_static_offsets_vector();
8848 __ mov(r2, Operand(address_of_static_offsets_vector));
8849
8850 // r1: number of capture registers
8851 // r2: offsets vector
8852 Label next_capture, done;
8853 // Capture register counter starts from number of capture registers and
8854 // counts down until wrapping after zero.
8855 __ add(r0,
8856 last_match_info_elements,
8857 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
8858 __ bind(&next_capture);
8859 __ sub(r1, r1, Operand(1), SetCC);
8860 __ b(mi, &done);
8861 // Read the value from the static offsets vector buffer.
8862 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
8863 // Store the smi value in the last match info.
8864 __ mov(r3, Operand(r3, LSL, kSmiTagSize));
8865 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
8866 __ jmp(&next_capture);
8867 __ bind(&done);
8868
8869 // Return last match info.
8870 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
8871 __ add(sp, sp, Operand(4 * kPointerSize));
8872 __ Ret();
8873
8874 // Do the runtime call to execute the regexp.
8875 __ bind(&runtime);
8876 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
Kristian Monsen25f61362010-05-21 11:50:48 +01008877#endif // V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00008878}
8879
8880
8881void CallFunctionStub::Generate(MacroAssembler* masm) {
8882 Label slow;
Leon Clarkee46be812010-01-19 14:06:41 +00008883
8884 // If the receiver might be a value (string, number or boolean) check for this
8885 // and box it if it is.
8886 if (ReceiverMightBeValue()) {
8887 // Get the receiver from the stack.
8888 // function, receiver [, arguments]
8889 Label receiver_is_value, receiver_is_js_object;
8890 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
8891
8892 // Check if receiver is a smi (which is a number value).
8893 __ BranchOnSmi(r1, &receiver_is_value);
8894
8895 // Check if the receiver is a valid JS object.
8896 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
8897 __ b(ge, &receiver_is_js_object);
8898
8899 // Call the runtime to box the value.
8900 __ bind(&receiver_is_value);
8901 __ EnterInternalFrame();
8902 __ push(r1);
8903 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
8904 __ LeaveInternalFrame();
8905 __ str(r0, MemOperand(sp, argc_ * kPointerSize));
8906
8907 __ bind(&receiver_is_js_object);
8908 }
8909
Steve Blocka7e24c12009-10-30 11:49:00 +00008910 // Get the function to call from the stack.
8911 // function, receiver [, arguments]
8912 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
8913
8914 // Check that the function is really a JavaScript function.
8915 // r1: pushed function (to be verified)
8916 __ BranchOnSmi(r1, &slow);
8917 // Get the map of the function object.
8918 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
8919 __ b(ne, &slow);
8920
8921 // Fast-case: Invoke the function now.
8922 // r1: pushed function
8923 ParameterCount actual(argc_);
8924 __ InvokeFunction(r1, actual, JUMP_FUNCTION);
8925
8926 // Slow-case: Non-function called.
8927 __ bind(&slow);
Andrei Popescu402d9372010-02-26 13:31:12 +00008928 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
8929 // of the original receiver from the call site).
8930 __ str(r1, MemOperand(sp, argc_ * kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00008931 __ mov(r0, Operand(argc_)); // Set up the number of arguments.
8932 __ mov(r2, Operand(0));
8933 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
8934 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
8935 RelocInfo::CODE_TARGET);
8936}
8937
8938
Steve Block6ded16b2010-05-10 14:33:55 +01008939// Unfortunately you have to run without snapshots to see most of these
8940// names in the profile since most compare stubs end up in the snapshot.
Leon Clarkee46be812010-01-19 14:06:41 +00008941const char* CompareStub::GetName() {
Steve Block6ded16b2010-05-10 14:33:55 +01008942 if (name_ != NULL) return name_;
8943 const int kMaxNameLength = 100;
8944 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
8945 if (name_ == NULL) return "OOM";
8946
8947 const char* cc_name;
Leon Clarkee46be812010-01-19 14:06:41 +00008948 switch (cc_) {
Steve Block6ded16b2010-05-10 14:33:55 +01008949 case lt: cc_name = "LT"; break;
8950 case gt: cc_name = "GT"; break;
8951 case le: cc_name = "LE"; break;
8952 case ge: cc_name = "GE"; break;
8953 case eq: cc_name = "EQ"; break;
8954 case ne: cc_name = "NE"; break;
8955 default: cc_name = "UnknownCondition"; break;
Leon Clarkee46be812010-01-19 14:06:41 +00008956 }
Steve Block6ded16b2010-05-10 14:33:55 +01008957
8958 const char* strict_name = "";
8959 if (strict_ && (cc_ == eq || cc_ == ne)) {
8960 strict_name = "_STRICT";
8961 }
8962
8963 const char* never_nan_nan_name = "";
8964 if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
8965 never_nan_nan_name = "_NO_NAN";
8966 }
8967
8968 const char* include_number_compare_name = "";
8969 if (!include_number_compare_) {
8970 include_number_compare_name = "_NO_NUMBER";
8971 }
8972
8973 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
8974 "CompareStub_%s%s%s%s",
8975 cc_name,
8976 strict_name,
8977 never_nan_nan_name,
8978 include_number_compare_name);
8979 return name_;
Leon Clarkee46be812010-01-19 14:06:41 +00008980}
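// For example, a strict equality stub with NaN handling suppressed and number
// comparison included would be named "CompareStub_EQ_STRICT_NO_NAN" under the
// format string above (an illustrative instance, not an exhaustive list).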
8981
8982
Steve Blocka7e24c12009-10-30 11:49:00 +00008983int CompareStub::MinorKey() {
Steve Block6ded16b2010-05-10 14:33:55 +01008984 // Encode the four parameters in a unique 16 bit value. To avoid duplicate
8985 // stubs, the never NaN NaN condition is only taken into account if the
8986 // condition is equals.
8987 ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
8988 return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
8989 | StrictField::encode(strict_)
8990 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
8991 | IncludeNumberCompareField::encode(include_number_compare_);
Steve Blocka7e24c12009-10-30 11:49:00 +00008992}
8993
8994
Steve Block6ded16b2010-05-10 14:33:55 +01008995void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
8996 Register object,
8997 Register index,
8998 Register scratch,
8999 Register result,
9000 Label* receiver_not_string,
9001 Label* index_not_smi,
9002 Label* index_out_of_range,
9003 Label* slow_case) {
9004 Label not_a_flat_string;
9005 Label try_again_with_new_string;
9006 Label ascii_string;
9007 Label got_char_code;
9008
9009 // If the receiver is a smi trigger the non-string case.
9010 __ BranchOnSmi(object, receiver_not_string);
9011
9012 // Fetch the instance type of the receiver into result register.
9013 __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
9014 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
9015 // If the receiver is not a string trigger the non-string case.
9016 __ tst(result, Operand(kIsNotStringMask));
9017 __ b(ne, receiver_not_string);
9018
9019 // If the index is non-smi trigger the non-smi case.
9020 __ BranchOnNotSmi(index, index_not_smi);
9021
9022 // Check for index out of range.
9023 __ ldr(scratch, FieldMemOperand(object, String::kLengthOffset));
9024 // Now scratch has the length of the string. Compare with the index.
9025 __ cmp(scratch, Operand(index));
9026 __ b(ls, index_out_of_range);
9027
9028 __ bind(&try_again_with_new_string);
9029 // ----------- S t a t e -------------
9030 // -- object : string to access
9031 // -- result : instance type of the string
9032 // -- scratch : non-negative index < length
9033 // -----------------------------------
9034
9035 // We need special handling for non-flat strings.
9036 ASSERT_EQ(0, kSeqStringTag);
9037 __ tst(result, Operand(kStringRepresentationMask));
9038 __ b(ne, &not_a_flat_string);
9039
9040 // Check for 1-byte or 2-byte string.
9041 ASSERT_EQ(0, kTwoByteStringTag);
9042 __ tst(result, Operand(kStringEncodingMask));
9043 __ b(ne, &ascii_string);
9044
9045 // 2-byte string. We can add without shifting since the Smi tag size is the
9046 // log2 of the number of bytes in a two-byte character.
9047 ASSERT_EQ(1, kSmiTagSize);
9048 ASSERT_EQ(0, kSmiShiftSize);
9049 __ add(scratch, object, Operand(index));
9050 __ ldrh(result, FieldMemOperand(scratch, SeqTwoByteString::kHeaderSize));
9051 __ jmp(&got_char_code);
9052
9053 // Handle non-flat strings.
9054 __ bind(&not_a_flat_string);
9055 __ and_(result, result, Operand(kStringRepresentationMask));
9056 __ cmp(result, Operand(kConsStringTag));
9057 __ b(ne, slow_case);
9058
9059 // ConsString.
9060 // Check whether the right hand side is the empty string (i.e. if
9061 // this is really a flat string in a cons string). If that is not
9062 // the case we would rather go to the runtime system now to flatten
9063 // the string.
9064 __ ldr(result, FieldMemOperand(object, ConsString::kSecondOffset));
9065 __ LoadRoot(scratch, Heap::kEmptyStringRootIndex);
9066 __ cmp(result, Operand(scratch));
9067 __ b(ne, slow_case);
9068
9069 // Get the first of the two strings and load its instance type.
9070 __ ldr(object, FieldMemOperand(object, ConsString::kFirstOffset));
9071 __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
9072 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
9073 __ jmp(&try_again_with_new_string);
9074
9075 // ASCII string.
9076 __ bind(&ascii_string);
9077 __ add(scratch, object, Operand(index, LSR, kSmiTagSize));
9078 __ ldrb(result, FieldMemOperand(scratch, SeqAsciiString::kHeaderSize));
9079
9080 __ bind(&got_char_code);
9081 __ mov(result, Operand(result, LSL, kSmiTagSize));
9082}
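// Roughly, the fast path above computes String.prototype.charCodeAt(index)
// for flat sequential strings (and for cons strings whose second part is the
// empty string), returning the character code as a smi; all other cases are
// routed to the non-string, non-smi, out-of-range or slow-case labels.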
9083
9084
9085void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
9086 Register code,
9087 Register scratch,
9088 Register result,
9089 InvokeFlag flag) {
9090 ASSERT(!code.is(result));
9091
9092 Label slow_case;
9093 Label exit;
9094
9095 // Fast case of Heap::LookupSingleCharacterStringFromCode.
9096 ASSERT(kSmiTag == 0);
9097 ASSERT(kSmiShiftSize == 0);
9098 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
9099 __ tst(code, Operand(kSmiTagMask |
9100 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
9101 __ b(nz, &slow_case);
9102
9103 ASSERT(kSmiTag == 0);
9104 __ mov(result, Operand(Factory::single_character_string_cache()));
9105 __ add(result, result, Operand(code, LSL, kPointerSizeLog2 - kSmiTagSize));
9106 __ ldr(result, MemOperand(result, FixedArray::kHeaderSize - kHeapObjectTag));
9107 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
9108 __ cmp(result, scratch);
9109 __ b(eq, &slow_case);
9110 __ b(&exit);
9111
9112 __ bind(&slow_case);
9113 if (flag == CALL_FUNCTION) {
9114 __ push(code);
9115 __ CallRuntime(Runtime::kCharFromCode, 1);
9116 if (!result.is(r0)) {
9117 __ mov(result, r0);
9118 }
9119 } else {
9120 ASSERT(flag == JUMP_FUNCTION);
9121 ASSERT(result.is(r0));
9122 __ push(code);
9123 __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
9124 }
9125
9126 __ bind(&exit);
9127 if (flag == JUMP_FUNCTION) {
9128 ASSERT(result.is(r0));
9129 __ Ret();
9130 }
9131}
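// A sketch of the fast path above (not generated code):
//   result = single_character_string_cache[code];
//   if (result == undefined) fall back to Runtime::kCharFromCode;
// The slow case is also taken for non-smi codes and codes above
// String::kMaxAsciiCharCode.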
9132
9133
9134void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
9135 Register dest,
9136 Register src,
9137 Register count,
9138 Register scratch,
9139 bool ascii) {
Andrei Popescu31002712010-02-23 13:46:05 +00009140 Label loop;
9141 Label done;
9142 // This loop just copies one character at a time, as it is only used for very
9143 // short strings.
9144 if (!ascii) {
9145 __ add(count, count, Operand(count), SetCC);
9146 } else {
9147 __ cmp(count, Operand(0));
9148 }
9149 __ b(eq, &done);
9150
9151 __ bind(&loop);
9152 __ ldrb(scratch, MemOperand(src, 1, PostIndex));
9153 // Perform sub between load and dependent store to get the load time to
9154 // complete.
9155 __ sub(count, count, Operand(1), SetCC);
9156 __ strb(scratch, MemOperand(dest, 1, PostIndex));
9157 // Loop back unless this was the last iteration.
9158 __ b(gt, &loop);
9159
9160 __ bind(&done);
9161}
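// Equivalent C-style sketch of the loop above (for two-byte strings the count
// is first doubled, so the copy is still byte-wise):
//   while (count-- > 0) *dest++ = *src++;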
9162
9163
9164enum CopyCharactersFlags {
9165 COPY_ASCII = 1,
9166 DEST_ALWAYS_ALIGNED = 2
9167};
9168
9169
Steve Block6ded16b2010-05-10 14:33:55 +01009170void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
9171 Register dest,
9172 Register src,
9173 Register count,
9174 Register scratch1,
9175 Register scratch2,
9176 Register scratch3,
9177 Register scratch4,
9178 Register scratch5,
9179 int flags) {
Andrei Popescu31002712010-02-23 13:46:05 +00009180 bool ascii = (flags & COPY_ASCII) != 0;
9181 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
9182
9183 if (dest_always_aligned && FLAG_debug_code) {
9184 // Check that destination is actually word aligned if the flag says
9185 // that it is.
9186 __ tst(dest, Operand(kPointerAlignmentMask));
9187 __ Check(eq, "Destination of copy not aligned.");
9188 }
9189
9190 const int kReadAlignment = 4;
9191 const int kReadAlignmentMask = kReadAlignment - 1;
9192 // Ensure that reading an entire aligned word containing the last character
9193 // of a string will not read outside the allocated area (because we pad up
9194 // to kObjectAlignment).
9195 ASSERT(kObjectAlignment >= kReadAlignment);
9196 // Assumes word reads and writes are little endian.
9197 // Nothing to do for zero characters.
9198 Label done;
9199 if (!ascii) {
9200 __ add(count, count, Operand(count), SetCC);
9201 } else {
9202 __ cmp(count, Operand(0));
9203 }
9204 __ b(eq, &done);
9205
9206 // Assume that you cannot read (or write) unaligned.
9207 Label byte_loop;
9208 // Must copy at least eight bytes, otherwise just do it one byte at a time.
9209 __ cmp(count, Operand(8));
9210 __ add(count, dest, Operand(count));
9211 Register limit = count; // Read until src equals this.
9212 __ b(lt, &byte_loop);
9213
9214 if (!dest_always_aligned) {
9215 // Align dest by byte copying. Copies between zero and three bytes.
9216 __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
9217 Label dest_aligned;
9218 __ b(eq, &dest_aligned);
9219 __ cmp(scratch4, Operand(2));
9220 __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
9221 __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
9222 __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
9223 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
9224 __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
9225 __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
9226 __ bind(&dest_aligned);
9227 }
9228
9229 Label simple_loop;
9230
9231 __ sub(scratch4, dest, Operand(src));
9232 __ and_(scratch4, scratch4, Operand(0x03), SetCC);
9233 __ b(eq, &simple_loop);
9234 // Shift register is number of bits in a source word that
9235 // must be combined with bits in the next source word in order
9236 // to create a destination word.
9237
9238 // Complex loop for src/dst that are not aligned the same way.
9239 {
9240 Label loop;
9241 __ mov(scratch4, Operand(scratch4, LSL, 3));
9242 Register left_shift = scratch4;
9243 __ and_(src, src, Operand(~3)); // Round down to load previous word.
9244 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
9245 // Store the "shift" most significant bits of scratch in the least
9246 // significant bits (i.e., shift down by (32-shift)).
9247 __ rsb(scratch2, left_shift, Operand(32));
9248 Register right_shift = scratch2;
9249 __ mov(scratch1, Operand(scratch1, LSR, right_shift));
9250
9251 __ bind(&loop);
9252 __ ldr(scratch3, MemOperand(src, 4, PostIndex));
9253 __ sub(scratch5, limit, Operand(dest));
9254 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
9255 __ str(scratch1, MemOperand(dest, 4, PostIndex));
9256 __ mov(scratch1, Operand(scratch3, LSR, right_shift));
9257 // Loop if four or more bytes left to copy.
9258 // Compare to eight, because we did the subtract before increasing dst.
9259 __ sub(scratch5, scratch5, Operand(8), SetCC);
9260 __ b(ge, &loop);
9261 }
9262 // There is now between zero and three bytes left to copy (negative that
9263 // number is in scratch5), and between one and three bytes already read into
9264 // scratch1 (eight times that number in scratch4). We may have read past
9265 // the end of the string, but because objects are aligned, we have not read
9266 // past the end of the object.
9267 // Find the minimum of remaining characters to move and preloaded characters
9268 // and write those as bytes.
9269 __ add(scratch5, scratch5, Operand(4), SetCC);
9270 __ b(eq, &done);
9271 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
9272 // Move minimum of bytes read and bytes left to copy to scratch4.
9273 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
9274 // Between one and three (value in scratch5) characters already read into
9275 // scratch ready to write.
9276 __ cmp(scratch5, Operand(2));
9277 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
9278 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
9279 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
9280 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
9281 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
9282 // Copy any remaining bytes.
9283 __ b(&byte_loop);
9284
9285 // Simple loop.
9286 // Copy words from src to dst, until less than four bytes left.
9287 // Both src and dest are word aligned.
9288 __ bind(&simple_loop);
9289 {
9290 Label loop;
9291 __ bind(&loop);
9292 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
9293 __ sub(scratch3, limit, Operand(dest));
9294 __ str(scratch1, MemOperand(dest, 4, PostIndex));
9295 // Compare to 8, not 4, because we do the subtraction before increasing
9296 // dest.
9297 __ cmp(scratch3, Operand(8));
9298 __ b(ge, &loop);
9299 }
9300
9301 // Copy bytes from src to dst until dst hits limit.
9302 __ bind(&byte_loop);
9303 __ cmp(dest, Operand(limit));
9304 __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
9305 __ b(ge, &done);
9306 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
9307 __ b(&byte_loop);
9308
9309 __ bind(&done);
9310}
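// In the misaligned case above, each destination word is assembled from two
// adjacent source words (a sketch of the loop body, not generated code):
//   dst_word = (prev_word >> right_shift) | (next_word << left_shift);
//   prev_word = next_word;
// where left_shift is 8 * ((dest - src) & 3) and right_shift is 32 - left_shift.
// The aligned case is a plain word-copy loop, with any leftover bytes copied
// one at a time.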
9311
9312
Steve Block6ded16b2010-05-10 14:33:55 +01009313void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
9314 Register c1,
9315 Register c2,
9316 Register scratch1,
9317 Register scratch2,
9318 Register scratch3,
9319 Register scratch4,
9320 Register scratch5,
9321 Label* not_found) {
9322 // Register scratch3 is the general scratch register in this function.
9323 Register scratch = scratch3;
9324
9325 // Make sure that both characters are not digits, as such strings have a
9326 // different hash algorithm. Don't try to look for these in the symbol table.
9327 Label not_array_index;
9328 __ sub(scratch, c1, Operand(static_cast<int>('0')));
9329 __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
9330 __ b(hi, &not_array_index);
9331 __ sub(scratch, c2, Operand(static_cast<int>('0')));
9332 __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
9333
9334 // If the check failed, combine both characters into a single halfword.
9335 // This is required by the contract of the method: code at the
9336 // not_found branch expects this combination in the c1 register.
9337 __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
9338 __ b(ls, not_found);
9339
9340 __ bind(&not_array_index);
9341 // Calculate the two character string hash.
9342 Register hash = scratch1;
9343 StringHelper::GenerateHashInit(masm, hash, c1);
9344 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
9345 StringHelper::GenerateHashGetHash(masm, hash);
9346
9347 // Collect the two characters in a register.
9348 Register chars = c1;
9349 __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
9350
9351 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
9352 // hash: hash of two character string.
9353
9354 // Load symbol table
9355 // Load address of first element of the symbol table.
9356 Register symbol_table = c2;
9357 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
9358
9359 // Load undefined value
9360 Register undefined = scratch4;
9361 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
9362
9363 // Calculate capacity mask from the symbol table capacity.
9364 Register mask = scratch2;
9365 __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
9366 __ mov(mask, Operand(mask, ASR, 1));
9367 __ sub(mask, mask, Operand(1));
9368
9369 // Calculate untagged address of the first element of the symbol table.
9370 Register first_symbol_table_element = symbol_table;
9371 __ add(first_symbol_table_element, symbol_table,
9372 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
9373
9374 // Registers
9375 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
9376 // hash: hash of two character string
9377 // mask: capacity mask
9378 // first_symbol_table_element: address of the first element of
9379 // the symbol table
9380 // scratch: -
9381
9382 // Perform a number of probes in the symbol table.
9383 static const int kProbes = 4;
9384 Label found_in_symbol_table;
9385 Label next_probe[kProbes];
9386 for (int i = 0; i < kProbes; i++) {
9387 Register candidate = scratch5; // Scratch register contains candidate.
9388
9389 // Calculate entry in symbol table.
9390 if (i > 0) {
9391 __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
9392 } else {
9393 __ mov(candidate, hash);
9394 }
9395
9396 __ and_(candidate, candidate, Operand(mask));
9397
9398 // Load the entry from the symbol table.
9399 ASSERT_EQ(1, SymbolTable::kEntrySize);
9400 __ ldr(candidate,
9401 MemOperand(first_symbol_table_element,
9402 candidate,
9403 LSL,
9404 kPointerSizeLog2));
9405
9406 // If entry is undefined no string with this hash can be found.
9407 __ cmp(candidate, undefined);
9408 __ b(eq, not_found);
9409
9410 // If length is not 2 the string is not a candidate.
9411 __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
9412 __ cmp(scratch, Operand(Smi::FromInt(2)));
9413 __ b(ne, &next_probe[i]);
9414
9415 // Check that the candidate is a non-external ascii string.
9416 __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
9417 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
9418 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
9419 &next_probe[i]);
9420
9421 // Check if the two characters match.
9422 // Assumes that word load is little endian.
9423 __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
9424 __ cmp(chars, scratch);
9425 __ b(eq, &found_in_symbol_table);
9426 __ bind(&next_probe[i]);
9427 }
9428
9429 // No matching 2 character string found by probing.
9430 __ jmp(not_found);
9431
9432 // Scratch register contains result when we fall through to here.
9433 Register result = scratch;
9434 __ bind(&found_in_symbol_table);
9435 __ Move(r0, result);
9436}
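// A sketch of the probing scheme above (not generated code): for i in 0..3,
//   candidate = symbol_table[(hash + GetProbeOffset(i)) & capacity_mask];
// an undefined entry means no such two-character symbol exists; a sequential
// ascii candidate of length 2 whose first two bytes equal the packed
// character pair in 'chars' is the result.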
9437
9438
9439void StringHelper::GenerateHashInit(MacroAssembler* masm,
9440 Register hash,
9441 Register character) {
9442 // hash = character + (character << 10);
9443 __ add(hash, character, Operand(character, LSL, 10));
9444 // hash ^= hash >> 6;
9445 __ eor(hash, hash, Operand(hash, ASR, 6));
9446}
9447
9448
9449void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
9450 Register hash,
9451 Register character) {
9452 // hash += character;
9453 __ add(hash, hash, Operand(character));
9454 // hash += hash << 10;
9455 __ add(hash, hash, Operand(hash, LSL, 10));
9456 // hash ^= hash >> 6;
9457 __ eor(hash, hash, Operand(hash, ASR, 6));
9458}
9459
9460
9461void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
9462 Register hash) {
9463 // hash += hash << 3;
9464 __ add(hash, hash, Operand(hash, LSL, 3));
9465 // hash ^= hash >> 11;
9466 __ eor(hash, hash, Operand(hash, ASR, 11));
9467 // hash += hash << 15;
9468 __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
9469
9470 // if (hash == 0) hash = 27;
9471 __ mov(hash, Operand(27), LeaveCC, nz);
9472}
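// Taken together, GenerateHashInit, GenerateHashAddCharacter and
// GenerateHashGetHash emit code for the following string hash (a C-style
// sketch mirroring the comments above, not generated code):
//   hash = c0 + (c0 << 10); hash ^= hash >> 6;
//   for each further character c: { hash += c; hash += hash << 10; hash ^= hash >> 6; }
//   hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;
//   if (hash == 0) hash = 27;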
9473
9474
Andrei Popescu31002712010-02-23 13:46:05 +00009475void SubStringStub::Generate(MacroAssembler* masm) {
9476 Label runtime;
9477
9478 // Stack frame on entry.
9479 // lr: return address
9480 // sp[0]: to
9481 // sp[4]: from
9482 // sp[8]: string
9483
9484 // This stub is called from the native-call %_SubString(...), so
9485 // nothing can be assumed about the arguments. It is tested that:
9486 // "string" is a sequential string,
9487 // both "from" and "to" are smis, and
9488 // 0 <= from <= to <= string.length.
9489 // If any of these assumptions fail, we call the runtime system.
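  // In rough pseudocode (illustrative only; tagging, register assignments and
  // the exact order of the checks are simplified), the fast path below is:
  //
  //   if (!IsSmi(to) || !IsSmi(from)) goto runtime;
  //   if (from < 0 || to < from) goto runtime;
  //   int length = to - from;
  //   if (length < 2) goto runtime;                   // Short results use the runtime.
  //   if (!IsSequentialString(string)) goto runtime;  // One cons level is unwrapped.
  //   if (to > string.length) goto runtime;
  //   allocate the result and copy length characters starting at from;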
9490
9491 static const int kToOffset = 0 * kPointerSize;
9492 static const int kFromOffset = 1 * kPointerSize;
9493 static const int kStringOffset = 2 * kPointerSize;
9494
9495
9496 // Check bounds and smi-ness.
9497 __ ldr(r7, MemOperand(sp, kToOffset));
9498 __ ldr(r6, MemOperand(sp, kFromOffset));
9499 ASSERT_EQ(0, kSmiTag);
9500 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
9501 // I.e., arithmetic shift right by one un-smi-tags.
9502 __ mov(r2, Operand(r7, ASR, 1), SetCC);
9503 __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
9504 // If either r2 or r6 had the smi tag bit set, then carry is set now.
9505 __ b(cs, &runtime); // Either "from" or "to" is not a smi.
9506 __ b(mi, &runtime); // From is negative.
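  // The two mov instructions above exploit the smi encoding: a smi has its
  // low (tag) bit clear, so an arithmetic shift right by one untags the value
  // and moves the tag bit into the carry flag. Illustrative example:
  //   to = 6 (the smi encoding of 3)  -> ASR #1 gives 3, carry 0 (was a smi)
  //   to = 7 (low bit set, not a smi) -> ASR #1 gives 3, carry 1 -> runtime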
9507
9508 __ sub(r2, r2, Operand(r3), SetCC);
9509 __ b(mi, &runtime); // Fail if from > to.
Steve Block6ded16b2010-05-10 14:33:55 +01009510 // Special handling of sub-strings of length 1 and 2. One character strings
9511 // are handled in the runtime system (looked up in the single character
9512 // cache). Two character strings are looked up in the symbol table.
Andrei Popescu31002712010-02-23 13:46:05 +00009513 __ cmp(r2, Operand(2));
Steve Block6ded16b2010-05-10 14:33:55 +01009514 __ b(lt, &runtime);
Andrei Popescu31002712010-02-23 13:46:05 +00009515
9516 // r2: length
Steve Block6ded16b2010-05-10 14:33:55 +01009517 // r3: from index (untagged smi)
Andrei Popescu31002712010-02-23 13:46:05 +00009518 // r6: from (smi)
9519 // r7: to (smi)
9520
9521 // Make sure first argument is a sequential (or flat) string.
9522 __ ldr(r5, MemOperand(sp, kStringOffset));
9523 ASSERT_EQ(0, kSmiTag);
9524 __ tst(r5, Operand(kSmiTagMask));
9525 __ b(eq, &runtime);
9526 Condition is_string = masm->IsObjectStringType(r5, r1);
9527 __ b(NegateCondition(is_string), &runtime);
9528
9529 // r1: instance type
9530 // r2: length
Steve Block6ded16b2010-05-10 14:33:55 +01009531 // r3: from index (untagged smi)
Andrei Popescu31002712010-02-23 13:46:05 +00009532 // r5: string
9533 // r6: from (smi)
9534 // r7: to (smi)
9535 Label seq_string;
9536 __ and_(r4, r1, Operand(kStringRepresentationMask));
9537 ASSERT(kSeqStringTag < kConsStringTag);
9538 ASSERT(kExternalStringTag > kConsStringTag);
9539 __ cmp(r4, Operand(kConsStringTag));
9540 __ b(gt, &runtime); // External strings go to runtime.
9541 __ b(lt, &seq_string); // Sequential strings are handled directly.
9542
9543 // Cons string. Try to recurse (once) on the first substring.
9544 // (This adds a little more generality than necessary to handle flattened
9545 // cons strings, but not much).
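  // Illustrative example: a flattened cons string c == Cons("abcdef", "")
  // keeps all of its characters in c.first, so it is enough to load
  // kFirstOffset and re-check the representation; any other cons shape (or an
  // external first part) falls through to the runtime below.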
9546 __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
9547 __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
9548 __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9549 __ tst(r1, Operand(kStringRepresentationMask));
9550 ASSERT_EQ(0, kSeqStringTag);
9551 __ b(ne, &runtime); // Cons and External strings go to runtime.
9552
9553 // Definitely a sequential string.
9554 __ bind(&seq_string);
9555
9556 // r1: instance type.
9557 // r2: length
Steve Block6ded16b2010-05-10 14:33:55 +01009558 // r3: from index (untagged smi)
Andrei Popescu31002712010-02-23 13:46:05 +00009559 // r5: string
9560 // r6: from (smi)
9561 // r7: to (smi)
9562 __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01009563 __ cmp(r4, Operand(r7));
Andrei Popescu31002712010-02-23 13:46:05 +00009564 __ b(lt, &runtime); // Fail if to > length.
9565
9566 // r1: instance type.
9567 // r2: result string length.
Steve Block6ded16b2010-05-10 14:33:55 +01009568 // r3: from index (untagged smi)
Andrei Popescu31002712010-02-23 13:46:05 +00009569 // r5: string.
9570 // r6: from offset (smi)
9571 // Check for flat ascii string.
9572 Label non_ascii_flat;
9573 __ tst(r1, Operand(kStringEncodingMask));
9574 ASSERT_EQ(0, kTwoByteStringTag);
9575 __ b(eq, &non_ascii_flat);
9576
Steve Block6ded16b2010-05-10 14:33:55 +01009577 Label result_longer_than_two;
9578 __ cmp(r2, Operand(2));
9579 __ b(gt, &result_longer_than_two);
9580
9581 // Sub string of length 2 requested.
9582 // Get the two characters forming the sub string.
9583 __ add(r5, r5, Operand(r3));
9584 __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
9585 __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
9586
9587 // Try to look up the two character string in the symbol table.
9588 Label make_two_character_string;
9589 StringHelper::GenerateTwoCharacterSymbolTableProbe(
9590 masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
9591 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
9592 __ add(sp, sp, Operand(3 * kPointerSize));
9593 __ Ret();
9594
9595 // r2: result string length.
9596 // r3: two characters combined into halfword in little endian byte order.
9597 __ bind(&make_two_character_string);
9598 __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
9599 __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
9600 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
9601 __ add(sp, sp, Operand(3 * kPointerSize));
9602 __ Ret();
9603
9604 __ bind(&result_longer_than_two);
9605
Andrei Popescu31002712010-02-23 13:46:05 +00009606 // Allocate the result.
9607 __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
9608
9609 // r0: result string.
9610 // r2: result string length.
9611 // r5: string.
9612 // r6: from offset (smi)
9613 // Locate first character of result.
9614 __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9615 // Locate 'from' character of string.
9616 __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9617 __ add(r5, r5, Operand(r6, ASR, 1));
9618
9619 // r0: result string.
9620 // r1: first character of result string.
9621 // r2: result string length.
9622 // r5: first character of sub string to copy.
9623 ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
Steve Block6ded16b2010-05-10 14:33:55 +01009624 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
9625 COPY_ASCII | DEST_ALWAYS_ALIGNED);
Andrei Popescu31002712010-02-23 13:46:05 +00009626 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
9627 __ add(sp, sp, Operand(3 * kPointerSize));
9628 __ Ret();
9629
9630 __ bind(&non_ascii_flat);
9631 // r2: result string length.
9632 // r5: string.
9633 // r6: from offset (smi)
9634 // The string is a flat two byte string here.
9635
9636 // Allocate the result.
9637 __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
9638
9639 // r0: result string.
9640 // r2: result string length.
9641 // r5: string.
9642 // Locate first character of result.
9643 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9644 // Locate 'from' character of string.
9645 __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9646 // As "from" is a smi it is 2 times the value which matches the size of a two
9647 // byte character.
9648 __ add(r5, r5, Operand(r6));
9649
9650 // r0: result string.
9651 // r1: first character of result.
9652 // r2: result length.
9653 // r5: first character of string to copy.
9654 ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
Steve Block6ded16b2010-05-10 14:33:55 +01009655 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
9656 DEST_ALWAYS_ALIGNED);
Andrei Popescu31002712010-02-23 13:46:05 +00009657 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
9658 __ add(sp, sp, Operand(3 * kPointerSize));
9659 __ Ret();
9660
9661 // Just jump to runtime to create the sub string.
9662 __ bind(&runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01009663 __ TailCallRuntime(Runtime::kSubString, 3, 1);
Andrei Popescu31002712010-02-23 13:46:05 +00009664}
Leon Clarked91b9f72010-01-27 17:25:45 +00009665
9666
9667void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
9668 Register left,
9669 Register right,
9670 Register scratch1,
9671 Register scratch2,
9672 Register scratch3,
9673 Register scratch4) {
9674 Label compare_lengths;
9675 // Find minimum length and length difference.
9676 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
9677 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
9678 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
9679 Register length_delta = scratch3;
9680 __ mov(scratch1, scratch2, LeaveCC, gt);
9681 Register min_length = scratch1;
Steve Block6ded16b2010-05-10 14:33:55 +01009682 ASSERT(kSmiTag == 0);
Leon Clarked91b9f72010-01-27 17:25:45 +00009683 __ tst(min_length, Operand(min_length));
9684 __ b(eq, &compare_lengths);
9685
Steve Block6ded16b2010-05-10 14:33:55 +01009686 // Untag smi.
9687 __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
9688
Leon Clarked91b9f72010-01-27 17:25:45 +00009689 // Set up registers so that we only need to increment one register
9690 // in the loop.
9691 __ add(scratch2, min_length,
9692 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9693 __ add(left, left, Operand(scratch2));
9694 __ add(right, right, Operand(scratch2));
9695 // Registers left and right now point min_length characters past the start of each string.
9696 __ rsb(min_length, min_length, Operand(-1));
9697 Register index = min_length;
9698 // Index starts at -(min_length + 1) and is pre-incremented in the loop below.
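
  // In effect, the loop below is (illustrative C only):
  //
  //   for (int i = -min_length; i != 0; i++) {
  //     if (left_end[i] != right_end[i]) break;  // flags record the ordering
  //   }
  //
  // where left_end and right_end point min_length characters past the start
  // of each string, so one incrementing index addresses both strings and
  // reaching zero means the whole common prefix matched.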
9699
9700 {
9701 // Compare loop.
9702 Label loop;
9703 __ bind(&loop);
9704 // Compare characters.
9705 __ add(index, index, Operand(1), SetCC);
9706 __ ldrb(scratch2, MemOperand(left, index), ne);
9707 __ ldrb(scratch4, MemOperand(right, index), ne);
9708 // Skip to compare lengths with eq condition true.
9709 __ b(eq, &compare_lengths);
9710 __ cmp(scratch2, scratch4);
9711 __ b(eq, &loop);
9712 // Fallthrough with eq condition false.
9713 }
9714 // Compare lengths - strings up to min-length are equal.
9715 __ bind(&compare_lengths);
9716 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
9717 // All compared characters matched: set r0 and the flags from the length difference (zero means EQUAL).
9718 __ mov(r0, Operand(length_delta), SetCC, eq);
9719 // Fall through to here if characters compare not-equal.
9720 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
9721 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
9722 __ Ret();
9723}
9724
9725
9726void StringCompareStub::Generate(MacroAssembler* masm) {
9727 Label runtime;
9728
9729 // Stack frame on entry.
Andrei Popescu31002712010-02-23 13:46:05 +00009730 // sp[0]: right string
9731 // sp[4]: left string
9732 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
9733 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
Leon Clarked91b9f72010-01-27 17:25:45 +00009734
9735 Label not_same;
9736 __ cmp(r0, r1);
9737 __ b(ne, &not_same);
9738 ASSERT_EQ(0, EQUAL);
9739 ASSERT_EQ(0, kSmiTag);
9740 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
9741 __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
9742 __ add(sp, sp, Operand(2 * kPointerSize));
9743 __ Ret();
9744
9745 __ bind(&not_same);
9746
9747 // Check that both objects are sequential ascii strings.
9748 __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
9749
9750 // Compare flat ascii strings natively. Remove arguments from stack first.
9751 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
9752 __ add(sp, sp, Operand(2 * kPointerSize));
9753 GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
9754
9755 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
9756 // tagged as a small integer.
9757 __ bind(&runtime);
Steve Block6ded16b2010-05-10 14:33:55 +01009758 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
Leon Clarked91b9f72010-01-27 17:25:45 +00009759}
9760
9761
Andrei Popescu31002712010-02-23 13:46:05 +00009762void StringAddStub::Generate(MacroAssembler* masm) {
9763 Label string_add_runtime;
9764 // Stack on entry:
9765 // sp[0]: second argument.
9766 // sp[4]: first argument.
9767
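  // The overall strategy below is roughly (illustrative pseudocode only; the
  // type checks and the error paths into the runtime are elided):
  //
  //   if (first.length == 0) return second;
  //   if (second.length == 0) return first;
  //   length = first.length + second.length;
  //   if (length == 2)                       use the two character symbol table path;
  //   else if (length < kMinNonFlatLength)   allocate a flat string and copy both;
  //   else if (length > String::kMaxLength)  goto runtime;
  //   else                                   allocate a ConsString(first, second);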
9768 // Load the two arguments.
9769 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
9770 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
9771
9772 // Make sure that both arguments are strings if not known in advance.
9773 if (string_check_) {
9774 ASSERT_EQ(0, kSmiTag);
9775 __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
9776 // Load instance types.
9777 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
9778 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
9779 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9780 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
9781 ASSERT_EQ(0, kStringTag);
9782 // If either is not a string, go to runtime.
9783 __ tst(r4, Operand(kIsNotStringMask));
9784 __ tst(r5, Operand(kIsNotStringMask), eq);
9785 __ b(ne, &string_add_runtime);
9786 }
9787
9788 // Both arguments are strings.
9789 // r0: first string
9790 // r1: second string
9791 // r4: first string instance type (if string_check_)
9792 // r5: second string instance type (if string_check_)
9793 {
9794 Label strings_not_empty;
9795 // Check if either of the strings is empty. In that case return the other.
9796 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
9797 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01009798 ASSERT(kSmiTag == 0);
9799 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
Andrei Popescu31002712010-02-23 13:46:05 +00009800 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
Steve Block6ded16b2010-05-10 14:33:55 +01009801 ASSERT(kSmiTag == 0);
9802 // Else test if second string is empty.
9803 __ cmp(r3, Operand(Smi::FromInt(0)), ne);
Andrei Popescu31002712010-02-23 13:46:05 +00009804 __ b(ne, &strings_not_empty); // If either string was empty, return r0.
9805
9806 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9807 __ add(sp, sp, Operand(2 * kPointerSize));
9808 __ Ret();
9809
9810 __ bind(&strings_not_empty);
9811 }
9812
Steve Block6ded16b2010-05-10 14:33:55 +01009813 __ mov(r2, Operand(r2, ASR, kSmiTagSize));
9814 __ mov(r3, Operand(r3, ASR, kSmiTagSize));
Andrei Popescu31002712010-02-23 13:46:05 +00009815 // Both strings are non-empty.
9816 // r0: first string
9817 // r1: second string
9818 // r2: length of first string
9819 // r3: length of second string
9820 // r4: first string instance type (if string_check_)
9821 // r5: second string instance type (if string_check_)
9822 // Look at the length of the result of adding the two strings.
Steve Block6ded16b2010-05-10 14:33:55 +01009823 Label string_add_flat_result, longer_than_two;
Andrei Popescu31002712010-02-23 13:46:05 +00009824 // Adding two lengths can't overflow.
9825 ASSERT(String::kMaxLength * 2 > String::kMaxLength);
9826 __ add(r6, r2, Operand(r3));
9827 // When adding two one character strings, check the symbol table first for
9828 // an existing two character string with the combined characters.
9829 __ cmp(r6, Operand(2));
Steve Block6ded16b2010-05-10 14:33:55 +01009830 __ b(ne, &longer_than_two);
9831
9832 // Check that both strings are sequential ascii strings.
9833 if (!string_check_) {
9834 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
9835 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
9836 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9837 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
9838 }
9839 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
9840 &string_add_runtime);
9841
9842 // Get the two characters forming the new string.
9843 __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
9844 __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
9845
9846 // Try to look up the two character string in the symbol table. If it is
9847 // not found, just allocate a new one.
9848 Label make_two_character_string;
9849 StringHelper::GenerateTwoCharacterSymbolTableProbe(
9850 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
9851 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9852 __ add(sp, sp, Operand(2 * kPointerSize));
9853 __ Ret();
9854
9855 __ bind(&make_two_character_string);
9856 // The resulting string has length 2 and the first characters of the two
9857 // strings are combined into a single halfword in r2.
9858 // This lets us fill the resulting string with a single halfword store
9859 // instruction instead of a copy loop (assuming the processor is in
9860 // little endian mode).
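  // Illustrative example: first characters 'a' (0x61) and 'b' (0x62) were
  // combined as r2 == 0x6261, and the halfword store below writes the bytes
  // 0x61, 0x62 in memory order, i.e. the ascii string "ab", on a little
  // endian core.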
9861 __ mov(r6, Operand(2));
9862 __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
9863 __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
9864 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9865 __ add(sp, sp, Operand(2 * kPointerSize));
9866 __ Ret();
9867
9868 __ bind(&longer_than_two);
Andrei Popescu31002712010-02-23 13:46:05 +00009869 // Check if resulting string will be flat.
9870 __ cmp(r6, Operand(String::kMinNonFlatLength));
9871 __ b(lt, &string_add_flat_result);
9872 // Handle exceptionally long strings in the runtime system.
9873 ASSERT((String::kMaxLength & 0x80000000) == 0);
9874 ASSERT(IsPowerOf2(String::kMaxLength + 1));
9875 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
9876 __ cmp(r6, Operand(String::kMaxLength + 1));
9877 __ b(hs, &string_add_runtime);
9878
9879 // If result is not supposed to be flat, allocate a cons string object.
9880 // If both strings are ascii the result is an ascii cons string.
9881 if (!string_check_) {
9882 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
9883 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
9884 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9885 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
9886 }
9887 Label non_ascii, allocated;
9888 ASSERT_EQ(0, kTwoByteStringTag);
9889 __ tst(r4, Operand(kStringEncodingMask));
9890 __ tst(r5, Operand(kStringEncodingMask), ne);
9891 __ b(eq, &non_ascii);
9892
9893 // Allocate an ASCII cons string.
9894 __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
9895 __ bind(&allocated);
9896 // Fill the fields of the cons string.
9897 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
9898 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
9899 __ mov(r0, Operand(r7));
9900 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9901 __ add(sp, sp, Operand(2 * kPointerSize));
9902 __ Ret();
9903
9904 __ bind(&non_ascii);
9905 // Allocate a two byte cons string.
9906 __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
9907 __ jmp(&allocated);
9908
9909 // Handle creating a flat result. First check that both strings are
9910 // sequential and that they have the same encoding.
9911 // r0: first string
9912 // r1: second string
9913 // r2: length of first string
9914 // r3: length of second string
9915 // r4: first string instance type (if string_check_)
9916 // r5: second string instance type (if string_check_)
9917 // r6: sum of lengths.
9918 __ bind(&string_add_flat_result);
9919 if (!string_check_) {
9920 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
9921 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
9922 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
9923 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
9924 }
9925 // Check that both strings are sequential.
9926 ASSERT_EQ(0, kSeqStringTag);
9927 __ tst(r4, Operand(kStringRepresentationMask));
9928 __ tst(r5, Operand(kStringRepresentationMask), eq);
9929 __ b(ne, &string_add_runtime);
9930 // Now check if both strings have the same encoding (ASCII/Two-byte).
9931 // r0: first string.
9932 // r1: second string.
9933 // r2: length of first string.
9934 // r3: length of second string.
9935 // r6: sum of lengths.
9936 Label non_ascii_string_add_flat_result;
9937 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
9938 __ eor(r7, r4, Operand(r5));
9939 __ tst(r7, Operand(kStringEncodingMask));
9940 __ b(ne, &string_add_runtime);
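  // (The eor/tst pair above tests encoding equality: the two instance types
  // have the same encoding exactly when the encoding bit of their XOR is
  // clear, e.g. ascii ^ ascii and two-byte ^ two-byte both clear that bit.)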
9941 // And see if it's ASCII or two-byte.
9942 __ tst(r4, Operand(kStringEncodingMask));
9943 __ b(eq, &non_ascii_string_add_flat_result);
9944
9945 // Both strings are sequential ASCII strings. We also know that they are
9946 // short (since the sum of the lengths is less than kMinNonFlatLength).
Steve Block6ded16b2010-05-10 14:33:55 +01009947 // r6: length of resulting flat string
Andrei Popescu31002712010-02-23 13:46:05 +00009948 __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
9949 // Locate first character of result.
9950 __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9951 // Locate first character of first argument.
9952 __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9953 // r0: first character of first string.
9954 // r1: second string.
9955 // r2: length of first string.
9956 // r3: length of second string.
9957 // r6: first character of result.
9958 // r7: result string.
Steve Block6ded16b2010-05-10 14:33:55 +01009959 StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
Andrei Popescu31002712010-02-23 13:46:05 +00009960
9961 // Load second argument and locate first character.
9962 __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9963 // r1: first character of second string.
9964 // r3: length of second string.
9965 // r6: next character of result.
9966 // r7: result string.
Steve Block6ded16b2010-05-10 14:33:55 +01009967 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
Andrei Popescu31002712010-02-23 13:46:05 +00009968 __ mov(r0, Operand(r7));
9969 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
9970 __ add(sp, sp, Operand(2 * kPointerSize));
9971 __ Ret();
9972
9973 __ bind(&non_ascii_string_add_flat_result);
9974 // Both strings are sequential two byte strings.
9975 // r0: first string.
9976 // r1: second string.
9977 // r2: length of first string.
9978 // r3: length of second string.
9979 // r6: sum of length of strings.
9980 __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
9981 // r0: first string.
9982 // r1: second string.
9983 // r2: length of first string.
9984 // r3: length of second string.
9985 // r7: result string.
9986
9987 // Locate first character of result.
9988 __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9989 // Locate first character of first argument.
9990 __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9991
9992 // r0: first character of first string.
9993 // r1: second string.
9994 // r2: length of first string.
9995 // r3: length of second string.
9996 // r6: first character of result.
9997 // r7: result string.
Steve Block6ded16b2010-05-10 14:33:55 +01009998 StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
Andrei Popescu31002712010-02-23 13:46:05 +00009999
10000 // Locate first character of second argument.
10001 __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10002
10003 // r1: first character of second string.
10004 // r3: length of second string.
10005 // r6: next character of result (after copy of first string).
10006 // r7: result string.
Steve Block6ded16b2010-05-10 14:33:55 +010010007 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
Andrei Popescu31002712010-02-23 13:46:05 +000010008
10009 __ mov(r0, Operand(r7));
10010 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
10011 __ add(sp, sp, Operand(2 * kPointerSize));
10012 __ Ret();
10013
10014 // Just jump to runtime to add the two strings.
10015 __ bind(&string_add_runtime);
Steve Block6ded16b2010-05-10 14:33:55 +010010016 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
Andrei Popescu31002712010-02-23 13:46:05 +000010017}
10018
10019
Steve Blocka7e24c12009-10-30 11:49:00 +000010020#undef __
10021
10022} } // namespace v8::internal