// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in a0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(a0, &check_heap_number);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&call_builtin);
  __ push(a0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(a3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
  __ Push(cp, a3, t0);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // Set up the fixed slots.
  __ li(a1, Operand(Smi::FromInt(0)));
  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the previous context.
  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ Pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0, a1, a2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(a3, &after_sentinel);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ Assert(eq, message, a3, Operand(zero_reg));
  }
  __ lw(a3, GlobalObjectOperand());
  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));

  // Copy the global object from the previous context.
  __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack arguments and return.
  __ mov(cp, v0);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    int length,
    FastCloneShallowArrayStub::Mode mode,
    Label* fail) {
  // Registers on entry:
  // a3: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length > 0) {
    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length)
        : FixedArray::SizeFor(length);
  }
  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size,
                        v0,
                        a1,
                        a2,
                        fail,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ lw(a1, FieldMemOperand(a3, i));
      __ sw(a1, FieldMemOperand(v0, i));
    }
  }

  if (length > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ Addu(a2, v0, Operand(JSArray::kSize));
    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));

    // Copy the elements array.
    ASSERT((elements_size % kPointerSize) == 0);
    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
  }
}

void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a3, t0);
  __ lw(a3, MemOperand(t0));
  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t1));

  FastCloneShallowArrayStub::Mode mode = mode_;
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);

    __ bind(&check_fast_elements);
    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
    __ Branch(&double_elements, ne, v0, Operand(t1));
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.
  }

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
    } else {
      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(a3);
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ LoadRoot(at, expected_map_index);
    __ Assert(eq, message, a3, Operand(at));
    __ pop(a3);
  }

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);

  // Return and remove the on-stack parameters.
  __ Addu(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: object literal flags.
  // [sp + kPointerSize]: constant properties.
  // [sp + (2 * kPointerSize)]: literal index.
  // [sp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, t0, a3);
  __ lw(a3, MemOperand(a3));
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t0));

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ lw(a1, FieldMemOperand(a3, i));
    __ sw(a1, FieldMemOperand(a0, i));
  }

  // Return and remove the on-stack parameters.
  __ Drop(4);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ sra(source_, source_, kSmiTagSize);
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
  // Subtract from 0 if source was negative.
  __ subu(at, zero_reg, source_);
  __ movn(source_, at, exponent);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  // Safe to use 'at' as dest reg here.
  __ Or(at, exponent, Operand(exponent_word_for_1));
  __ movn(exponent, at, source_);  // Write exp when source not 0.
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, zero_reg);
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
  __ subu(mantissa, mantissa, zeros_);
  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
  __ Or(exponent, exponent, mantissa);

  // Shift up the source chopping the top bit off.
  __ Addu(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ sllv(source_, source_, zeros_);
  // Compute lower part of fraction (last 12 bits).
  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
  // And the top (top 20 bits).
  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
  __ or_(exponent, exponent, source_);

  __ Ret();
}
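
// Editor's note: an illustrative, non-authoritative sketch of what the stub
// above computes, expressed in plain C++. The helper name is hypothetical and
// not part of V8; it just shows the two words the stub produces (the word
// holding sign/exponent/mantissa[51:32] and the word holding mantissa[31:0]):
//
//   static void SmiValueToDoubleWords(int32_t value,          // untagged Smi
//                                     uint32_t* exponent_word,
//                                     uint32_t* mantissa_word) {
//     double d = static_cast<double>(value);   // exact for any int32
//     uint64_t bits;
//     memcpy(&bits, &d, sizeof(bits));         // IEEE 754 bit pattern
//     *exponent_word = static_cast<uint32_t>(bits >> 32);
//     *mantissa_word = static_cast<uint32_t>(bits);
//   }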


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(scratch1, a0, kSmiTagSize);
    __ mtc1(scratch1, f14);
    __ cvt_d_w(f14, f14);
    __ sra(scratch1, a1, kSmiTagSize);
    __ mtc1(scratch1, f12);
    __ cvt_d_w(f12, f12);
    if (destination == kCoreRegisters) {
      __ Move(a2, a3, f14);
      __ Move(a0, a1, f12);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from a0 to a3 and a2 in double format.
    __ mov(scratch1, a0);
    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
    __ push(ra);
    __ Call(stub1.GetCode());
    // Write Smi from a1 to a1 and a0 in double format.
    __ mov(scratch1, a1);
    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
    __ Call(stub2.GetCode());
    __ pop(ra);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {

  // Load right operand (a0) to f14 or a2/a3.
  LoadNumber(masm, destination,
             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (a1) to f12 or a0/a1.
  LoadNumber(masm, destination,
             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     FPURegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  __ JumpIfSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(FPU) &&
      destination == kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    // Load the double from tagged HeapNumber to double register.

    // ARM uses a workaround here because of the unaligned HeapNumber
    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
    // point in generating even more instructions.
    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
    __ lw(dst2, FieldMemOperand(object,
                                HeapNumber::kValueOffset + kPointerSize));
  }
  __ Branch(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Convert smi to double using FPU instructions.
    __ SmiUntag(scratch1, object);
    __ mtc1(scratch1, dst);
    __ cvt_d_w(dst, dst);
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ Move(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 in double format.
    __ mov(scratch1, object);
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(ra);
    __ Call(stub.GetCode());
    __ pop(ra);
  }

  __ bind(&done);
}


void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               FPURegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label is_smi;
  Label done;
  Label not_in_int32_range;

  __ JumpIfSmi(object, &is_smi);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
  __ ConvertToInt32(object,
                    dst,
                    scratch1,
                    scratch2,
                    double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,
                                 scratch1,
                                 scratch2,
                                 scratch3);

  __ jmp(&done);

  __ bind(&is_smi);
  __ SmiUntag(dst, object);
  __ bind(&done);
}


void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             FPURegister double_dst,
                                             Register dst1,
                                             Register dst2,
                                             Register scratch2,
                                             FPURegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  Label done;

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(int_scratch, single_scratch);
    __ cvt_d_w(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |          dst2           |          dst1           |
    // | s |   exp   |               mantissa              |

    // Check for zero.
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);
    __ Branch(&done, eq, int_scratch, Operand(zero_reg));

    // Preload the sign of the value.
    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
    // Get the absolute value of the object (as an unsigned integer).
    Label skip_sub;
    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
    __ Subu(int_scratch, zero_reg, int_scratch);
    __ bind(&skip_sub);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ clz(dst1, int_scratch);
    __ li(scratch2, 31);
    __ Subu(dst1, scratch2, dst1);

    // Set the exponent.
    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Ins(dst2, scratch2,
           HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non null bit.
    __ li(scratch2, Operand(1));
    __ sllv(scratch2, scratch2, dst1);
    __ li(at, -1);
    __ Xor(scratch2, scratch2, at);
    __ And(int_scratch, int_scratch, scratch2);

    // Get the number of bits to set in the lower part of the mantissa.
    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
    // Set the higher 20 bits of the mantissa.
    __ srlv(at, int_scratch, scratch2);
    __ or_(dst2, dst2, at);
    __ li(at, 32);
    __ subu(scratch2, at, scratch2);
    __ sllv(dst1, int_scratch, scratch2);
    __ Branch(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ li(at, HeapNumber::kMantissaBitsInTopWord);
    __ subu(scratch2, at, dst1);
    __ sllv(scratch2, int_scratch, scratch2);
    __ Or(dst2, dst2, scratch2);
    // Set dst1 to 0.
    __ mov(dst1, zero_reg);
  }
  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  DoubleRegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  FPURegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);
  __ Branch(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_dst,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));

    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }

  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
    __ Or(scratch1, scratch1, Operand(dst2));
    __ Branch(&done, eq, scratch1, Operand(zero_reg));

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            DoubleRegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  // Untag the object into the destination register.
  __ SmiUntag(dst, object);
  // Just return if the object is a smi.
  __ JumpIfSmi(object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

    FPURegister single_scratch = double_scratch.low();
    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
    // Get the result in the destination register.
    __ mfc1(dst, single_scratch);

  } else {
    // Load the double value in the destination registers.
    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
    __ Or(dst, scratch2, Operand(dst));
    __ Branch(&done, eq, dst, Operand(zero_reg));

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Registers state after DoubleIs32BitInteger.
    // dst: mantissa[51:20].
    // scratch2: 1

    // Shift back the higher bits of the mantissa.
    __ srlv(dst, dst, scratch3);
    // Set the implicit first bit.
    __ li(at, 32);
    __ subu(scratch3, at, scratch3);
    __ sllv(scratch2, scratch2, scratch3);
    __ Or(dst, dst, scratch2);
    // Set the sign.
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    Label skip_sub;
    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
    __ Subu(dst, zero_reg, dst);
    __ bind(&skip_sub);
  }

  __ bind(&done);
}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ext(scratch,
         src1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
  Register tmp = dst;
  __ srl(at, src1, 31);
  __ subu(tmp, scratch, at);
  __ Branch(not_int32, gt, tmp, Operand(30));
  // The number also cannot be a 32-bit integer if bits [21:0] of the mantissa
  // are not null: with an exponent of at most 30, those bits would be
  // fractional.
  __ And(tmp, src2, 0x3fffff);
  __ Branch(not_int32, ne, tmp, Operand(zero_reg));

  // Otherwise the exponent needs to be big enough to shift left all the
  // non zero bits left. So we need the (30 - exponent) last bits of the
  // 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ext(dst,
         src2,
         HeapNumber::kMantissaBitsInTopWord,
         32 - HeapNumber::kMantissaBitsInTopWord);
  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
  __ or_(dst, dst, at);

  // Create the mask and test the lower bits (of the higher bits).
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ li(src2, 1);
  __ sllv(src1, src2, scratch);
  __ Subu(src1, src1, Operand(1));
  __ And(src1, dst, src1);
  __ Branch(not_int32, ne, src1, Operand(zero_reg));
}
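
// Editor's note: a hedged, plain-C++ sketch of the check the code above emits,
// given the high and low 32-bit words of the double. The helper is
// hypothetical (not V8 API) and assumes the usual layout constants
// (kExponentBias == 1023, kMantissaBitsInTopWord == 20):
//
//   static bool DoubleIs32BitIntegerSketch(uint32_t hi, uint32_t lo) {
//     int32_t exponent = static_cast<int32_t>((hi >> 20) & 0x7FF) - 1023;
//     int32_t sign = static_cast<int32_t>(hi >> 31);
//     if (exponent < 0 || exponent - sign > 30) return false;
//     if ((lo & 0x3FFFFF) != 0) return false;   // mantissa bits [21:0]
//     // The low (32 - exponent) bits of mantissa[51:20] must also be zero.
//     uint32_t mantissa_51_20 = (lo >> 20) | (hi << 12);
//     uint64_t mask = (uint64_t{1} << (32 - exponent)) - 1;
//     return (mantissa_51_20 & mask) == 0;
//   }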


void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // a0: Left value (least significant part of mantissa).
  // a1: Left value (sign, exponent, top of mantissa).
  // a2: Right value (least significant part of mantissa).
  // a3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is saved.
  // We currently always use s0 to pass it.
  ASSERT(heap_number_result.is(s0));

  // Push the current return address before the C call.
  __ push(ra);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // We are not using MIPS FPU instructions, and parameters for the runtime
    // function call are prepared in a0-a3 registers, but the function we are
    // calling is compiled with the hard-float flag and expects the hard-float
    // ABI (parameters in f12/f14 registers). We need to copy parameters from
    // a0-a3 registers to f12/f14 register pairs.
    __ Move(f12, a0, a1);
    __ Move(f14, a2, a3);
  }
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(
        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
  }
  // Store answer in the overwritable heap number.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // Double returned in register f0.
    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    // Double returned in registers v0 and v1.
    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
  }
  // Place heap_number_result in v0 and return to the pushed return address.
  __ mov(v0, heap_number_result);
  __ pop(ra);
  __ Ret();
}


bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time. See next method.
  if (the_int_.is(a1) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a2) &&
      sign_.is(a3)) {
    return true;
  }
  if (the_int_.is(a2) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a3) &&
      sign_.is(a0)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
  stub1.GetCode()->set_is_pregenerated(true);
  stub2.GetCode()->set_is_pregenerated(true);
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign_, the_int_, Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));

  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch_, scratch_, sign_);
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int_);
  __ movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int_, shift_distance);
  __ or_(scratch_, scratch_, at);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kExponentOffset));
  __ sll(scratch_, the_int_, 32 - shift_distance);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(scratch_, zero_reg);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}
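
// Editor's worked example (illustrative, assuming the usual HeapNumber layout:
// kExponentShift == 20, kNonMantissaBitsInTopWord == 12, kExponentBias ==
// 1023): for the_int_ = 0x7FFFFFFF (INT32_MAX, not a Smi), sign_ = 0,
// non_smi_exponent = (1023 + 30) << 20 = 0x41D00000 and shift_distance = 10,
// so the stored exponent word is 0x41D00000 | (0x7FFFFFFF >> 10) = 0x41DFFFFF
// and the mantissa word is 0x7FFFFFFF << 22 = 0xFFC00000, i.e. exactly the
// IEEE 754 bits of 2147483647.0 (0x41DFFFFFFFC00000). The implicit leading
// mantissa bit merges with the exponent's lowest bit, which is already 1,
// just as the comment inside the stub describes.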


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cc != eq || !never_nan_nan) {
    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are both equal and they are not both Smis so both of them are not
    // Smis. If it's not a heap number, then return equal.
    if (cc == less || cc == greater) {
      __ GetObjectType(a0, t4, t4);
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
    } else {
      __ GetObjectType(a0, t4, t4);
      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
      // Comparing JS objects with <=, >= is complicated.
      if (cc != eq) {
        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false! See ECMAScript 11.8.5.
        if (cc == less_equal || cc == greater_equal) {
          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
          __ Branch(&return_equal, ne, a0, Operand(t2));
          if (cc == le) {
            // undefined <= undefined should fail.
            __ li(v0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ li(v0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cc != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless. For the others here is some code to check
    // for NaN.
    if (cc != lt && cc != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
      // Read top bits of double representation (second word of value).
      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ And(t3, t2, Operand(exp_mask_reg));
      // If all bits not set (ne cond), then not a NaN, objects are equal.
      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
      // Or with all low-bits of mantissa.
      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
      __ Or(v0, t3, Operand(t2));
      // For equal we already have the right value in v0: Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN). For <= and >= we need to load v0 with the failing
      // value if it's a NaN.
      if (cc != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq, v0, Operand(zero_reg));
        if (cc == le) {
          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}
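
// Editor's note: the NaN test above follows directly from the IEEE 754
// encoding; a hedged, plain-C++ equivalent of the emitted bit check
// (hypothetical helper, not V8 API):
//
//   static bool IsNaNBits(uint32_t exponent_word, uint32_t mantissa_word) {
//     const uint32_t kExpMask = 0x7FF00000;  // exponent field in the top word
//     return (exponent_word & kExpMask) == kExpMask &&        // all exp bits set
//            (((exponent_word << 12) | mantissa_word) != 0);  // some mantissa bit set
//   }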


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ mov(v0, lhs);
    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, rhs, kSmiTagSize);
    __ mtc1(at, f14);
    __ cvt_d_w(f14, f14);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  } else {
    // Load lhs to a double in a2, a3.
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));

    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
    __ mov(t6, rhs);
    ConvertToDoubleStub stub1(a1, a0, t6, t5);
    __ push(ra);
    __ Call(stub1.GetCode());

    __ pop(ra);
  }

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi. Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ li(v0, Operand(1));
    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, lhs, kSmiTagSize);
    __ mtc1(at, f12);
    __ cvt_d_w(f12, f12);
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    // Convert lhs to a double format. t5 is scratch.
    __ mov(t6, lhs);
    ConvertToDoubleStub stub2(a3, a2, t6, t5);
    __ push(ra);
    __ Call(stub2.GetCode());
    __ pop(ra);
    // Load rhs to a double in a1, a0.
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  // Fall through to both_loaded_as_doubles.
}


void EmitNanCheck(MacroAssembler* masm, Condition cc) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Lhs and rhs are already loaded to f12 and f14 register pairs.
    __ Move(t0, t1, f14);
    __ Move(t2, t3, f12);
  } else {
    // Lhs and rhs are already loaded to GP registers.
    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
  }
  Register rhs_exponent = exp_first ? t0 : t1;
  Register lhs_exponent = exp_first ? t2 : t3;
  Register rhs_mantissa = exp_first ? t1 : t0;
  Register lhs_mantissa = exp_first ? t3 : t2;
  Label one_is_nan, neither_is_nan;
  Label lhs_not_nan_exp_mask_is_loaded;

  Register exp_mask_reg = t4;
  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ and_(t5, lhs_exponent, exp_mask_reg);
  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));

  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ bind(&lhs_not_nan_exp_mask_is_loaded);
  __ and_(t5, rhs_exponent, exp_mask_reg);

  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }
  __ Ret();  // Return.

  __ bind(&neither_is_nan);
}


static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
  // f12 and f14 have the two doubles. Neither is a NaN.
  // Call a native function to do a comparison between two non-NaNs.
  // Call C routine that may not cause GC or other trouble.
  // We call and return manually because we need the argument slots to
  // be freed.

  Label return_result_not_equal, return_result_equal;
  if (cc == eq) {
    // Doubles are not equal unless they have the same bit pattern.
    // Exception: 0 and -0.
    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      // Lhs and rhs are already loaded to f12 and f14 register pairs.
      __ Move(t0, t1, f14);
      __ Move(t2, t3, f12);
    } else {
      // Lhs and rhs are already loaded to GP registers.
      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
    }
    Register rhs_exponent = exp_first ? t0 : t1;
    Register lhs_exponent = exp_first ? t2 : t3;
    Register rhs_mantissa = exp_first ? t1 : t0;
    Register lhs_mantissa = exp_first ? t3 : t2;

    __ xor_(v0, rhs_mantissa, lhs_mantissa);
    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));

    __ subu(v0, rhs_exponent, lhs_exponent);
    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
    // 0, -0 case.
    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
    __ or_(t4, rhs_exponent, lhs_exponent);
    __ or_(t4, t4, rhs_mantissa);

    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));

    __ bind(&return_result_equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();
  }

  __ bind(&return_result_not_equal);

  if (!CpuFeatures::IsSupported(FPU)) {
    __ push(ra);
    __ PrepareCallCFunction(0, 2, t4);
    if (!IsMipsSoftFloatABI) {
      // We are not using MIPS FPU instructions, and parameters for the runtime
      // function call are prepared in a0-a3 registers, but the function we are
      // calling is compiled with the hard-float flag and expects the
      // hard-float ABI (parameters in f12/f14 registers). We need to copy
      // parameters from a0-a3 registers to f12/f14 register pairs.
      __ Move(f12, a0, a1);
      __ Move(f14, a2, a3);
    }

    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                     0, 2);
    __ pop(ra);  // Because this function returns int, result is in v0.
    __ Ret();
  } else {
    CpuFeatures::Scope scope(FPU);
    Label equal, less_than;
    __ BranchF(&equal, NULL, eq, f12, f14);
    __ BranchF(&less_than, NULL, lt, f12, f14);

    // Not equal, not less, not NaN, must be greater.
    __ li(v0, Operand(GREATER));
    __ Ret();

    __ bind(&equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();

    __ bind(&less_than);
    __ li(v0, Operand(LESS));
    __ Ret();
  }
}
1465
1466
1467static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1468 Register lhs,
1469 Register rhs) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001470 // If either operand is a JS object or an oddball value, then they are
Ben Murdoch257744e2011-11-30 15:57:28 +00001471 // not equal since their pointers are different.
1472 // There is no test for undetectability in strict equality.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001473 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Ben Murdoch257744e2011-11-30 15:57:28 +00001474 Label first_non_object;
1475 // Get the type of the first operand into a2 and compare it with
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001476 // FIRST_SPEC_OBJECT_TYPE.
Ben Murdoch257744e2011-11-30 15:57:28 +00001477 __ GetObjectType(lhs, a2, a2);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001478 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001479
1480 // Return non-zero.
1481 Label return_not_equal;
1482 __ bind(&return_not_equal);
1483 __ li(v0, Operand(1));
1484 __ Ret();
1485
1486 __ bind(&first_non_object);
1487 // Check for oddballs: true, false, null, undefined.
1488 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1489
1490 __ GetObjectType(rhs, a3, a3);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001491 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001492
1493 // Check for oddballs: true, false, null, undefined.
1494 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1495
1496 // Now that we have the types we might as well check for symbol-symbol.
1497 // Ensure that no non-strings have the symbol bit set.
1498 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
1499 STATIC_ASSERT(kSymbolTag != 0);
1500 __ And(t2, a2, Operand(a3));
1501 __ And(t0, t2, Operand(kIsSymbolMask));
1502 __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1503}
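
// A minimal standalone sketch (plain C++, not V8 code) of the combined
// symbol-bit test above: because only string types can have the symbol bit
// set (the STATIC_ASSERTs guarantee this), AND-ing the two instance types and
// masking answers "are both operands symbols?" in two instructions. The mask
// parameter stands in for kIsSymbolMask; no real V8 constants are used here.
static bool BothOperandsAreSymbols(uint32_t lhs_instance_type,
                                   uint32_t rhs_instance_type,
                                   uint32_t is_symbol_mask) {
  return ((lhs_instance_type & rhs_instance_type) & is_symbol_mask) != 0;
}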
1504
1505
1506static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1507 Register lhs,
1508 Register rhs,
1509 Label* both_loaded_as_doubles,
1510 Label* not_heap_numbers,
1511 Label* slow) {
1512 __ GetObjectType(lhs, a3, a2);
1513 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1514 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1515 // If first was a heap number & second wasn't, go to slow case.
1516 __ Branch(slow, ne, a3, Operand(a2));
1517
1518 // Both are heap numbers. Load them up then jump to the code we have
1519 // for that.
1520 if (CpuFeatures::IsSupported(FPU)) {
1521 CpuFeatures::Scope scope(FPU);
1522 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1523 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1524 } else {
1525 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1526 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1527 if (rhs.is(a0)) {
1528 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1529 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1530 } else {
1531 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1532 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1533 }
1534 }
1535 __ jmp(both_loaded_as_doubles);
1536}
1537
1538
1539// Fast negative check for symbol-to-symbol equality.
1540static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1541 Register lhs,
1542 Register rhs,
1543 Label* possible_strings,
1544 Label* not_both_strings) {
1545 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1546 (lhs.is(a1) && rhs.is(a0)));
1547
1548 // a2 is object type of lhs.
1549 // Ensure that no non-strings have the symbol bit set.
1550 Label object_test;
1551 STATIC_ASSERT(kSymbolTag != 0);
1552 __ And(at, a2, Operand(kIsNotStringMask));
1553 __ Branch(&object_test, ne, at, Operand(zero_reg));
1554 __ And(at, a2, Operand(kIsSymbolMask));
1555 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1556 __ GetObjectType(rhs, a3, a3);
1557 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1558 __ And(at, a3, Operand(kIsSymbolMask));
1559 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1560
1561 // Both are symbols. We already checked they weren't the same pointer
1562 // so they are not equal.
1563 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1564 __ Ret();
1565
1566 __ bind(&object_test);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001567 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001568 __ GetObjectType(rhs, a2, a3);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001569 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001570
1571 // If both objects are undetectable, they are equal. Otherwise, they
1572 // are not equal, since they are different objects and an object is not
1573 // equal to undefined.
1574 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1575 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1576 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1577 __ and_(a0, a2, a3);
1578 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1579 __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
1580 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01001581}
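
// A minimal standalone sketch (plain C++, not V8 code) of the undetectable-
// object result computed just above: the stub returns 0 (equal) only when the
// kIsUndetectable bit is set in the bit field of both maps; any other
// combination leaves a non-zero "not equal" value after the final XOR.
static int UndetectableCompareResult(int lhs_bit_field,
                                     int rhs_bit_field,
                                     int is_undetectable_bit) {
  int both = lhs_bit_field & rhs_bit_field & is_undetectable_bit;
  // both == bit -> XOR gives 0 (equal); both == 0 -> XOR leaves the bit set,
  // i.e. a non-zero "not equal" answer.
  return both ^ is_undetectable_bit;
}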
1582
1583
1584void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1585 Register object,
1586 Register result,
1587 Register scratch1,
1588 Register scratch2,
1589 Register scratch3,
1590 bool object_is_smi,
1591 Label* not_found) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001592 // Use of registers. Register result is used as a temporary.
1593 Register number_string_cache = result;
1594 Register mask = scratch3;
1595
1596 // Load the number string cache.
1597 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1598
1599 // Make the hash mask from the length of the number string cache. It
1600 // contains two elements (number and string) for each cache entry.
1601 __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1602 // Divide length by two (length is a smi).
1603 __ sra(mask, mask, kSmiTagSize + 1);
1604 __ Addu(mask, mask, -1); // Make mask.
1605
1606 // Calculate the entry in the number string cache. The hash value in the
1607 // number string cache for smis is just the smi value, and the hash for
1608 // doubles is the xor of the upper and lower words. See
1609 // Heap::GetNumberStringCache.
1610 Isolate* isolate = masm->isolate();
1611 Label is_smi;
1612 Label load_result_from_cache;
1613 if (!object_is_smi) {
1614 __ JumpIfSmi(object, &is_smi);
1615 if (CpuFeatures::IsSupported(FPU)) {
1616 CpuFeatures::Scope scope(FPU);
1617 __ CheckMap(object,
1618 scratch1,
1619 Heap::kHeapNumberMapRootIndex,
1620 not_found,
1621 DONT_DO_SMI_CHECK);
1622
1623 STATIC_ASSERT(8 == kDoubleSize);
1624 __ Addu(scratch1,
1625 object,
1626 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1627 __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1628 __ lw(scratch1, MemOperand(scratch1, 0));
1629 __ Xor(scratch1, scratch1, Operand(scratch2));
1630 __ And(scratch1, scratch1, Operand(mask));
1631
1632 // Calculate address of entry in string cache: each entry consists
1633 // of two pointer sized fields.
1634 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1635 __ Addu(scratch1, number_string_cache, scratch1);
1636
1637 Register probe = mask;
1638 __ lw(probe,
1639 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1640 __ JumpIfSmi(probe, not_found);
1641 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1642 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001643 __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
Ben Murdoch257744e2011-11-30 15:57:28 +00001644 __ Branch(not_found);
1645 } else {
1646 // Note that there is no cache check for the non-FPU case, even though
1647 // it seems there could be one. Adding it might be a tiny optimization
1648 // for non-FPU cores.
1649 __ Branch(not_found);
1650 }
1651 }
1652
1653 __ bind(&is_smi);
1654 Register scratch = scratch1;
1655 __ sra(scratch, object, 1); // Shift away the tag.
1656 __ And(scratch, mask, Operand(scratch));
1657
1658 // Calculate address of entry in string cache: each entry consists
1659 // of two pointer sized fields.
1660 __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1661 __ Addu(scratch, number_string_cache, scratch);
1662
1663 // Check if the entry is the smi we are looking for.
1664 Register probe = mask;
1665 __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1666 __ Branch(not_found, ne, object, Operand(probe));
1667
1668 // Get the result from the cache.
1669 __ bind(&load_result_from_cache);
1670 __ lw(result,
1671 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1672
1673 __ IncrementCounter(isolate->counters()->number_to_string_native(),
1674 1,
1675 scratch1,
1676 scratch2);
Steve Block44f0eee2011-05-26 01:26:41 +01001677}
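
// A minimal standalone sketch (plain C++, not V8 code) of the cache-index
// computation above. The number string cache is a FixedArray of
// (number, string) pairs, so the mask is (length / 2) - 1 and every entry
// occupies two slots. For smis the hash is the untagged value; for heap
// numbers it is the XOR of the upper and lower 32-bit words of the double.
static int NumberStringCacheKeySlot(uint32_t hash, int cache_length) {
  int mask = (cache_length / 2) - 1;  // The cache length is a power of two.
  int entry = hash & mask;
  return entry * 2;                   // Slot holding the number key; the
                                      // string result lives in slot + 1.
}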
1678
1679
1680void NumberToStringStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001681 Label runtime;
1682
1683 __ lw(a1, MemOperand(sp, 0));
1684
1685 // Generate code to lookup number in the number string cache.
1686 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1687 __ Addu(sp, sp, Operand(1 * kPointerSize));
1688 __ Ret();
1689
1690 __ bind(&runtime);
1691 // Handle number to string in the runtime system if not found in the cache.
1692 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01001693}
1694
1695
1696// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1697// On exit, v0 is 0, positive, or negative (smi) to indicate the result
1698// of the comparison.
1699void CompareStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001700 Label slow; // Call builtin.
1701 Label not_smis, both_loaded_as_doubles;
1702
1703
1704 if (include_smi_compare_) {
1705 Label not_two_smis, smi_done;
1706 __ Or(a2, a1, a0);
1707 __ JumpIfNotSmi(a2, &not_two_smis);
1708 __ sra(a1, a1, 1);
1709 __ sra(a0, a0, 1);
1710 __ Subu(v0, a1, a0);
1711 __ Ret();
1712 __ bind(&not_two_smis);
1713 } else if (FLAG_debug_code) {
1714 __ Or(a2, a1, a0);
1715 __ And(a2, a2, kSmiTagMask);
1716 __ Assert(ne, "CompareStub: unexpected smi operands.",
1717 a2, Operand(zero_reg));
1718 }
1719
1720
1721 // NOTICE! This code is only reached after a smi-fast-case check, so
1722 // it is certain that at least one operand isn't a smi.
1723
1724 // Handle the case where the objects are identical. Either returns the answer
1725 // or goes to slow. Only falls through if the objects were not identical.
1726 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1727
1728 // If either is a Smi (we know that not both are), then they can only
1729 // be strictly equal if the other is a HeapNumber.
1730 STATIC_ASSERT(kSmiTag == 0);
1731 ASSERT_EQ(0, Smi::FromInt(0));
1732 __ And(t2, lhs_, Operand(rhs_));
1733 __ JumpIfNotSmi(t2, &not_smis, t0);
1734 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1735 // 1) Return the answer.
1736 // 2) Go to slow.
1737 // 3) Fall through to both_loaded_as_doubles.
1738 // 4) Jump to rhs_not_nan.
1739 // In cases 3 and 4 we have found out we were dealing with a number-number
1740 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1741 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1742 EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1743 &both_loaded_as_doubles, &slow, strict_);
1744
1745 __ bind(&both_loaded_as_doubles);
1746 // f12, f14 are the double representations of the left hand side
1747 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1748 // left hand side and a0, a1 represent right hand side.
1749
1750 Isolate* isolate = masm->isolate();
1751 if (CpuFeatures::IsSupported(FPU)) {
1752 CpuFeatures::Scope scope(FPU);
1753 Label nan;
1754 __ li(t0, Operand(LESS));
1755 __ li(t1, Operand(GREATER));
1756 __ li(t2, Operand(EQUAL));
1757
1758 // Check if either rhs or lhs is NaN.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001759 __ BranchF(NULL, &nan, eq, f12, f14);
Ben Murdoch257744e2011-11-30 15:57:28 +00001760
1761 // Check if the LESS condition is satisfied. If true, conditionally move
1762 // the result to v0.
1763 __ c(OLT, D, f12, f14);
1764 __ movt(v0, t0);
1765 // Use the previous check to conditionally store the opposite condition
1766 // (GREATER) in v0. If rhs is equal to lhs, this will be corrected by the
1767 // next check.
1768 __ movf(v0, t1);
1769 // Check if EQUAL condition is satisfied. If true, move conditionally
1770 // result to v0.
1771 __ c(EQ, D, f12, f14);
1772 __ movt(v0, t2);
1773
1774 __ Ret();
1775
1776 __ bind(&nan);
1777 // NaN comparisons always fail.
1778 // Load whatever we need in v0 to make the comparison fail.
1779 if (cc_ == lt || cc_ == le) {
1780 __ li(v0, Operand(GREATER));
1781 } else {
1782 __ li(v0, Operand(LESS));
1783 }
1784 __ Ret();
1785 } else {
1786 // Checks for NaN in the doubles we have loaded. Can return the answer or
1787 // fall through if neither is a NaN. Also binds rhs_not_nan.
1788 EmitNanCheck(masm, cc_);
1789
1790 // Compares two doubles that are not NaNs. Returns the answer.
1791 // Never falls through.
1792 EmitTwoNonNanDoubleComparison(masm, cc_);
1793 }
1794
1795 __ bind(&not_smis);
1796 // At this point we know we are dealing with two different objects,
1797 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1798 if (strict_) {
1799 // This returns non-equal for some object types, or falls through if it
1800 // was not lucky.
1801 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1802 }
1803
1804 Label check_for_symbols;
1805 Label flat_string_check;
1806 // Check for heap-number-heap-number comparison. Can jump to slow case,
1807 // or load both doubles and jump to the code that handles
1808 // that case. If the inputs are not doubles then jumps to check_for_symbols.
1809 // In this case a2 will contain the type of lhs_.
1810 EmitCheckForTwoHeapNumbers(masm,
1811 lhs_,
1812 rhs_,
1813 &both_loaded_as_doubles,
1814 &check_for_symbols,
1815 &flat_string_check);
1816
1817 __ bind(&check_for_symbols);
1818 if (cc_ == eq && !strict_) {
1819 // Returns an answer for two symbols or two detectable objects.
1820 // Otherwise jumps to string case or not both strings case.
1821 // Assumes that a2 is the type of lhs_ on entry.
1822 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1823 }
1824
1825 // Check for both being sequential ASCII strings, and inline if that is the
1826 // case.
1827 __ bind(&flat_string_check);
1828
1829 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1830
1831 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1832 if (cc_ == eq) {
1833 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1834 lhs_,
1835 rhs_,
1836 a2,
1837 a3,
1838 t0);
1839 } else {
1840 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1841 lhs_,
1842 rhs_,
1843 a2,
1844 a3,
1845 t0,
1846 t1);
1847 }
1848 // Never falls through to here.
1849
1850 __ bind(&slow);
1851 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1852 // a1 (rhs) second.
1853 __ Push(lhs_, rhs_);
1854 // Figure out which native to call and setup the arguments.
1855 Builtins::JavaScript native;
1856 if (cc_ == eq) {
1857 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1858 } else {
1859 native = Builtins::COMPARE;
1860 int ncr; // NaN compare result.
1861 if (cc_ == lt || cc_ == le) {
1862 ncr = GREATER;
1863 } else {
1864 ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
1865 ncr = LESS;
1866 }
1867 __ li(a0, Operand(Smi::FromInt(ncr)));
1868 __ push(a0);
1869 }
1870
1871 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1872 // tagged as a small integer.
1873 __ InvokeBuiltin(native, JUMP_FUNCTION);
Steve Block44f0eee2011-05-26 01:26:41 +01001874}
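
// A minimal standalone sketch (plain C++, not V8 code) of the smi-smi fast
// path at the top of CompareStub::Generate: both operands are untagged with
// an arithmetic shift and subtracted, so the sign of the difference
// (negative, zero, positive) is the comparison result the stub's callers
// expect. The parameter order is illustrative; the actual operand-to-register
// mapping depends on the stub configuration.
static int32_t SmiCompareResult(int32_t lhs_tagged, int32_t rhs_tagged) {
  return (lhs_tagged >> 1) - (rhs_tagged >> 1);  // kSmiTagSize == 1 assumed.
}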
1875
1876
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001877// The stub expects its argument in the tos_ register and returns its result in
1878// it, too: zero for false, and a non-zero value for true.
Steve Block44f0eee2011-05-26 01:26:41 +01001879void ToBooleanStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001880 // This stub uses FPU instructions.
1881 CpuFeatures::Scope scope(FPU);
1882
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001883 Label patch;
1884 const Register map = t5.is(tos_) ? t3 : t5;
Ben Murdoch257744e2011-11-30 15:57:28 +00001885
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001886 // undefined -> false.
1887 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
Ben Murdoch257744e2011-11-30 15:57:28 +00001888
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001889 // Boolean -> its value.
1890 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1891 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00001892
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001893 // 'null' -> false.
1894 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
Ben Murdoch257744e2011-11-30 15:57:28 +00001895
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001896 if (types_.Contains(SMI)) {
1897 // Smis: 0 -> false, all others -> true.
1898 __ And(at, tos_, kSmiTagMask);
1899 // tos_ already contains the correct return value.
1900 __ Ret(eq, at, Operand(zero_reg));
1901 } else if (types_.NeedsMap()) {
1902 // If we need a map later and have a Smi -> patch.
1903 __ JumpIfSmi(tos_, &patch);
1904 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001905
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001906 if (types_.NeedsMap()) {
1907 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00001908
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001909 if (types_.CanBeUndetectable()) {
1910 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1911 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1912 // Undetectable -> false.
1913 __ movn(tos_, zero_reg, at);
1914 __ Ret(ne, at, Operand(zero_reg));
1915 }
1916 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001917
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001918 if (types_.Contains(SPEC_OBJECT)) {
1919 // Spec object -> true.
1920 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1921 // tos_ contains the correct non-zero return value already.
1922 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1923 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001924
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001925 if (types_.Contains(STRING)) {
1926 // String value -> false iff empty.
1927 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1928 Label skip;
1929 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1930 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1931 __ Ret(); // the string length is OK as the return value
1932 __ bind(&skip);
1933 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001934
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001935 if (types_.Contains(HEAP_NUMBER)) {
1936 // Heap number -> false iff +0, -0, or NaN.
1937 Label not_heap_number;
1938 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1939 __ Branch(&not_heap_number, ne, map, Operand(at));
1940 Label zero_or_nan, number;
1941 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1942 __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
1943 // "tos_" is a register, and contains a non zero value by default.
1944 // Hence we only need to overwrite "tos_" with zero to return false for
1945 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1946 __ bind(&zero_or_nan);
1947 __ mov(tos_, zero_reg);
1948 __ bind(&number);
1949 __ Ret();
1950 __ bind(&not_heap_number);
1951 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001952
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001953 __ bind(&patch);
1954 GenerateTypeTransition(masm);
1955}
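
// A minimal standalone sketch (plain C++, not V8 code) of the heap-number
// case above: a double converts to false exactly when it is +0, -0, or NaN,
// which is the set of values for which (d == 0.0) || (d != d) holds. The
// BranchF against kDoubleRegZero implements the same test, with the unordered
// (NaN) outcome routed to the false case.
static bool DoubleToBoolean(double d) {
  return !(d == 0.0 || d != d);  // d != d is true only for NaN.
}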
Ben Murdoch257744e2011-11-30 15:57:28 +00001956
Ben Murdoch257744e2011-11-30 15:57:28 +00001957
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001958void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1959 Type type,
1960 Heap::RootListIndex value,
1961 bool result) {
1962 if (types_.Contains(type)) {
1963 // If we see an expected oddball, return its ToBoolean value in tos_.
1964 __ LoadRoot(at, value);
1965 __ Subu(at, at, tos_); // This is a check for equality for the movz below.
1966 // The value of a root is never NULL, so we can avoid loading a non-null
1967 // value into tos_ when we want to return 'true'.
1968 if (!result) {
1969 __ movz(tos_, zero_reg, at);
1970 }
1971 __ Ret(eq, at, Operand(zero_reg));
1972 }
1973}
Ben Murdoch257744e2011-11-30 15:57:28 +00001974
Ben Murdoch257744e2011-11-30 15:57:28 +00001975
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001976void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1977 __ Move(a3, tos_);
1978 __ li(a2, Operand(Smi::FromInt(tos_.code())));
1979 __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
1980 __ Push(a3, a2, a1);
1981 // Patch the caller to an appropriate specialized stub and return the
1982 // operation result to the caller of the stub.
1983 __ TailCallExternalReference(
1984 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1985 3,
1986 1);
1987}
1988
1989
1990void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1991 // We don't allow a GC during a store buffer overflow so there is no need to
1992 // store the registers in any particular way, but we do have to store and
1993 // restore them.
1994 __ MultiPush(kJSCallerSaved | ra.bit());
1995 if (save_doubles_ == kSaveFPRegs) {
1996 CpuFeatures::Scope scope(FPU);
1997 __ MultiPushFPU(kCallerSavedFPU);
1998 }
1999 const int argument_count = 1;
2000 const int fp_argument_count = 0;
2001 const Register scratch = a1;
2002
2003 AllowExternalCallThatCantCauseGC scope(masm);
2004 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
2005 __ li(a0, Operand(ExternalReference::isolate_address()));
2006 __ CallCFunction(
2007 ExternalReference::store_buffer_overflow_function(masm->isolate()),
2008 argument_count);
2009 if (save_doubles_ == kSaveFPRegs) {
2010 CpuFeatures::Scope scope(FPU);
2011 __ MultiPopFPU(kCallerSavedFPU);
2012 }
2013
2014 __ MultiPop(kJSCallerSaved | ra.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00002015 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01002016}
2017
2018
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002019void UnaryOpStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002020 const char* op_name = Token::Name(op_);
2021 const char* overwrite_name = NULL; // Make g++ happy.
2022 switch (mode_) {
2023 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
2024 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
2025 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002026 stream->Add("UnaryOpStub_%s_%s_%s",
2027 op_name,
2028 overwrite_name,
2029 UnaryOpIC::GetName(operand_type_));
Ben Murdoch257744e2011-11-30 15:57:28 +00002030}
2031
2032
2033// TODO(svenpanne): Use virtual functions instead of switch.
2034void UnaryOpStub::Generate(MacroAssembler* masm) {
2035 switch (operand_type_) {
2036 case UnaryOpIC::UNINITIALIZED:
2037 GenerateTypeTransition(masm);
2038 break;
2039 case UnaryOpIC::SMI:
2040 GenerateSmiStub(masm);
2041 break;
2042 case UnaryOpIC::HEAP_NUMBER:
2043 GenerateHeapNumberStub(masm);
2044 break;
2045 case UnaryOpIC::GENERIC:
2046 GenerateGenericStub(masm);
2047 break;
2048 }
2049}
2050
2051
2052void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2053 // Argument is in a0 and v0 at this point, so we can overwrite a0.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002054 __ li(a2, Operand(Smi::FromInt(op_)));
2055 __ li(a1, Operand(Smi::FromInt(mode_)));
Ben Murdoch257744e2011-11-30 15:57:28 +00002056 __ li(a0, Operand(Smi::FromInt(operand_type_)));
Ben Murdoch257744e2011-11-30 15:57:28 +00002057 __ Push(v0, a2, a1, a0);
2058
2059 __ TailCallExternalReference(
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002060 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00002061}
2062
2063
2064// TODO(svenpanne): Use virtual functions instead of switch.
2065void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2066 switch (op_) {
2067 case Token::SUB:
2068 GenerateSmiStubSub(masm);
2069 break;
2070 case Token::BIT_NOT:
2071 GenerateSmiStubBitNot(masm);
2072 break;
2073 default:
2074 UNREACHABLE();
2075 }
2076}
2077
2078
2079void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2080 Label non_smi, slow;
2081 GenerateSmiCodeSub(masm, &non_smi, &slow);
2082 __ bind(&non_smi);
2083 __ bind(&slow);
2084 GenerateTypeTransition(masm);
2085}
2086
2087
2088void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2089 Label non_smi;
2090 GenerateSmiCodeBitNot(masm, &non_smi);
2091 __ bind(&non_smi);
2092 GenerateTypeTransition(masm);
2093}
2094
2095
2096void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2097 Label* non_smi,
2098 Label* slow) {
2099 __ JumpIfNotSmi(a0, non_smi);
2100
2101 // The result of negating zero or the smallest negative smi is not a smi.
2102 __ And(t0, a0, ~0x80000000);
2103 __ Branch(slow, eq, t0, Operand(zero_reg));
2104
2105 // Return '0 - value'.
2106 __ Subu(v0, zero_reg, a0);
2107 __ Ret();
2108}
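
// A minimal standalone sketch (plain C++, not V8 code) of the guard above:
// masking the tagged operand with ~0x80000000 yields zero exactly for the
// bit patterns 0x00000000 (the smi 0, whose negation is -0 and thus not a
// smi) and 0x80000000 (the most negative smi, whose negation overflows), so
// both are routed to the slow path before the 0 - value subtraction.
static bool SmiNegationNeedsSlowPath(uint32_t tagged_value) {
  return (tagged_value & ~0x80000000u) == 0;
}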
2109
2110
2111void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2112 Label* non_smi) {
2113 __ JumpIfNotSmi(a0, non_smi);
2114
2115 // Flip bits and revert inverted smi-tag.
2116 __ Neg(v0, a0);
2117 __ And(v0, v0, ~kSmiTagMask);
2118 __ Ret();
2119}
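
// A minimal standalone sketch (plain C++, not V8 code) of "flip bits and
// revert inverted smi-tag": inverting every bit of the tagged value and then
// clearing the tag bit produces the correctly tagged bitwise NOT, because
// (~(2 * n)) & ~1 == -2n - 2 == 2 * (~n). Assumes kSmiTag == 0 and
// kSmiTagSize == 1 as on this port.
static int32_t SmiBitNot(int32_t tagged_value) {
  return ~tagged_value & ~1;
}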
2120
2121
2122// TODO(svenpanne): Use virtual functions instead of switch.
2123void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2124 switch (op_) {
2125 case Token::SUB:
2126 GenerateHeapNumberStubSub(masm);
2127 break;
2128 case Token::BIT_NOT:
2129 GenerateHeapNumberStubBitNot(masm);
2130 break;
2131 default:
2132 UNREACHABLE();
2133 }
2134}
2135
2136
2137void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2138 Label non_smi, slow, call_builtin;
2139 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2140 __ bind(&non_smi);
2141 GenerateHeapNumberCodeSub(masm, &slow);
2142 __ bind(&slow);
2143 GenerateTypeTransition(masm);
2144 __ bind(&call_builtin);
2145 GenerateGenericCodeFallback(masm);
2146}
2147
2148
2149void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2150 Label non_smi, slow;
2151 GenerateSmiCodeBitNot(masm, &non_smi);
2152 __ bind(&non_smi);
2153 GenerateHeapNumberCodeBitNot(masm, &slow);
2154 __ bind(&slow);
2155 GenerateTypeTransition(masm);
2156}
2157
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002158
Ben Murdoch257744e2011-11-30 15:57:28 +00002159void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2160 Label* slow) {
2161 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2162 // a0 is a heap number. Get a new heap number in a1.
2163 if (mode_ == UNARY_OVERWRITE) {
2164 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2165 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2166 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2167 } else {
2168 Label slow_allocate_heapnumber, heapnumber_allocated;
2169 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2170 __ jmp(&heapnumber_allocated);
2171
2172 __ bind(&slow_allocate_heapnumber);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002173 {
2174 FrameScope scope(masm, StackFrame::INTERNAL);
2175 __ push(a0);
2176 __ CallRuntime(Runtime::kNumberAlloc, 0);
2177 __ mov(a1, v0);
2178 __ pop(a0);
2179 }
Ben Murdoch257744e2011-11-30 15:57:28 +00002180
2181 __ bind(&heapnumber_allocated);
2182 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2183 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2184 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2185 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2186 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
2187 __ mov(v0, a1);
2188 }
2189 __ Ret();
2190}
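
// A minimal standalone sketch (plain C++ with <cstring>/<cstdint>, not V8
// code) of the negation above: the stub flips only the sign bit of the
// exponent word in place; the same transformation over the full 64-bit
// pattern is shown here. Working on the raw words handles -0 and NaN without
// special cases and needs no FPU.
static double NegateViaSignBit(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  bits ^= 0x8000000000000000ull;  // Flip only the sign bit.
  memcpy(&value, &bits, sizeof(value));
  return value;
}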
2191
2192
2193void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2194 MacroAssembler* masm,
2195 Label* slow) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002196 Label impossible;
2197
Ben Murdoch257744e2011-11-30 15:57:28 +00002198 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2199 // Convert the heap number in a0 to an untagged integer in a1.
2200 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2201
2202 // Do the bitwise operation and check if the result fits in a smi.
2203 Label try_float;
2204 __ Neg(a1, a1);
2205 __ Addu(a2, a1, Operand(0x40000000));
2206 __ Branch(&try_float, lt, a2, Operand(zero_reg));
2207
2208 // Tag the result as a smi and we're done.
2209 __ SmiTag(v0, a1);
2210 __ Ret();
2211
2212 // Try to store the result in a heap number.
2213 __ bind(&try_float);
2214 if (mode_ == UNARY_NO_OVERWRITE) {
2215 Label slow_allocate_heapnumber, heapnumber_allocated;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002216 // Allocate a new heap number without zapping v0, which we need if it fails.
2217 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
Ben Murdoch257744e2011-11-30 15:57:28 +00002218 __ jmp(&heapnumber_allocated);
2219
2220 __ bind(&slow_allocate_heapnumber);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002221 {
2222 FrameScope scope(masm, StackFrame::INTERNAL);
2223 __ push(v0); // Push the heap number, not the untagged int32.
2224 __ CallRuntime(Runtime::kNumberAlloc, 0);
2225 __ mov(a2, v0); // Move the new heap number into a2.
2226 // Get the heap number into v0, now that the new heap number is in a2.
2227 __ pop(v0);
2228 }
Ben Murdoch257744e2011-11-30 15:57:28 +00002229
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002230 // Convert the heap number in v0 to an untagged integer in a1.
2231 // This can't go slow-case because it's the same number we already
2232 // converted once again.
2233 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2234 // Negate the result.
2235 __ Xor(a1, a1, -1);
2236
Ben Murdoch257744e2011-11-30 15:57:28 +00002237 __ bind(&heapnumber_allocated);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002238 __ mov(v0, a2); // Move newly allocated heap number to v0.
Ben Murdoch257744e2011-11-30 15:57:28 +00002239 }
2240
2241 if (CpuFeatures::IsSupported(FPU)) {
2242 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2243 CpuFeatures::Scope scope(FPU);
2244 __ mtc1(a1, f0);
2245 __ cvt_d_w(f0, f0);
2246 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2247 __ Ret();
2248 } else {
2249 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2250 // have to set up a frame.
2251 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2252 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2253 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002254
2255 __ bind(&impossible);
2256 if (FLAG_debug_code) {
2257 __ stop("Incorrect assumption in bit-not stub");
2258 }
Ben Murdoch257744e2011-11-30 15:57:28 +00002259}
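
// A minimal standalone sketch (plain C++, not V8 code) of the recurring
// "does this int32 fit in a smi?" test used above (an Addu with 0x40000000
// followed by a sign check): the addition goes negative exactly for values
// outside [-2^30, 2^30 - 1], i.e. for values that cannot carry a one-bit smi
// tag. Unsigned arithmetic is used here to avoid signed-overflow issues.
static bool Int32FitsInSmi(int32_t value) {
  return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
}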
2260
2261
2262// TODO(svenpanne): Use virtual functions instead of switch.
2263void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2264 switch (op_) {
2265 case Token::SUB:
2266 GenerateGenericStubSub(masm);
2267 break;
2268 case Token::BIT_NOT:
2269 GenerateGenericStubBitNot(masm);
2270 break;
2271 default:
2272 UNREACHABLE();
2273 }
2274}
2275
2276
2277void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2278 Label non_smi, slow;
2279 GenerateSmiCodeSub(masm, &non_smi, &slow);
2280 __ bind(&non_smi);
2281 GenerateHeapNumberCodeSub(masm, &slow);
2282 __ bind(&slow);
2283 GenerateGenericCodeFallback(masm);
2284}
2285
2286
2287void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2288 Label non_smi, slow;
2289 GenerateSmiCodeBitNot(masm, &non_smi);
2290 __ bind(&non_smi);
2291 GenerateHeapNumberCodeBitNot(masm, &slow);
2292 __ bind(&slow);
2293 GenerateGenericCodeFallback(masm);
2294}
2295
2296
2297void UnaryOpStub::GenerateGenericCodeFallback(
2298 MacroAssembler* masm) {
2299 // Handle the slow case by jumping to the JavaScript builtin.
2300 __ push(a0);
2301 switch (op_) {
2302 case Token::SUB:
2303 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2304 break;
2305 case Token::BIT_NOT:
2306 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2307 break;
2308 default:
2309 UNREACHABLE();
2310 }
2311}
2312
2313
Ben Murdoch257744e2011-11-30 15:57:28 +00002314void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2315 Label get_result;
2316
2317 __ Push(a1, a0);
2318
2319 __ li(a2, Operand(Smi::FromInt(MinorKey())));
2320 __ li(a1, Operand(Smi::FromInt(op_)));
2321 __ li(a0, Operand(Smi::FromInt(operands_type_)));
2322 __ Push(a2, a1, a0);
2323
2324 __ TailCallExternalReference(
2325 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2326 masm->isolate()),
2327 5,
2328 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002329}
2330
2331
Ben Murdoch257744e2011-11-30 15:57:28 +00002332void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
Steve Block44f0eee2011-05-26 01:26:41 +01002333 MacroAssembler* masm) {
2334 UNIMPLEMENTED();
2335}
2336
2337
Ben Murdoch257744e2011-11-30 15:57:28 +00002338void BinaryOpStub::Generate(MacroAssembler* masm) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002339 // Explicitly allow generation of nested stubs. It is safe here because
2340 // generation code does not use any raw pointers.
2341 AllowStubCallsScope allow_stub_calls(masm, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00002342 switch (operands_type_) {
2343 case BinaryOpIC::UNINITIALIZED:
2344 GenerateTypeTransition(masm);
2345 break;
2346 case BinaryOpIC::SMI:
2347 GenerateSmiStub(masm);
2348 break;
2349 case BinaryOpIC::INT32:
2350 GenerateInt32Stub(masm);
2351 break;
2352 case BinaryOpIC::HEAP_NUMBER:
2353 GenerateHeapNumberStub(masm);
2354 break;
2355 case BinaryOpIC::ODDBALL:
2356 GenerateOddballStub(masm);
2357 break;
2358 case BinaryOpIC::BOTH_STRING:
2359 GenerateBothStringStub(masm);
2360 break;
2361 case BinaryOpIC::STRING:
2362 GenerateStringStub(masm);
2363 break;
2364 case BinaryOpIC::GENERIC:
2365 GenerateGeneric(masm);
2366 break;
2367 default:
2368 UNREACHABLE();
2369 }
Steve Block44f0eee2011-05-26 01:26:41 +01002370}
2371
2372
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002373void BinaryOpStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002374 const char* op_name = Token::Name(op_);
2375 const char* overwrite_name;
2376 switch (mode_) {
2377 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2378 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2379 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2380 default: overwrite_name = "UnknownOverwrite"; break;
2381 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002382 stream->Add("BinaryOpStub_%s_%s_%s",
2383 op_name,
2384 overwrite_name,
2385 BinaryOpIC::GetName(operands_type_));
Steve Block44f0eee2011-05-26 01:26:41 +01002386}
2387
2388
2389
Ben Murdoch257744e2011-11-30 15:57:28 +00002390void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2391 Register left = a1;
2392 Register right = a0;
2393
2394 Register scratch1 = t0;
2395 Register scratch2 = t1;
2396
2397 ASSERT(right.is(a0));
2398 STATIC_ASSERT(kSmiTag == 0);
2399
2400 Label not_smi_result;
2401 switch (op_) {
2402 case Token::ADD:
2403 __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2404 __ RetOnNoOverflow(scratch1);
2405 // No need to revert anything - right and left are intact.
2406 break;
2407 case Token::SUB:
2408 __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2409 __ RetOnNoOverflow(scratch1);
2410 // No need to revert anything - right and left are intact.
2411 break;
2412 case Token::MUL: {
2413 // Remove tag from one of the operands. This way the multiplication result
2414 // will be a smi if it fits the smi range.
2415 __ SmiUntag(scratch1, right);
2416 // Do multiplication.
2417 // lo = lower 32 bits of scratch1 * left.
2418 // hi = higher 32 bits of scratch1 * left.
2419 __ Mult(left, scratch1);
2420 // Check for overflowing the smi range - no overflow if the upper 33 bits
2421 // of the 64-bit result are identical.
2422 __ mflo(scratch1);
2423 __ mfhi(scratch2);
2424 __ sra(scratch1, scratch1, 31);
2425 __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
2426 // Go slow on zero result to handle -0.
2427 __ mflo(v0);
2428 __ Ret(ne, v0, Operand(zero_reg));
2429 // We need -0 if we were multiplying a negative number with 0 to get 0.
2430 // We know one of them was zero.
2431 __ Addu(scratch2, right, left);
2432 Label skip;
2433 // ARM uses the 'pl' condition, which is 'ge'.
2434 // Negating it results in 'lt'.
2435 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2436 ASSERT(Smi::FromInt(0) == 0);
2437 __ mov(v0, zero_reg);
2438 __ Ret(); // Return smi 0 if the non-zero one was positive.
2439 __ bind(&skip);
2440 // We fall through here if we multiplied a negative number with 0, because
2441 // that would mean we should produce -0.
2442 }
2443 break;
2444 case Token::DIV: {
2445 Label done;
2446 __ SmiUntag(scratch2, right);
2447 __ SmiUntag(scratch1, left);
2448 __ Div(scratch1, scratch2);
2449 // A minor optimization: div may be calculated asynchronously, so we check
2450 // for division by zero before getting the result.
2451 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2452 // If the result is 0, we need to make sure the divisor (right) is
2453 // positive, otherwise it is a -0 case.
2454 // Quotient is in 'lo', remainder is in 'hi'.
2455 // Check for no remainder first.
2456 __ mfhi(scratch1);
2457 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2458 __ mflo(scratch1);
2459 __ Branch(&done, ne, scratch1, Operand(zero_reg));
2460 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2461 __ bind(&done);
2462 // Check that the signed result fits in a Smi.
2463 __ Addu(scratch2, scratch1, Operand(0x40000000));
2464 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2465 __ SmiTag(v0, scratch1);
2466 __ Ret();
2467 }
2468 break;
2469 case Token::MOD: {
2470 Label done;
2471 __ SmiUntag(scratch2, right);
2472 __ SmiUntag(scratch1, left);
2473 __ Div(scratch1, scratch2);
2474 // A minor optimization: div may be calculated asynchronously, so we check
2475 // for division by 0 before calling mfhi.
2476 // Check for zero on the right hand side.
2477 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2478 // If the result is 0, we need to make sure the dividend (left) is
2479 // positive (or 0), otherwise it is a -0 case.
2480 // Remainder is in 'hi'.
2481 __ mfhi(scratch2);
2482 __ Branch(&done, ne, scratch2, Operand(zero_reg));
2483 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2484 __ bind(&done);
2485 // Check that the signed result fits in a Smi.
2486 __ Addu(scratch1, scratch2, Operand(0x40000000));
2487 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2488 __ SmiTag(v0, scratch2);
2489 __ Ret();
2490 }
2491 break;
2492 case Token::BIT_OR:
2493 __ Or(v0, left, Operand(right));
2494 __ Ret();
2495 break;
2496 case Token::BIT_AND:
2497 __ And(v0, left, Operand(right));
2498 __ Ret();
2499 break;
2500 case Token::BIT_XOR:
2501 __ Xor(v0, left, Operand(right));
2502 __ Ret();
2503 break;
2504 case Token::SAR:
2505 // Remove tags from right operand.
2506 __ GetLeastBitsFromSmi(scratch1, right, 5);
2507 __ srav(scratch1, left, scratch1);
2508 // Smi tag result.
2509 __ And(v0, scratch1, Operand(~kSmiTagMask));
2510 __ Ret();
2511 break;
2512 case Token::SHR:
2513 // Remove tags from operands. We can't do this on a 31 bit number
2514 // because then the 0s get shifted into bit 30 instead of bit 31.
2515 __ SmiUntag(scratch1, left);
2516 __ GetLeastBitsFromSmi(scratch2, right, 5);
2517 __ srlv(v0, scratch1, scratch2);
2518 // Unsigned shift is not allowed to produce a negative number, so
2519 // check the sign bit and the sign bit after Smi tagging.
2520 __ And(scratch1, v0, Operand(0xc0000000));
2521 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2522 // Smi tag result.
2523 __ SmiTag(v0);
2524 __ Ret();
2525 break;
2526 case Token::SHL:
2527 // Remove tags from operands.
2528 __ SmiUntag(scratch1, left);
2529 __ GetLeastBitsFromSmi(scratch2, right, 5);
2530 __ sllv(scratch1, scratch1, scratch2);
2531 // Check that the signed result fits in a Smi.
2532 __ Addu(scratch2, scratch1, Operand(0x40000000));
2533 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2534 __ SmiTag(v0, scratch1);
2535 __ Ret();
2536 break;
2537 default:
2538 UNREACHABLE();
2539 }
2540 __ bind(&not_smi_result);
Steve Block44f0eee2011-05-26 01:26:41 +01002541}
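
// A minimal standalone sketch (plain C++, not V8 code) of the MUL overflow
// test above: the mflo/mfhi pair holds the 64-bit product, and the product
// fits in 32 bits exactly when the high word equals the sign extension of
// the low word, which is what comparing mfhi with sra(mflo, 31) checks.
static bool ProductFitsInInt32(int64_t product) {
  return product == static_cast<int64_t>(static_cast<int32_t>(product));
}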
2542
2543
Ben Murdoch257744e2011-11-30 15:57:28 +00002544void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2545 bool smi_operands,
2546 Label* not_numbers,
2547 Label* gc_required) {
2548 Register left = a1;
2549 Register right = a0;
2550 Register scratch1 = t3;
2551 Register scratch2 = t5;
2552 Register scratch3 = t0;
2553
2554 ASSERT(smi_operands || (not_numbers != NULL));
2555 if (smi_operands && FLAG_debug_code) {
2556 __ AbortIfNotSmi(left);
2557 __ AbortIfNotSmi(right);
2558 }
2559
2560 Register heap_number_map = t2;
2561 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2562
2563 switch (op_) {
2564 case Token::ADD:
2565 case Token::SUB:
2566 case Token::MUL:
2567 case Token::DIV:
2568 case Token::MOD: {
2569 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2570 // depending on whether FPU is available or not.
2571 FloatingPointHelper::Destination destination =
2572 CpuFeatures::IsSupported(FPU) &&
2573 op_ != Token::MOD ?
2574 FloatingPointHelper::kFPURegisters :
2575 FloatingPointHelper::kCoreRegisters;
2576
2577 // Allocate new heap number for result.
2578 Register result = s0;
2579 GenerateHeapResultAllocation(
2580 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2581
2582 // Load the operands.
2583 if (smi_operands) {
2584 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2585 } else {
2586 FloatingPointHelper::LoadOperands(masm,
2587 destination,
2588 heap_number_map,
2589 scratch1,
2590 scratch2,
2591 not_numbers);
2592 }
2593
2594 // Calculate the result.
2595 if (destination == FloatingPointHelper::kFPURegisters) {
2596 // Using FPU registers:
2597 // f12: Left value.
2598 // f14: Right value.
2599 CpuFeatures::Scope scope(FPU);
2600 switch (op_) {
2601 case Token::ADD:
2602 __ add_d(f10, f12, f14);
2603 break;
2604 case Token::SUB:
2605 __ sub_d(f10, f12, f14);
2606 break;
2607 case Token::MUL:
2608 __ mul_d(f10, f12, f14);
2609 break;
2610 case Token::DIV:
2611 __ div_d(f10, f12, f14);
2612 break;
2613 default:
2614 UNREACHABLE();
2615 }
2616
2617 // ARM uses a workaround here because of the unaligned HeapNumber
2618 // kValueOffset. On MIPS this workaround is built into sdc1 so
2619 // there's no point in generating even more instructions.
2620 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2621 __ mov(v0, result);
2622 __ Ret();
2623 } else {
2624 // Call the C function to handle the double operation.
2625 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2626 op_,
2627 result,
2628 scratch1);
2629 if (FLAG_debug_code) {
2630 __ stop("Unreachable code.");
2631 }
2632 }
2633 break;
2634 }
2635 case Token::BIT_OR:
2636 case Token::BIT_XOR:
2637 case Token::BIT_AND:
2638 case Token::SAR:
2639 case Token::SHR:
2640 case Token::SHL: {
2641 if (smi_operands) {
2642 __ SmiUntag(a3, left);
2643 __ SmiUntag(a2, right);
2644 } else {
2645 // Convert operands to 32-bit integers. Right in a2 and left in a3.
2646 FloatingPointHelper::ConvertNumberToInt32(masm,
2647 left,
2648 a3,
2649 heap_number_map,
2650 scratch1,
2651 scratch2,
2652 scratch3,
2653 f0,
2654 not_numbers);
2655 FloatingPointHelper::ConvertNumberToInt32(masm,
2656 right,
2657 a2,
2658 heap_number_map,
2659 scratch1,
2660 scratch2,
2661 scratch3,
2662 f0,
2663 not_numbers);
2664 }
2665 Label result_not_a_smi;
2666 switch (op_) {
2667 case Token::BIT_OR:
2668 __ Or(a2, a3, Operand(a2));
2669 break;
2670 case Token::BIT_XOR:
2671 __ Xor(a2, a3, Operand(a2));
2672 break;
2673 case Token::BIT_AND:
2674 __ And(a2, a3, Operand(a2));
2675 break;
2676 case Token::SAR:
2677 // Use only the 5 least significant bits of the shift count.
2678 __ GetLeastBitsFromInt32(a2, a2, 5);
2679 __ srav(a2, a3, a2);
2680 break;
2681 case Token::SHR:
2682 // Use only the 5 least significant bits of the shift count.
2683 __ GetLeastBitsFromInt32(a2, a2, 5);
2684 __ srlv(a2, a3, a2);
2685 // SHR is special because it is required to produce a positive answer.
2686 // The code below for writing into heap numbers isn't capable of
2687 // writing the register as an unsigned int so we go to slow case if we
2688 // hit this case.
2689 if (CpuFeatures::IsSupported(FPU)) {
2690 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2691 } else {
2692 __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2693 }
2694 break;
2695 case Token::SHL:
2696 // Use only the 5 least significant bits of the shift count.
2697 __ GetLeastBitsFromInt32(a2, a2, 5);
2698 __ sllv(a2, a3, a2);
2699 break;
2700 default:
2701 UNREACHABLE();
2702 }
2703 // Check that the *signed* result fits in a smi.
2704 __ Addu(a3, a2, Operand(0x40000000));
2705 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2706 __ SmiTag(v0, a2);
2707 __ Ret();
2708
2709 // Allocate new heap number for result.
2710 __ bind(&result_not_a_smi);
2711 Register result = t1;
2712 if (smi_operands) {
2713 __ AllocateHeapNumber(
2714 result, scratch1, scratch2, heap_number_map, gc_required);
2715 } else {
2716 GenerateHeapResultAllocation(
2717 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2718 }
2719
2720 // a2: Answer as signed int32.
2721 // t1: Heap number to write answer into.
2722
2723 // Nothing can go wrong now, so move the heap number to v0, which is the
2724 // result.
2725 __ mov(v0, t1);
2726
2727 if (CpuFeatures::IsSupported(FPU)) {
2728 // Convert the int32 in a2 to the heap number in a0. As
2729 // mentioned above SHR needs to always produce a positive result.
2730 CpuFeatures::Scope scope(FPU);
2731 __ mtc1(a2, f0);
2732 if (op_ == Token::SHR) {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002733 __ Cvt_d_uw(f0, f0, f22);
Ben Murdoch257744e2011-11-30 15:57:28 +00002734 } else {
2735 __ cvt_d_w(f0, f0);
2736 }
2737 // ARM uses a workaround here because of the unaligned HeapNumber
2738 // kValueOffset. On MIPS this workaround is built into sdc1 so
2739 // there's no point in generating even more instructions.
2740 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2741 __ Ret();
2742 } else {
2743 // Tail call that writes the int32 in a2 to the heap number in v0, using
2744 // a3 and a0 as scratch. v0 is preserved and returned.
2745 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2746 __ TailCallStub(&stub);
2747 }
2748 break;
2749 }
2750 default:
2751 UNREACHABLE();
2752 }
Steve Block44f0eee2011-05-26 01:26:41 +01002753}
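
// A minimal standalone sketch (plain C++, not V8 code) of the SHR corner case
// handled above: JavaScript's >>> produces an unsigned 32-bit result, so a
// shift amount of 0 applied to a value with the top bit set yields a number
// above INT32_MAX that cannot be tagged as a smi; the stub therefore stores
// it as a heap number via an unsigned-to-double conversion (Cvt_d_uw).
static double UnsignedShiftRight(int32_t value, int32_t shift_amount) {
  uint32_t result = static_cast<uint32_t>(value) >> (shift_amount & 0x1f);
  return static_cast<double>(result);  // Always non-negative.
}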
2754
2755
2756 // Generate the smi code. If the operation on smis is successful, a return is
2757 // generated. If the result is not a smi and heap number allocation is not
2758 // requested, the code falls through. If number allocation is requested but a
2759 // heap number cannot be allocated, the code jumps to the label gc_required.
Ben Murdoch257744e2011-11-30 15:57:28 +00002760void BinaryOpStub::GenerateSmiCode(
2761 MacroAssembler* masm,
2762 Label* use_runtime,
Steve Block44f0eee2011-05-26 01:26:41 +01002763 Label* gc_required,
2764 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002765 Label not_smis;
2766
2767 Register left = a1;
2768 Register right = a0;
2769 Register scratch1 = t3;
2770 Register scratch2 = t5;
2771
2772 // Perform combined smi check on both operands.
2773 __ Or(scratch1, left, Operand(right));
2774 STATIC_ASSERT(kSmiTag == 0);
2775 __ JumpIfNotSmi(scratch1, &not_smis);
2776
2777 // If the smi-smi operation results in a smi, a return is generated.
2778 GenerateSmiSmiOperation(masm);
2779
2780 // If heap number results are possible, generate the result in an allocated
2781 // heap number.
2782 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2783 GenerateFPOperation(masm, true, use_runtime, gc_required);
2784 }
2785 __ bind(&not_smis);
Steve Block44f0eee2011-05-26 01:26:41 +01002786}
2787
2788
Ben Murdoch257744e2011-11-30 15:57:28 +00002789void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2790 Label not_smis, call_runtime;
2791
2792 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2793 result_type_ == BinaryOpIC::SMI) {
2794 // Only allow smi results.
2795 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2796 } else {
2797 // Allow heap number result and don't make a transition if a heap number
2798 // cannot be allocated.
2799 GenerateSmiCode(masm,
2800 &call_runtime,
2801 &call_runtime,
2802 ALLOW_HEAPNUMBER_RESULTS);
2803 }
2804
2805 // Code falls through if the result is not returned as either a smi or heap
2806 // number.
2807 GenerateTypeTransition(masm);
2808
2809 __ bind(&call_runtime);
2810 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002811}
2812
2813
Ben Murdoch257744e2011-11-30 15:57:28 +00002814void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2815 ASSERT(operands_type_ == BinaryOpIC::STRING);
2816 // Try to add arguments as strings, otherwise, transition to the generic
2817 // BinaryOpIC type.
2818 GenerateAddStrings(masm);
2819 GenerateTypeTransition(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002820}
2821
2822
Ben Murdoch257744e2011-11-30 15:57:28 +00002823void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2824 Label call_runtime;
2825 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2826 ASSERT(op_ == Token::ADD);
2827 // If both arguments are strings, call the string add stub.
2828 // Otherwise, do a transition.
2829
2830 // Registers containing left and right operands respectively.
2831 Register left = a1;
2832 Register right = a0;
2833
2834 // Test if left operand is a string.
2835 __ JumpIfSmi(left, &call_runtime);
2836 __ GetObjectType(left, a2, a2);
2837 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2838
2839 // Test if right operand is a string.
2840 __ JumpIfSmi(right, &call_runtime);
2841 __ GetObjectType(right, a2, a2);
2842 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2843
2844 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2845 GenerateRegisterArgsPush(masm);
2846 __ TailCallStub(&string_add_stub);
2847
2848 __ bind(&call_runtime);
2849 GenerateTypeTransition(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002850}
2851
2852
Ben Murdoch257744e2011-11-30 15:57:28 +00002853void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2854 ASSERT(operands_type_ == BinaryOpIC::INT32);
2855
2856 Register left = a1;
2857 Register right = a0;
2858 Register scratch1 = t3;
2859 Register scratch2 = t5;
2860 FPURegister double_scratch = f0;
2861 FPURegister single_scratch = f6;
2862
2863 Register heap_number_result = no_reg;
2864 Register heap_number_map = t2;
2865 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2866
2867 Label call_runtime;
2868 // Labels for type transition, used for wrong input or output types.
2869 // Both labels are currently bound to the same position. We use two
2870 // different labels to differentiate the cause leading to a type transition.
2871 Label transition;
2872
2873 // Smi-smi fast case.
2874 Label skip;
2875 __ Or(scratch1, left, right);
2876 __ JumpIfNotSmi(scratch1, &skip);
2877 GenerateSmiSmiOperation(masm);
2878 // Fall through if the result is not a smi.
2879 __ bind(&skip);
2880
2881 switch (op_) {
2882 case Token::ADD:
2883 case Token::SUB:
2884 case Token::MUL:
2885 case Token::DIV:
2886 case Token::MOD: {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002887 // Load both operands and check that they are 32-bit integers.
2888 // Jump to type transition if they are not. The registers a0 and a1 (right
2889 // and left) are preserved for the runtime call.
2890 FloatingPointHelper::Destination destination =
2891 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2892 ? FloatingPointHelper::kFPURegisters
2893 : FloatingPointHelper::kCoreRegisters;
Ben Murdoch257744e2011-11-30 15:57:28 +00002894
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002895 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2896 right,
2897 destination,
2898 f14,
2899 a2,
2900 a3,
2901 heap_number_map,
2902 scratch1,
2903 scratch2,
2904 f2,
2905 &transition);
2906 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2907 left,
2908 destination,
2909 f12,
2910 t0,
2911 t1,
2912 heap_number_map,
2913 scratch1,
2914 scratch2,
2915 f2,
2916 &transition);
Ben Murdoch257744e2011-11-30 15:57:28 +00002917
2918 if (destination == FloatingPointHelper::kFPURegisters) {
2919 CpuFeatures::Scope scope(FPU);
2920 Label return_heap_number;
2921 switch (op_) {
2922 case Token::ADD:
2923 __ add_d(f10, f12, f14);
2924 break;
2925 case Token::SUB:
2926 __ sub_d(f10, f12, f14);
2927 break;
2928 case Token::MUL:
2929 __ mul_d(f10, f12, f14);
2930 break;
2931 case Token::DIV:
2932 __ div_d(f10, f12, f14);
2933 break;
2934 default:
2935 UNREACHABLE();
2936 }
2937
2938 if (op_ != Token::DIV) {
2939 // These operations produce an integer result.
2940 // Try to return a smi if we can.
2941 // Otherwise return a heap number if allowed, or jump to type
2942 // transition.
2943
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002944 Register except_flag = scratch2;
2945 __ EmitFPUTruncate(kRoundToZero,
2946 single_scratch,
2947 f10,
2948 scratch1,
2949 except_flag);
Ben Murdoch257744e2011-11-30 15:57:28 +00002950
2951 if (result_type_ <= BinaryOpIC::INT32) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002952 // If except_flag != 0, result does not fit in a 32-bit integer.
2953 __ Branch(&transition, ne, except_flag, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00002954 }
2955
2956 // Check if the result fits in a smi.
2957 __ mfc1(scratch1, single_scratch);
2958 __ Addu(scratch2, scratch1, Operand(0x40000000));
2959 // If not try to return a heap number.
2960 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2961 // Check for minus zero. Return heap number for minus zero.
2962 Label not_zero;
2963 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
2964 __ mfc1(scratch2, f11);
2965 __ And(scratch2, scratch2, HeapNumber::kSignMask);
2966 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2967 __ bind(&not_zero);
2968
2969 // Tag the result and return.
2970 __ SmiTag(v0, scratch1);
2971 __ Ret();
2972 } else {
2973 // DIV just falls through to allocating a heap number.
2974 }
2975
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002976 __ bind(&return_heap_number);
2977 // Return a heap number, or fall through to type transition or runtime
2978 // call if we can't.
2979 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2980 : BinaryOpIC::INT32)) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002981 // We are using FPU registers so s0 is available.
2982 heap_number_result = s0;
2983 GenerateHeapResultAllocation(masm,
2984 heap_number_result,
2985 heap_number_map,
2986 scratch1,
2987 scratch2,
2988 &call_runtime);
2989 __ mov(v0, heap_number_result);
2990 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2991 __ Ret();
2992 }
2993
2994 // A DIV operation expecting an integer result falls through
2995 // to type transition.
2996
2997 } else {
2998 // We preserved a0 and a1 to be able to call runtime.
2999 // Save the left value on the stack.
3000 __ Push(t1, t0);
3001
3002 Label pop_and_call_runtime;
3003
3004 // Allocate a heap number to store the result.
3005 heap_number_result = s0;
3006 GenerateHeapResultAllocation(masm,
3007 heap_number_result,
3008 heap_number_map,
3009 scratch1,
3010 scratch2,
3011 &pop_and_call_runtime);
3012
3013 // Load the left value from the value saved on the stack.
3014 __ Pop(a1, a0);
3015
3016 // Call the C function to handle the double operation.
3017 FloatingPointHelper::CallCCodeForDoubleOperation(
3018 masm, op_, heap_number_result, scratch1);
3019 if (FLAG_debug_code) {
3020 __ stop("Unreachable code.");
3021 }
3022
3023 __ bind(&pop_and_call_runtime);
3024 __ Drop(2);
3025 __ Branch(&call_runtime);
3026 }
3027
3028 break;
3029 }
3030
3031 case Token::BIT_OR:
3032 case Token::BIT_XOR:
3033 case Token::BIT_AND:
3034 case Token::SAR:
3035 case Token::SHR:
3036 case Token::SHL: {
3037 Label return_heap_number;
3038 Register scratch3 = t1;
3039 // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3040 // registers a0 and a1 (right and left) are preserved for the runtime
3041 // call.
3042 FloatingPointHelper::LoadNumberAsInt32(masm,
3043 left,
3044 a3,
3045 heap_number_map,
3046 scratch1,
3047 scratch2,
3048 scratch3,
3049 f0,
3050 &transition);
3051 FloatingPointHelper::LoadNumberAsInt32(masm,
3052 right,
3053 a2,
3054 heap_number_map,
3055 scratch1,
3056 scratch2,
3057 scratch3,
3058 f0,
3059 &transition);
3060
3061 // The ECMA-262 standard specifies that, for shift operations, only the
3062 // 5 least significant bits of the shift value should be used.
3063 switch (op_) {
3064 case Token::BIT_OR:
3065 __ Or(a2, a3, Operand(a2));
3066 break;
3067 case Token::BIT_XOR:
3068 __ Xor(a2, a3, Operand(a2));
3069 break;
3070 case Token::BIT_AND:
3071 __ And(a2, a3, Operand(a2));
3072 break;
3073 case Token::SAR:
3074 __ And(a2, a2, Operand(0x1f));
3075 __ srav(a2, a3, a2);
3076 break;
3077 case Token::SHR:
3078 __ And(a2, a2, Operand(0x1f));
3079 __ srlv(a2, a3, a2);
3080 // SHR is special because it is required to produce a positive answer.
3081 // We only get a negative result if the shift value (a2) is 0.
3082          // This result cannot be represented as a signed 32-bit integer,
3083          // so try to return a heap number if we can (see the note after
3084          // this switch). The non-FPU code does not support this special
3085          // case, so jump to the runtime if we don't support it.
3086 if (CpuFeatures::IsSupported(FPU)) {
3087 __ Branch((result_type_ <= BinaryOpIC::INT32)
3088 ? &transition
3089 : &return_heap_number,
3090 lt,
3091 a2,
3092 Operand(zero_reg));
3093 } else {
3094 __ Branch((result_type_ <= BinaryOpIC::INT32)
3095 ? &transition
3096 : &call_runtime,
3097 lt,
3098 a2,
3099 Operand(zero_reg));
3100 }
3101 break;
3102 case Token::SHL:
3103 __ And(a2, a2, Operand(0x1f));
3104 __ sllv(a2, a3, a2);
3105 break;
3106 default:
3107 UNREACHABLE();
3108 }
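      // Illustrative note (not part of the emitted code), using plain JS
      // semantics rather than anything stub-specific:
      //   (1 << 35) === 8, because only 35 & 0x1f == 3 bits of shift apply;
      //   (-1 >>> 0) === 4294967295, which exceeds the int32 (and smi) range,
      //   so the SHR-by-zero case above needs a heap number or the runtime.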
3109
3110 // Check if the result fits in a smi.
3111 __ Addu(scratch1, a2, Operand(0x40000000));
3112      // If not, try to return a heap number. (We know the result is an int32.)
3113 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3114 // Tag the result and return.
3115 __ SmiTag(v0, a2);
3116 __ Ret();
3117
3118 __ bind(&return_heap_number);
3119 heap_number_result = t1;
3120 GenerateHeapResultAllocation(masm,
3121 heap_number_result,
3122 heap_number_map,
3123 scratch1,
3124 scratch2,
3125 &call_runtime);
3126
3127 if (CpuFeatures::IsSupported(FPU)) {
3128 CpuFeatures::Scope scope(FPU);
3129
3130 if (op_ != Token::SHR) {
3131 // Convert the result to a floating point value.
3132 __ mtc1(a2, double_scratch);
3133 __ cvt_d_w(double_scratch, double_scratch);
3134 } else {
3135 // The result must be interpreted as an unsigned 32-bit integer.
3136 __ mtc1(a2, double_scratch);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00003137 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003138 }
3139
3140 // Store the result.
3141 __ mov(v0, heap_number_result);
3142 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
3143 __ Ret();
3144 } else {
3145 // Tail call that writes the int32 in a2 to the heap number in v0, using
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003146 // a3 and a0 as scratch. v0 is preserved and returned.
Ben Murdoch257744e2011-11-30 15:57:28 +00003147 __ mov(a0, t1);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003148 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
Ben Murdoch257744e2011-11-30 15:57:28 +00003149 __ TailCallStub(&stub);
3150 }
3151
3152 break;
3153 }
3154
3155 default:
3156 UNREACHABLE();
3157 }
3158
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003159 // We never expect DIV to yield an integer result, so we always generate
3160 // type transition code for DIV operations expecting an integer result: the
3161 // code will fall through to this type transition.
3162 if (transition.is_linked() ||
3163 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003164 __ bind(&transition);
3165 GenerateTypeTransition(masm);
3166 }
3167
3168 __ bind(&call_runtime);
3169 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003170}
3171
3172
Ben Murdoch257744e2011-11-30 15:57:28 +00003173void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3174 Label call_runtime;
3175
3176 if (op_ == Token::ADD) {
3177 // Handle string addition here, because it is the only operation
3178 // that does not do a ToNumber conversion on the operands.
3179 GenerateAddStrings(masm);
3180 }
3181
3182 // Convert oddball arguments to numbers.
3183 Label check, done;
3184 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3185 __ Branch(&check, ne, a1, Operand(t0));
3186 if (Token::IsBitOp(op_)) {
3187 __ li(a1, Operand(Smi::FromInt(0)));
3188 } else {
3189 __ LoadRoot(a1, Heap::kNanValueRootIndex);
3190 }
3191 __ jmp(&done);
3192 __ bind(&check);
3193 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3194 __ Branch(&done, ne, a0, Operand(t0));
3195 if (Token::IsBitOp(op_)) {
3196 __ li(a0, Operand(Smi::FromInt(0)));
3197 } else {
3198 __ LoadRoot(a0, Heap::kNanValueRootIndex);
3199 }
3200 __ bind(&done);
3201
3202 GenerateHeapNumberStub(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003203}
3204
3205
Ben Murdoch257744e2011-11-30 15:57:28 +00003206void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3207 Label call_runtime;
3208 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3209
3210 __ bind(&call_runtime);
3211 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003212}
3213
3214
Ben Murdoch257744e2011-11-30 15:57:28 +00003215void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3216 Label call_runtime, call_string_add_or_runtime;
3217
3218 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3219
3220 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3221
3222 __ bind(&call_string_add_or_runtime);
3223 if (op_ == Token::ADD) {
3224 GenerateAddStrings(masm);
3225 }
3226
3227 __ bind(&call_runtime);
3228 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003229}
3230
3231
Ben Murdoch257744e2011-11-30 15:57:28 +00003232void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3233 ASSERT(op_ == Token::ADD);
3234 Label left_not_string, call_runtime;
3235
3236 Register left = a1;
3237 Register right = a0;
3238
3239 // Check if left argument is a string.
3240 __ JumpIfSmi(left, &left_not_string);
3241 __ GetObjectType(left, a2, a2);
3242 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3243
3244 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3245 GenerateRegisterArgsPush(masm);
3246 __ TailCallStub(&string_add_left_stub);
3247
3248 // Left operand is not a string, test right.
3249 __ bind(&left_not_string);
3250 __ JumpIfSmi(right, &call_runtime);
3251 __ GetObjectType(right, a2, a2);
3252 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3253
3254 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3255 GenerateRegisterArgsPush(masm);
3256 __ TailCallStub(&string_add_right_stub);
3257
3258 // At least one argument is not a string.
3259 __ bind(&call_runtime);
3260}
3261
3262
3263void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3264 GenerateRegisterArgsPush(masm);
3265 switch (op_) {
3266 case Token::ADD:
3267 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3268 break;
3269 case Token::SUB:
3270 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3271 break;
3272 case Token::MUL:
3273 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3274 break;
3275 case Token::DIV:
3276 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3277 break;
3278 case Token::MOD:
3279 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3280 break;
3281 case Token::BIT_OR:
3282 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3283 break;
3284 case Token::BIT_AND:
3285 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3286 break;
3287 case Token::BIT_XOR:
3288 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3289 break;
3290 case Token::SAR:
3291 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3292 break;
3293 case Token::SHR:
3294 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3295 break;
3296 case Token::SHL:
3297 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3298 break;
3299 default:
3300 UNREACHABLE();
3301 }
3302}
3303
3304
3305void BinaryOpStub::GenerateHeapResultAllocation(
Steve Block44f0eee2011-05-26 01:26:41 +01003306 MacroAssembler* masm,
3307 Register result,
3308 Register heap_number_map,
3309 Register scratch1,
3310 Register scratch2,
3311 Label* gc_required) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003312
3313  // The code below will clobber result if allocation fails. To keep both
3314  // arguments intact for the runtime call, result cannot be one of these.
3315 ASSERT(!result.is(a0) && !result.is(a1));
3316
3317 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3318 Label skip_allocation, allocated;
3319 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3320 // If the overwritable operand is already an object, we skip the
3321 // allocation of a heap number.
3322 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3323 // Allocate a heap number for the result.
3324 __ AllocateHeapNumber(
3325 result, scratch1, scratch2, heap_number_map, gc_required);
3326 __ Branch(&allocated);
3327 __ bind(&skip_allocation);
3328 // Use object holding the overwritable operand for result.
3329 __ mov(result, overwritable_operand);
3330 __ bind(&allocated);
3331 } else {
3332 ASSERT(mode_ == NO_OVERWRITE);
3333 __ AllocateHeapNumber(
3334 result, scratch1, scratch2, heap_number_map, gc_required);
3335 }
Steve Block44f0eee2011-05-26 01:26:41 +01003336}
3337
3338
Ben Murdoch257744e2011-11-30 15:57:28 +00003339void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3340 __ Push(a1, a0);
Steve Block44f0eee2011-05-26 01:26:41 +01003341}
3342
3343
3344
3345void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003346 // Untagged case: double input in f4, double result goes
3347 // into f4.
3348 // Tagged case: tagged input on top of stack and in a0,
3349 // tagged result (heap number) goes into v0.
3350
3351 Label input_not_smi;
3352 Label loaded;
3353 Label calculate;
3354 Label invalid_cache;
3355 const Register scratch0 = t5;
3356 const Register scratch1 = t3;
3357 const Register cache_entry = a0;
3358 const bool tagged = (argument_type_ == TAGGED);
3359
3360 if (CpuFeatures::IsSupported(FPU)) {
3361 CpuFeatures::Scope scope(FPU);
3362
3363 if (tagged) {
3364      // Argument is a number and is on the stack and in a0.
3365 // Load argument and check if it is a smi.
3366 __ JumpIfNotSmi(a0, &input_not_smi);
3367
3368 // Input is a smi. Convert to double and load the low and high words
3369 // of the double into a2, a3.
3370 __ sra(t0, a0, kSmiTagSize);
3371 __ mtc1(t0, f4);
3372 __ cvt_d_w(f4, f4);
3373 __ Move(a2, a3, f4);
3374 __ Branch(&loaded);
3375
3376 __ bind(&input_not_smi);
3377 // Check if input is a HeapNumber.
3378 __ CheckMap(a0,
3379 a1,
3380 Heap::kHeapNumberMapRootIndex,
3381 &calculate,
3382 DONT_DO_SMI_CHECK);
3383      // Input is a HeapNumber. Load the
3384      // low and high words into a2, a3.
3385 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3386 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3387 } else {
3388 // Input is untagged double in f4. Output goes to f4.
3389 __ Move(a2, a3, f4);
3390 }
3391 __ bind(&loaded);
3392 // a2 = low 32 bits of double value.
3393 // a3 = high 32 bits of double value.
3394 // Compute hash (the shifts are arithmetic):
3395 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3396 __ Xor(a1, a2, a3);
3397 __ sra(t0, a1, 16);
3398 __ Xor(a1, a1, t0);
3399 __ sra(t0, a1, 8);
3400 __ Xor(a1, a1, t0);
3401 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3402 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
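    // Illustrative sketch (not part of the emitted code) of the same hash on
    // the host side, assuming a power-of-two cache size (asserted above) and
    // arithmetic right shifts as in the instructions above:
    //   uint32_t h = lo ^ hi;
    //   h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);
    //   h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
    //   index = h & (kCacheSize - 1);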
3403
3404 // a2 = low 32 bits of double value.
3405 // a3 = high 32 bits of double value.
3406 // a1 = TranscendentalCache::hash(double value).
3407 __ li(cache_entry, Operand(
3408 ExternalReference::transcendental_cache_array_address(
3409 masm->isolate())));
3410 // a0 points to cache array.
3411 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3412 Isolate::Current()->transcendental_cache()->caches_[0])));
3413 // a0 points to the cache for the type type_.
3414 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3415 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3416
3417#ifdef DEBUG
3418 // Check that the layout of cache elements match expectations.
3419 { TranscendentalCache::SubCache::Element test_elem[2];
3420 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3421 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3422 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3423 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3424 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3425 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3426 CHECK_EQ(0, elem_in0 - elem_start);
3427 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3428 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3429 }
3430#endif
3431
3432    // Find the address of the a1-th entry in the cache, i.e., &a0[a1 * 12].
3433 __ sll(t0, a1, 1);
3434 __ Addu(a1, a1, t0);
3435 __ sll(t0, a1, 2);
3436 __ Addu(cache_entry, cache_entry, t0);
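    // Illustrative note (not part of the emitted code): each cache element is
    // 12 bytes (two uint32_t inputs plus an output pointer, as verified in the
    // DEBUG block above), and a1 * 12 is formed without a multiply:
    //   t0 = a1 << 1  (a1 * 2);  a1 += t0  (a1 * 3);  t0 = a1 << 2  (a1 * 12).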
3437
3438 // Check if cache matches: Double value is stored in uint32_t[2] array.
3439 __ lw(t0, MemOperand(cache_entry, 0));
3440 __ lw(t1, MemOperand(cache_entry, 4));
3441 __ lw(t2, MemOperand(cache_entry, 8));
Ben Murdoch257744e2011-11-30 15:57:28 +00003442 __ Branch(&calculate, ne, a2, Operand(t0));
3443 __ Branch(&calculate, ne, a3, Operand(t1));
3444 // Cache hit. Load result, cleanup and return.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003445 Counters* counters = masm->isolate()->counters();
3446 __ IncrementCounter(
3447 counters->transcendental_cache_hit(), 1, scratch0, scratch1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003448 if (tagged) {
3449 // Pop input value from stack and load result into v0.
3450 __ Drop(1);
3451 __ mov(v0, t2);
3452 } else {
3453 // Load result into f4.
3454 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3455 }
3456 __ Ret();
3457 } // if (CpuFeatures::IsSupported(FPU))
3458
3459 __ bind(&calculate);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003460 Counters* counters = masm->isolate()->counters();
3461 __ IncrementCounter(
3462 counters->transcendental_cache_miss(), 1, scratch0, scratch1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003463 if (tagged) {
3464 __ bind(&invalid_cache);
3465 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3466 masm->isolate()),
3467 1,
3468 1);
3469 } else {
3470 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3471 CpuFeatures::Scope scope(FPU);
3472
3473 Label no_update;
3474 Label skip_cache;
3475 const Register heap_number_map = t2;
3476
3477 // Call C function to calculate the result and update the cache.
3478 // Register a0 holds precalculated cache entry address; preserve
3479 // it on the stack and pop it into register cache_entry after the
3480 // call.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003481 __ Push(cache_entry, a2, a3);
Ben Murdoch257744e2011-11-30 15:57:28 +00003482 GenerateCallCFunction(masm, scratch0);
3483 __ GetCFunctionDoubleResult(f4);
3484
3485 // Try to update the cache. If we cannot allocate a
3486 // heap number, we return the result without updating.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003487 __ Pop(cache_entry, a2, a3);
Ben Murdoch257744e2011-11-30 15:57:28 +00003488 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3489 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3490 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3491
3492 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3493 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3494 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3495
3496 __ mov(v0, cache_entry);
3497 __ Ret();
3498
3499 __ bind(&invalid_cache);
3500 // The cache is invalid. Call runtime which will recreate the
3501 // cache.
3502 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3503 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3504 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003505 {
3506 FrameScope scope(masm, StackFrame::INTERNAL);
3507 __ push(a0);
3508 __ CallRuntime(RuntimeFunction(), 1);
3509 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003510 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3511 __ Ret();
3512
3513 __ bind(&skip_cache);
3514      // Call the C function to calculate the result and return it
3515      // directly without updating the cache.
3516 GenerateCallCFunction(masm, scratch0);
3517 __ GetCFunctionDoubleResult(f4);
3518 __ bind(&no_update);
3519
3520 // We return the value in f4 without adding it to the cache, but
3521 // we cause a scavenging GC so that future allocations will succeed.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003522 {
3523 FrameScope scope(masm, StackFrame::INTERNAL);
Ben Murdoch257744e2011-11-30 15:57:28 +00003524
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003525 // Allocate an aligned object larger than a HeapNumber.
3526 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3527 __ li(scratch0, Operand(4 * kPointerSize));
3528 __ push(scratch0);
3529 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3530 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003531 __ Ret();
3532 }
3533}
3534
3535
3536void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3537 Register scratch) {
3538 __ push(ra);
3539 __ PrepareCallCFunction(2, scratch);
3540 if (IsMipsSoftFloatABI) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003541 __ Move(a0, a1, f4);
Ben Murdoch257744e2011-11-30 15:57:28 +00003542 } else {
3543 __ mov_d(f12, f4);
3544 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003545 AllowExternalCallThatCantCauseGC scope(masm);
3546 Isolate* isolate = masm->isolate();
Ben Murdoch257744e2011-11-30 15:57:28 +00003547 switch (type_) {
3548 case TranscendentalCache::SIN:
3549 __ CallCFunction(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003550 ExternalReference::math_sin_double_function(isolate),
3551 0, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003552 break;
3553 case TranscendentalCache::COS:
3554 __ CallCFunction(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003555 ExternalReference::math_cos_double_function(isolate),
3556 0, 1);
3557 break;
3558 case TranscendentalCache::TAN:
3559 __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3560 0, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003561 break;
3562 case TranscendentalCache::LOG:
3563 __ CallCFunction(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003564 ExternalReference::math_log_double_function(isolate),
3565 0, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003566 break;
3567 default:
3568 UNIMPLEMENTED();
3569 break;
3570 }
3571 __ pop(ra);
Steve Block44f0eee2011-05-26 01:26:41 +01003572}
3573
3574
3575Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
Ben Murdoch257744e2011-11-30 15:57:28 +00003576 switch (type_) {
3577 // Add more cases when necessary.
3578 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3579 case TranscendentalCache::COS: return Runtime::kMath_cos;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003580 case TranscendentalCache::TAN: return Runtime::kMath_tan;
Ben Murdoch257744e2011-11-30 15:57:28 +00003581 case TranscendentalCache::LOG: return Runtime::kMath_log;
3582 default:
3583 UNIMPLEMENTED();
3584 return Runtime::kAbort;
3585 }
Steve Block44f0eee2011-05-26 01:26:41 +01003586}
3587
3588
3589void StackCheckStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003590 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01003591}
3592
3593
Ben Murdoch257744e2011-11-30 15:57:28 +00003594void MathPowStub::Generate(MacroAssembler* masm) {
3595 Label call_runtime;
3596
3597 if (CpuFeatures::IsSupported(FPU)) {
3598 CpuFeatures::Scope scope(FPU);
3599
3600 Label base_not_smi;
3601 Label exponent_not_smi;
3602 Label convert_exponent;
3603
3604 const Register base = a0;
3605 const Register exponent = a2;
3606 const Register heapnumbermap = t1;
3607 const Register heapnumber = s0; // Callee-saved register.
3608 const Register scratch = t2;
3609 const Register scratch2 = t3;
3610
3611    // Allocate FP values in the ABI-parameter-passing regs.
3612 const DoubleRegister double_base = f12;
3613 const DoubleRegister double_exponent = f14;
3614 const DoubleRegister double_result = f0;
3615 const DoubleRegister double_scratch = f2;
3616
3617 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3618 __ lw(base, MemOperand(sp, 1 * kPointerSize));
3619 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3620
3621 // Convert base to double value and store it in f0.
3622 __ JumpIfNotSmi(base, &base_not_smi);
3623 // Base is a Smi. Untag and convert it.
3624 __ SmiUntag(base);
3625 __ mtc1(base, double_scratch);
3626 __ cvt_d_w(double_base, double_scratch);
3627 __ Branch(&convert_exponent);
3628
3629 __ bind(&base_not_smi);
3630 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3631 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3632 // Base is a heapnumber. Load it into double register.
3633 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3634
3635 __ bind(&convert_exponent);
3636 __ JumpIfNotSmi(exponent, &exponent_not_smi);
3637 __ SmiUntag(exponent);
3638
3639 // The base is in a double register and the exponent is
3640 // an untagged smi. Allocate a heap number and call a
3641 // C function for integer exponents. The register containing
3642 // the heap number is callee-saved.
3643 __ AllocateHeapNumber(heapnumber,
3644 scratch,
3645 scratch2,
3646 heapnumbermap,
3647 &call_runtime);
3648 __ push(ra);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003649 __ PrepareCallCFunction(1, 1, scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003650 __ SetCallCDoubleArguments(double_base, exponent);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003651 {
3652 AllowExternalCallThatCantCauseGC scope(masm);
3653 __ CallCFunction(
3654 ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
3655 __ pop(ra);
3656 __ GetCFunctionDoubleResult(double_result);
3657 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003658 __ sdc1(double_result,
3659 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3660 __ mov(v0, heapnumber);
3661 __ DropAndRet(2 * kPointerSize);
3662
3663 __ bind(&exponent_not_smi);
3664 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3665 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3666 // Exponent is a heapnumber. Load it into double register.
3667 __ ldc1(double_exponent,
3668 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3669
3670 // The base and the exponent are in double registers.
3671 // Allocate a heap number and call a C function for
3672 // double exponents. The register containing
3673 // the heap number is callee-saved.
3674 __ AllocateHeapNumber(heapnumber,
3675 scratch,
3676 scratch2,
3677 heapnumbermap,
3678 &call_runtime);
3679 __ push(ra);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003680 __ PrepareCallCFunction(0, 2, scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003681 // ABI (o32) for func(double a, double b): a in f12, b in f14.
3682 ASSERT(double_base.is(f12));
3683 ASSERT(double_exponent.is(f14));
3684 __ SetCallCDoubleArguments(double_base, double_exponent);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003685 {
3686 AllowExternalCallThatCantCauseGC scope(masm);
3687 __ CallCFunction(
3688 ExternalReference::power_double_double_function(masm->isolate()),
3689 0,
3690 2);
3691 __ pop(ra);
3692 __ GetCFunctionDoubleResult(double_result);
3693 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003694 __ sdc1(double_result,
3695 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3696 __ mov(v0, heapnumber);
3697 __ DropAndRet(2 * kPointerSize);
3698 }
3699
3700 __ bind(&call_runtime);
3701 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01003702}
3703
3704
3705bool CEntryStub::NeedsImmovableCode() {
3706 return true;
3707}
3708
3709
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003710bool CEntryStub::IsPregenerated() {
3711 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3712 result_size_ == 1;
3713}
3714
3715
3716void CodeStub::GenerateStubsAheadOfTime() {
3717 CEntryStub::GenerateAheadOfTime();
3718 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
3719 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3720 RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3721}
3722
3723
3724void CodeStub::GenerateFPStubs() {
3725 CEntryStub save_doubles(1, kSaveFPRegs);
3726 Handle<Code> code = save_doubles.GetCode();
3727 code->set_is_pregenerated(true);
3728 StoreBufferOverflowStub stub(kSaveFPRegs);
3729 stub.GetCode()->set_is_pregenerated(true);
3730 code->GetIsolate()->set_fp_stubs_generated(true);
3731}
3732
3733
3734void CEntryStub::GenerateAheadOfTime() {
3735 CEntryStub stub(1, kDontSaveFPRegs);
3736 Handle<Code> code = stub.GetCode();
3737 code->set_is_pregenerated(true);
3738}
3739
3740
Steve Block44f0eee2011-05-26 01:26:41 +01003741void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003742 __ Throw(v0);
Steve Block44f0eee2011-05-26 01:26:41 +01003743}
3744
3745
3746void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3747 UncatchableExceptionType type) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003748 __ ThrowUncatchable(type, v0);
Steve Block44f0eee2011-05-26 01:26:41 +01003749}
3750
3751
3752void CEntryStub::GenerateCore(MacroAssembler* masm,
3753 Label* throw_normal_exception,
3754 Label* throw_termination_exception,
3755 Label* throw_out_of_memory_exception,
3756 bool do_gc,
3757 bool always_allocate) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003758 // v0: result parameter for PerformGC, if any
3759 // s0: number of arguments including receiver (C callee-saved)
3760 // s1: pointer to the first argument (C callee-saved)
3761 // s2: pointer to builtin function (C callee-saved)
3762
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003763 Isolate* isolate = masm->isolate();
3764
Ben Murdoch257744e2011-11-30 15:57:28 +00003765 if (do_gc) {
3766 // Move result passed in v0 into a0 to call PerformGC.
3767 __ mov(a0, v0);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003768 __ PrepareCallCFunction(1, 0, a1);
3769 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
Ben Murdoch257744e2011-11-30 15:57:28 +00003770 }
3771
3772 ExternalReference scope_depth =
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003773 ExternalReference::heap_always_allocate_scope_depth(isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00003774 if (always_allocate) {
3775 __ li(a0, Operand(scope_depth));
3776 __ lw(a1, MemOperand(a0));
3777 __ Addu(a1, a1, Operand(1));
3778 __ sw(a1, MemOperand(a0));
3779 }
3780
3781 // Prepare arguments for C routine: a0 = argc, a1 = argv
3782 __ mov(a0, s0);
3783 __ mov(a1, s1);
3784
3785 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3786 // also need to reserve the 4 argument slots on the stack.
3787
3788 __ AssertStackIsAligned();
3789
3790 __ li(a2, Operand(ExternalReference::isolate_address()));
3791
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003792 // To let the GC traverse the return address of the exit frames, we need to
3793 // know where the return address is. The CEntryStub is unmovable, so
3794 // we can store the address on the stack to be able to find it again and
3795 // we never have to restore it, because it will not change.
Ben Murdoch257744e2011-11-30 15:57:28 +00003796 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3797 // This branch-and-link sequence is needed to find the current PC on mips,
3798 // saved to the ra register.
3799 // Use masm-> here instead of the double-underscore macro since extra
3800 // coverage code can interfere with the proper calculation of ra.
3801 Label find_ra;
3802 masm->bal(&find_ra); // bal exposes branch delay slot.
3803 masm->nop(); // Branch delay slot nop.
3804 masm->bind(&find_ra);
3805
3806 // Adjust the value in ra to point to the correct return location, 2nd
3807 // instruction past the real call into C code (the jalr(t9)), and push it.
3808 // This is the return address of the exit frame.
3809 const int kNumInstructionsToJump = 6;
3810 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3811 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
Ben Murdoch589d6972011-11-30 16:04:58 +00003812 masm->Subu(sp, sp, kCArgsSlotsSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00003813 // Stack is still aligned.
3814
3815 // Call the C routine.
3816 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3817 masm->jalr(t9);
3818 masm->nop(); // Branch delay slot nop.
3819 // Make sure the stored 'ra' points to this position.
3820 ASSERT_EQ(kNumInstructionsToJump,
3821 masm->InstructionsGeneratedSince(&find_ra));
3822 }
3823
3824 // Restore stack (remove arg slots).
Ben Murdoch589d6972011-11-30 16:04:58 +00003825 __ Addu(sp, sp, kCArgsSlotsSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00003826
3827 if (always_allocate) {
3828 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3829 __ li(a2, Operand(scope_depth));
3830 __ lw(a3, MemOperand(a2));
3831 __ Subu(a3, a3, Operand(1));
3832 __ sw(a3, MemOperand(a2));
3833 }
3834
3835 // Check for failure result.
3836 Label failure_returned;
3837 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3838 __ addiu(a2, v0, 1);
3839 __ andi(t0, a2, kFailureTagMask);
3840 __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
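  // Illustrative note (not part of the emitted code), assuming the tagging
  // scheme of this revision where failures carry kFailureTag (0b11) in their
  // low kFailureTagSize bits: adding 1 clears those bits only for a failure,
  // so ((v0 + 1) & kFailureTagMask) == 0 holds exactly for failure results,
  // which is what the addiu/andi/Branch sequence above tests.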
3841
3842 // Exit C frame and return.
3843 // v0:v1: result
3844 // sp: stack pointer
3845 // fp: frame pointer
3846 __ LeaveExitFrame(save_doubles_, s0);
3847 __ Ret();
3848
3849 // Check if we should retry or throw exception.
3850 Label retry;
3851 __ bind(&failure_returned);
3852 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3853 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3854 __ Branch(&retry, eq, t0, Operand(zero_reg));
3855
3856 // Special handling of out of memory exceptions.
3857 Failure* out_of_memory = Failure::OutOfMemoryException();
3858 __ Branch(throw_out_of_memory_exception, eq,
3859 v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3860
3861 // Retrieve the pending exception and clear the variable.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003862 __ li(a3, Operand(isolate->factory()->the_hole_value()));
Ben Murdoch589d6972011-11-30 16:04:58 +00003863 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003864 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00003865 __ lw(v0, MemOperand(t0));
3866 __ sw(a3, MemOperand(t0));
3867
3868 // Special handling of termination exceptions which are uncatchable
3869 // by javascript code.
3870 __ Branch(throw_termination_exception, eq,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003871 v0, Operand(isolate->factory()->termination_exception()));
Ben Murdoch257744e2011-11-30 15:57:28 +00003872
3873 // Handle normal exception.
3874 __ jmp(throw_normal_exception);
3875
3876 __ bind(&retry);
3877 // Last failure (v0) will be moved to (a0) for parameter when retrying.
Steve Block44f0eee2011-05-26 01:26:41 +01003878}
3879
3880
3881void CEntryStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003882 // Called from JavaScript; parameters are on stack as if calling JS function
3883 // a0: number of arguments including receiver
3884 // a1: pointer to builtin function
3885 // fp: frame pointer (restored after C call)
3886 // sp: stack pointer (restored as callee's sp after C call)
3887 // cp: current context (C callee-saved)
3888
3889 // NOTE: Invocations of builtins may return failure objects
3890 // instead of a proper result. The builtin entry handles
3891 // this by performing a garbage collection and retrying the
3892 // builtin once.
3893
3894 // Compute the argv pointer in a callee-saved register.
3895 __ sll(s1, a0, kPointerSizeLog2);
3896 __ Addu(s1, sp, s1);
3897 __ Subu(s1, s1, Operand(kPointerSize));
3898
3899 // Enter the exit frame that transitions from JavaScript to C++.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003900 FrameScope scope(masm, StackFrame::MANUAL);
Ben Murdoch257744e2011-11-30 15:57:28 +00003901 __ EnterExitFrame(save_doubles_);
3902
3903  // Set up argc and the builtin function in callee-saved registers.
3904 __ mov(s0, a0);
3905 __ mov(s2, a1);
3906
3907 // s0: number of arguments (C callee-saved)
3908 // s1: pointer to first argument (C callee-saved)
3909 // s2: pointer to builtin function (C callee-saved)
3910
3911 Label throw_normal_exception;
3912 Label throw_termination_exception;
3913 Label throw_out_of_memory_exception;
3914
3915 // Call into the runtime system.
3916 GenerateCore(masm,
3917 &throw_normal_exception,
3918 &throw_termination_exception,
3919 &throw_out_of_memory_exception,
3920 false,
3921 false);
3922
3923 // Do space-specific GC and retry runtime call.
3924 GenerateCore(masm,
3925 &throw_normal_exception,
3926 &throw_termination_exception,
3927 &throw_out_of_memory_exception,
3928 true,
3929 false);
3930
3931 // Do full GC and retry runtime call one final time.
3932 Failure* failure = Failure::InternalError();
3933 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
3934 GenerateCore(masm,
3935 &throw_normal_exception,
3936 &throw_termination_exception,
3937 &throw_out_of_memory_exception,
3938 true,
3939 true);
3940
3941 __ bind(&throw_out_of_memory_exception);
3942 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
3943
3944 __ bind(&throw_termination_exception);
3945 GenerateThrowUncatchable(masm, TERMINATION);
3946
3947 __ bind(&throw_normal_exception);
3948 GenerateThrowTOS(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003949}
3950
3951
3952void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003953 Label invoke, handler_entry, exit;
3954 Isolate* isolate = masm->isolate();
Ben Murdoch257744e2011-11-30 15:57:28 +00003955
3956 // Registers:
3957 // a0: entry address
3958 // a1: function
3959  // a2: receiver
3960 // a3: argc
3961 //
3962 // Stack:
3963 // 4 args slots
3964 // args
3965
3966 // Save callee saved registers on the stack.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00003967 __ MultiPush(kCalleeSaved | ra.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00003968
Ben Murdoch589d6972011-11-30 16:04:58 +00003969 if (CpuFeatures::IsSupported(FPU)) {
3970 CpuFeatures::Scope scope(FPU);
3971 // Save callee-saved FPU registers.
3972 __ MultiPushFPU(kCalleeSavedFPU);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003973 // Set up the reserved register for 0.0.
3974 __ Move(kDoubleRegZero, 0.0);
Ben Murdoch589d6972011-11-30 16:04:58 +00003975 }
3976
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003977
Ben Murdoch257744e2011-11-30 15:57:28 +00003978 // Load argv in s0 register.
Ben Murdoch589d6972011-11-30 16:04:58 +00003979 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
3980 if (CpuFeatures::IsSupported(FPU)) {
3981 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
3982 }
3983
3984 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00003985
3986 // We build an EntryFrame.
3987 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3988 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3989 __ li(t2, Operand(Smi::FromInt(marker)));
3990 __ li(t1, Operand(Smi::FromInt(marker)));
Ben Murdoch589d6972011-11-30 16:04:58 +00003991 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003992 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00003993 __ lw(t0, MemOperand(t0));
3994 __ Push(t3, t2, t1, t0);
3995  // Set up the frame pointer for the frame to be pushed.
3996 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
3997
3998 // Registers:
3999 // a0: entry_address
4000 // a1: function
4001  // a2: receiver_pointer
4002 // a3: argc
4003 // s0: argv
4004 //
4005 // Stack:
4006 // caller fp |
4007 // function slot | entry frame
4008 // context slot |
4009 // bad fp (0xff...f) |
4010 // callee saved registers + ra
4011 // 4 args slots
4012 // args
4013
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004014 // If this is the outermost JS call, set js_entry_sp value.
4015 Label non_outermost_js;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004016 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004017 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4018 __ lw(t2, MemOperand(t1));
4019 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
4020 __ sw(fp, MemOperand(t1));
4021 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4022 Label cont;
4023 __ b(&cont);
4024 __ nop(); // Branch delay slot nop.
4025 __ bind(&non_outermost_js);
4026 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4027 __ bind(&cont);
4028 __ push(t0);
Ben Murdoch257744e2011-11-30 15:57:28 +00004029
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004030 // Jump to a faked try block that does the invoke, with a faked catch
4031 // block that sets the pending exception.
4032 __ jmp(&invoke);
4033 __ bind(&handler_entry);
4034 handler_offset_ = handler_entry.pos();
4035 // Caught exception: Store result (exception) in the pending exception
4036 // field in the JSEnv and return a failure sentinel. Coming in here the
4037 // fp will be invalid because the PushTryHandler below sets it to 0 to
4038 // signal the existence of the JSEntry frame.
Ben Murdoch589d6972011-11-30 16:04:58 +00004039 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004040 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004041 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
4042 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4043 __ b(&exit); // b exposes branch delay slot.
4044 __ nop(); // Branch delay slot nop.
4045
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004046 // Invoke: Link this frame into the handler chain. There's only one
4047 // handler block in this code object, so its index is 0.
Ben Murdoch257744e2011-11-30 15:57:28 +00004048 __ bind(&invoke);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004049 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
Ben Murdoch257744e2011-11-30 15:57:28 +00004050 // If an exception not caught by another handler occurs, this handler
4051 // returns control to the code after the bal(&invoke) above, which
4052 // restores all kCalleeSaved registers (including cp and fp) to their
4053 // saved values before returning a failure to C.
4054
4055 // Clear any pending exceptions.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004056 __ li(t1, Operand(isolate->factory()->the_hole_value()));
Ben Murdoch589d6972011-11-30 16:04:58 +00004057 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004058 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004059 __ sw(t1, MemOperand(t0));
4060
4061 // Invoke the function by calling through JS entry trampoline builtin.
4062 // Notice that we cannot store a reference to the trampoline code directly in
4063 // this stub, because runtime stubs are not traversed when doing GC.
4064
4065 // Registers:
4066 // a0: entry_address
4067 // a1: function
4068  // a2: receiver_pointer
4069 // a3: argc
4070 // s0: argv
4071 //
4072 // Stack:
4073 // handler frame
4074 // entry frame
4075 // callee saved registers + ra
4076 // 4 args slots
4077 // args
4078
4079 if (is_construct) {
4080 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004081 isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00004082 __ li(t0, Operand(construct_entry));
4083 } else {
4084 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4085 __ li(t0, Operand(entry));
4086 }
4087 __ lw(t9, MemOperand(t0)); // Deref address.
4088
4089 // Call JSEntryTrampoline.
4090 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
4091 __ Call(t9);
4092
4093 // Unlink this frame from the handler chain.
4094 __ PopTryHandler();
4095
4096 __ bind(&exit); // v0 holds result
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004097 // Check if the current stack frame is marked as the outermost JS frame.
4098 Label non_outermost_js_2;
4099 __ pop(t1);
4100 __ Branch(&non_outermost_js_2, ne, t1,
4101 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4102 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4103 __ sw(zero_reg, MemOperand(t1));
4104 __ bind(&non_outermost_js_2);
Ben Murdoch257744e2011-11-30 15:57:28 +00004105
4106 // Restore the top frame descriptors from the stack.
4107 __ pop(t1);
Ben Murdoch589d6972011-11-30 16:04:58 +00004108 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004109 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004110 __ sw(t1, MemOperand(t0));
4111
4112 // Reset the stack to the callee saved registers.
4113 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
4114
Ben Murdoch589d6972011-11-30 16:04:58 +00004115 if (CpuFeatures::IsSupported(FPU)) {
4116 CpuFeatures::Scope scope(FPU);
4117 // Restore callee-saved fpu registers.
4118 __ MultiPopFPU(kCalleeSavedFPU);
4119 }
4120
Ben Murdoch257744e2011-11-30 15:57:28 +00004121 // Restore callee saved registers from the stack.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004122 __ MultiPop(kCalleeSaved | ra.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00004123 // Return.
4124 __ Jump(ra);
Steve Block44f0eee2011-05-26 01:26:41 +01004125}
4126
4127
Ben Murdoch257744e2011-11-30 15:57:28 +00004128// Uses registers a0 to t0.
4129// Expected input (depending on whether args are in registers or on the stack):
4130// * object: a0 or at sp + 1 * kPointerSize.
4131// * function: a1 or at sp.
4132//
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004133// An inlined call site may have been generated before calling this stub.
4134// In this case the offset to the inline site to patch is passed on the stack,
4135// in the safepoint slot for register t0.
Steve Block44f0eee2011-05-26 01:26:41 +01004136void InstanceofStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004137 // Call site inlining and patching implies arguments in registers.
4138 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4139 // ReturnTrueFalse is only implemented for inlined call sites.
4140 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4141
4142 // Fixed register usage throughout the stub:
4143 const Register object = a0; // Object (lhs).
4144 Register map = a3; // Map of the object.
4145 const Register function = a1; // Function (rhs).
4146 const Register prototype = t0; // Prototype of the function.
4147 const Register inline_site = t5;
4148 const Register scratch = a2;
4149
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004150 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
4151
Ben Murdoch257744e2011-11-30 15:57:28 +00004152 Label slow, loop, is_instance, is_not_instance, not_js_object;
4153
4154 if (!HasArgsInRegisters()) {
4155 __ lw(object, MemOperand(sp, 1 * kPointerSize));
4156 __ lw(function, MemOperand(sp, 0));
4157 }
4158
4159 // Check that the left hand is a JS object and load map.
4160 __ JumpIfSmi(object, &not_js_object);
4161 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4162
4163 // If there is a call site cache don't look in the global cache, but do the
4164 // real lookup and update the call site cache.
4165 if (!HasCallSiteInlineCheck()) {
4166 Label miss;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004167 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4168 __ Branch(&miss, ne, function, Operand(at));
4169 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4170 __ Branch(&miss, ne, map, Operand(at));
Ben Murdoch257744e2011-11-30 15:57:28 +00004171 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4172 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4173
4174 __ bind(&miss);
4175 }
4176
4177 // Get the prototype of the function.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004178 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00004179
4180 // Check that the function prototype is a JS object.
4181 __ JumpIfSmi(prototype, &slow);
4182 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4183
4184 // Update the global instanceof or call site inlined cache with the current
4185 // map and function. The cached answer will be set when it is known below.
4186 if (!HasCallSiteInlineCheck()) {
4187 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4188 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4189 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004190 ASSERT(HasArgsInRegisters());
4191 // Patch the (relocated) inlined map check.
4192
4193 // The offset was stored in t0 safepoint slot.
4194 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
4195 __ LoadFromSafepointRegisterSlot(scratch, t0);
4196 __ Subu(inline_site, ra, scratch);
4197 // Get the map location in scratch and patch it.
4198 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
4199 __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00004200 }
4201
4202 // Register mapping: a3 is object map and t0 is function prototype.
4203 // Get prototype of object into a2.
4204 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4205
4206 // We don't need map any more. Use it as a scratch register.
4207 Register scratch2 = map;
4208 map = no_reg;
4209
4210 // Loop through the prototype chain looking for the function prototype.
4211 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4212 __ bind(&loop);
4213 __ Branch(&is_instance, eq, scratch, Operand(prototype));
4214 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4215 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4216 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4217 __ Branch(&loop);
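  // Illustrative sketch (not part of the emitted code): the loop above is the
  // usual prototype walk, roughly
  //   current = object->map()->prototype();
  //   while (true) {
  //     if (current == prototype) goto is_instance;
  //     if (current == null)      goto is_not_instance;
  //     current = current->map()->prototype();
  //   }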
4218
4219 __ bind(&is_instance);
4220 ASSERT(Smi::FromInt(0) == 0);
4221 if (!HasCallSiteInlineCheck()) {
4222 __ mov(v0, zero_reg);
4223 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4224 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004225 // Patch the call site to return true.
4226 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4227 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4228 // Get the boolean result location in scratch and patch it.
4229 __ PatchRelocatedValue(inline_site, scratch, v0);
4230
4231 if (!ReturnTrueFalseObject()) {
4232 ASSERT_EQ(Smi::FromInt(0), 0);
4233 __ mov(v0, zero_reg);
4234 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004235 }
4236 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4237
4238 __ bind(&is_not_instance);
4239 if (!HasCallSiteInlineCheck()) {
4240 __ li(v0, Operand(Smi::FromInt(1)));
4241 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4242 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004243 // Patch the call site to return false.
4244 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4245 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4246 // Get the boolean result location in scratch and patch it.
4247 __ PatchRelocatedValue(inline_site, scratch, v0);
4248
4249 if (!ReturnTrueFalseObject()) {
4250 __ li(v0, Operand(Smi::FromInt(1)));
4251 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004252 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004253
Ben Murdoch257744e2011-11-30 15:57:28 +00004254 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4255
4256 Label object_not_null, object_not_null_or_smi;
4257 __ bind(&not_js_object);
4258 // Before null, smi and string value checks, check that the rhs is a function
4259 // as for a non-function rhs an exception needs to be thrown.
4260 __ JumpIfSmi(function, &slow);
4261 __ GetObjectType(function, scratch2, scratch);
4262 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4263
4264 // Null is not instance of anything.
4265 __ Branch(&object_not_null, ne, scratch,
4266 Operand(masm->isolate()->factory()->null_value()));
4267 __ li(v0, Operand(Smi::FromInt(1)));
4268 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4269
4270 __ bind(&object_not_null);
4271 // Smi values are not instances of anything.
4272 __ JumpIfNotSmi(object, &object_not_null_or_smi);
4273 __ li(v0, Operand(Smi::FromInt(1)));
4274 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4275
4276 __ bind(&object_not_null_or_smi);
4277 // String values are not instances of anything.
4278 __ IsObjectJSStringType(object, scratch, &slow);
4279 __ li(v0, Operand(Smi::FromInt(1)));
4280 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4281
4282 // Slow-case. Tail call builtin.
4283 __ bind(&slow);
4284 if (!ReturnTrueFalseObject()) {
4285 if (HasArgsInRegisters()) {
4286 __ Push(a0, a1);
4287 }
4288 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4289 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004290 {
4291 FrameScope scope(masm, StackFrame::INTERNAL);
4292 __ Push(a0, a1);
4293 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4294 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004295 __ mov(a0, v0);
4296 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4297 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4298 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4299 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4300 }
Steve Block44f0eee2011-05-26 01:26:41 +01004301}
4302
4303
Ben Murdoch257744e2011-11-30 15:57:28 +00004304Register InstanceofStub::left() { return a0; }
4305
4306
4307Register InstanceofStub::right() { return a1; }
4308
4309
Steve Block44f0eee2011-05-26 01:26:41 +01004310void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004311 // The displacement is the offset of the last parameter (if any)
4312 // relative to the frame pointer.
4313 static const int kDisplacement =
4314 StandardFrameConstants::kCallerSPOffset - kPointerSize;
4315
4316  // Check that the key is a smi.
4317 Label slow;
4318 __ JumpIfNotSmi(a1, &slow);
4319
4320 // Check if the calling frame is an arguments adaptor frame.
4321 Label adaptor;
4322 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4323 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4324 __ Branch(&adaptor,
4325 eq,
4326 a3,
4327 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4328
4329 // Check index (a1) against formal parameters count limit passed in
4330 // through register a0. Use unsigned comparison to get negative
4331 // check for free.
4332 __ Branch(&slow, hs, a1, Operand(a0));
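  // Illustrative note (not part of the emitted code): both the key (a1) and
  // the parameter count (a0) are tagged smis, and tagging preserves the sign,
  // so a negative key looks like a huge unsigned value. One unsigned 'hs'
  // comparison therefore rejects both negative and out-of-range indices,
  // e.g. (uint32_t)-1 == 0xffffffff >= any valid count.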
4333
4334 // Read the argument from the stack and return it.
4335 __ subu(a3, a0, a1);
4336 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4337 __ Addu(a3, fp, Operand(t3));
4338 __ lw(v0, MemOperand(a3, kDisplacement));
4339 __ Ret();
4340
4341 // Arguments adaptor case: Check index (a1) against actual arguments
4342 // limit found in the arguments adaptor frame. Use unsigned
4343 // comparison to get negative check for free.
4344 __ bind(&adaptor);
4345 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4346 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4347
4348 // Read the argument from the adaptor frame and return it.
4349 __ subu(a3, a0, a1);
4350 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4351 __ Addu(a3, a2, Operand(t3));
4352 __ lw(v0, MemOperand(a3, kDisplacement));
4353 __ Ret();
4354
4355 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4356 // by calling the runtime system.
4357 __ bind(&slow);
4358 __ push(a1);
4359 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01004360}
4361
4362
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004363void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004364 // sp[0] : number of parameters
4365 // sp[4] : receiver displacement
4366 // sp[8] : function
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004367 // Check if the calling frame is an arguments adaptor frame.
4368 Label runtime;
4369 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4370 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4371 __ Branch(&runtime, ne,
4372 a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004373
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004374 // Patch the arguments.length and the parameters pointer in the current frame.
4375 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4376 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4377 __ sll(t3, a2, 1);
4378 __ Addu(a3, a3, Operand(t3));
4379 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4380 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4381
4382 __ bind(&runtime);
4383 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4384}
4385
4386
4387void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4388 // Stack layout:
4389 // sp[0] : number of parameters (tagged)
4390 // sp[4] : address of receiver argument
4391 // sp[8] : function
4392 // Registers used over whole function:
4393 // t2 : allocated object (tagged)
4394 // t5 : mapped parameter count (tagged)
4395
4396 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4397 // a1 = parameter count (tagged)
4398
4399 // Check if the calling frame is an arguments adaptor frame.
4400 Label runtime;
4401 Label adaptor_frame, try_allocate;
4402 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4403 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4404 __ Branch(&adaptor_frame, eq, a2,
4405 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4406
4407 // No adaptor, parameter count = argument count.
4408 __ mov(a2, a1);
4409 __ b(&try_allocate);
4410 __ nop(); // Branch delay slot nop.
4411
4412 // We have an adaptor frame. Patch the parameters pointer.
4413 __ bind(&adaptor_frame);
4414 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4415 __ sll(t6, a2, 1);
4416 __ Addu(a3, a3, Operand(t6));
4417 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4418 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4419
4420 // a1 = parameter count (tagged)
4421 // a2 = argument count (tagged)
4422 // Compute the mapped parameter count = min(a1, a2) in a1.
4423 Label skip_min;
4424 __ Branch(&skip_min, lt, a1, Operand(a2));
4425 __ mov(a1, a2);
4426 __ bind(&skip_min);
4427
4428 __ bind(&try_allocate);
4429
4430 // Compute the sizes of backing store, parameter map, and arguments object.
4431 // 1. Parameter map, has 2 extra words containing context and backing store.
4432 const int kParameterMapHeaderSize =
4433 FixedArray::kHeaderSize + 2 * kPointerSize;
4434 // If there are no mapped parameters, we do not need the parameter_map.
4435 Label param_map_size;
4436 ASSERT_EQ(0, Smi::FromInt(0));
4437 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4438 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
4439 __ sll(t5, a1, 1);
4440 __ addiu(t5, t5, kParameterMapHeaderSize);
4441 __ bind(&param_map_size);
4442
4443 // 2. Backing store.
4444 __ sll(t6, a2, 1);
4445 __ Addu(t5, t5, Operand(t6));
4446 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4447
4448 // 3. Arguments object.
4449 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4450
4451 // Do the allocation of all three objects in one go.
4452 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4453
4454 // v0 = address of new object(s) (tagged)
4455 // a2 = argument count (tagged)
4456 // Get the arguments boilerplate from the current (global) context into t0.
4457 const int kNormalOffset =
4458 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4459 const int kAliasedOffset =
4460 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4461
4462 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4463 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4464 Label skip2_ne, skip2_eq;
4465 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4466 __ lw(t0, MemOperand(t0, kNormalOffset));
4467 __ bind(&skip2_ne);
4468
4469 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4470 __ lw(t0, MemOperand(t0, kAliasedOffset));
4471 __ bind(&skip2_eq);
4472
4473 // v0 = address of new object (tagged)
4474 // a1 = mapped parameter count (tagged)
4475 // a2 = argument count (tagged)
4476 // t0 = address of boilerplate object (tagged)
4477 // Copy the JS object part.
4478 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4479 __ lw(a3, FieldMemOperand(t0, i));
4480 __ sw(a3, FieldMemOperand(v0, i));
4481 }
4482
4483 // Set up the callee in-object property.
4484 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4485 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4486 const int kCalleeOffset = JSObject::kHeaderSize +
4487 Heap::kArgumentsCalleeIndex * kPointerSize;
4488 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4489
4490 // Use the length (smi tagged) and set that as an in-object property too.
4491 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4492 const int kLengthOffset = JSObject::kHeaderSize +
4493 Heap::kArgumentsLengthIndex * kPointerSize;
4494 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4495
4496 // Set up the elements pointer in the allocated arguments object.
4497 // If we allocated a parameter map, t0 will point there, otherwise
4498 // it will point to the backing store.
4499 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4500 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4501
4502 // v0 = address of new object (tagged)
4503 // a1 = mapped parameter count (tagged)
4504 // a2 = argument count (tagged)
4505 // t0 = address of parameter map or backing store (tagged)
4506 // Initialize parameter map. If there are no mapped arguments, we're done.
4507 Label skip_parameter_map;
4508 Label skip3;
4509 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4510 // Move backing store address to a3, because it is
4511 // expected there when filling in the unmapped arguments.
4512 __ mov(a3, t0);
4513 __ bind(&skip3);
4514
4515 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4516
4517 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4518 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4519 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4520 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4521 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4522 __ sll(t6, a1, 1);
4523 __ Addu(t2, t0, Operand(t6));
4524 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4525 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4526
4527 // Copy the parameter slots and the holes in the arguments.
4528 // We need to fill in mapped_parameter_count slots. They index the context,
4529 // where parameters are stored in reverse order, at
4530 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4531 // The mapped parameters thus need to get indices
4532 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4533 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4534 // We loop from right to left.
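  // A small worked example (illustrative values only): with
  // parameter_count == 4 and mapped_parameter_count == 2, the loop below
  // produces
  //   parameter map entry 0 -> context slot MIN_CONTEXT_SLOTS + 3 (param 0)
  //   parameter map entry 1 -> context slot MIN_CONTEXT_SLOTS + 2 (param 1)
  // and stores the hole into backing store slots 0 and 1, so that loads of
  // those elements are redirected through the context.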
4535 Label parameters_loop, parameters_test;
4536 __ mov(t2, a1);
4537 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4538 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4539 __ Subu(t5, t5, Operand(a1));
4540 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4541 __ sll(t6, t2, 1);
4542 __ Addu(a3, t0, Operand(t6));
4543 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4544
4545 // t2 = loop variable (tagged)
4546 // a1 = mapping index (tagged)
4547 // a3 = address of backing store (tagged)
4548 // t0 = address of parameter map (tagged)
4549 // t1 = temporary scratch (used, among other things, for address calculation)
4550 // t3 = the hole value
4551 __ jmp(&parameters_test);
4552
4553 __ bind(&parameters_loop);
4554 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4555 __ sll(t1, t2, 1);
4556 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4557 __ Addu(t6, t0, t1);
4558 __ sw(t5, MemOperand(t6));
4559 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4560 __ Addu(t6, a3, t1);
4561 __ sw(t3, MemOperand(t6));
4562 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4563 __ bind(&parameters_test);
4564 __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4565
4566 __ bind(&skip_parameter_map);
4567 // a2 = argument count (tagged)
4568 // a3 = address of backing store (tagged)
4569 // t1 = scratch
4570 // Copy arguments header and remaining slots (if there are any).
4571 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4572 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4573 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4574
4575 Label arguments_loop, arguments_test;
4576 __ mov(t5, a1);
4577 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4578 __ sll(t6, t5, 1);
4579 __ Subu(t0, t0, Operand(t6));
4580 __ jmp(&arguments_test);
4581
4582 __ bind(&arguments_loop);
4583 __ Subu(t0, t0, Operand(kPointerSize));
4584 __ lw(t2, MemOperand(t0, 0));
4585 __ sll(t6, t5, 1);
4586 __ Addu(t1, a3, Operand(t6));
4587 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4588 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4589
4590 __ bind(&arguments_test);
4591 __ Branch(&arguments_loop, lt, t5, Operand(a2));
4592
4593 // Return and remove the on-stack parameters.
4594 __ Addu(sp, sp, Operand(3 * kPointerSize));
4595 __ Ret();
4596
4597 // Do the runtime call to allocate the arguments object.
4598 // a2 = argument count (tagged)
4599 __ bind(&runtime);
4600 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4601 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4602}
4603
4604
4605void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4606 // sp[0] : number of parameters
4607 // sp[4] : receiver displacement
4608 // sp[8] : function
Ben Murdoch257744e2011-11-30 15:57:28 +00004609 // Check if the calling frame is an arguments adaptor frame.
4610 Label adaptor_frame, try_allocate, runtime;
4611 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4612 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4613 __ Branch(&adaptor_frame,
4614 eq,
4615 a3,
4616 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4617
4618 // Get the length from the frame.
4619 __ lw(a1, MemOperand(sp, 0));
4620 __ Branch(&try_allocate);
4621
4622 // Patch the arguments.length and the parameters pointer.
4623 __ bind(&adaptor_frame);
4624 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4625 __ sw(a1, MemOperand(sp, 0));
4626 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4627 __ Addu(a3, a2, Operand(at));
4628
4629 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4630 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4631
4632 // Try the new space allocation. Start out with computing the size
4633 // of the arguments object and the elements array in words.
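  // In words, the size computed below is (sketch only, n = untagged
  // argument count):
  //   n == 0 : Heap::kArgumentsObjectSizeStrict / kPointerSize
  //   n  > 0 : n + (FixedArray::kHeaderSize +
  //                 Heap::kArgumentsObjectSizeStrict) / kPointerSize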
4634 Label add_arguments_object;
4635 __ bind(&try_allocate);
4636 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4637 __ srl(a1, a1, kSmiTagSize);
4638
4639 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4640 __ bind(&add_arguments_object);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004641 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00004642
4643 // Do the allocation of both objects in one go.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004644 __ AllocateInNewSpace(a1,
4645 v0,
4646 a2,
4647 a3,
4648 &runtime,
4649 static_cast<AllocationFlags>(TAG_OBJECT |
4650 SIZE_IN_WORDS));
Ben Murdoch257744e2011-11-30 15:57:28 +00004651
4652 // Get the arguments boilerplate from the current (global) context.
4653 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4654 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004655 __ lw(t0, MemOperand(t0, Context::SlotOffset(
4656 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004657
4658 // Copy the JS object part.
4659 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4660
Ben Murdoch257744e2011-11-30 15:57:28 +00004661 // Get the length (smi tagged) and set that as an in-object property too.
4662 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4663 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4664 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004665 Heap::kArgumentsLengthIndex * kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00004666
4667 Label done;
4668 __ Branch(&done, eq, a1, Operand(zero_reg));
4669
4670 // Get the parameters pointer from the stack.
4671 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4672
4673 // Set up the elements pointer in the allocated arguments object and
4674 // initialize the header in the elements fixed array.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004675 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
Ben Murdoch257744e2011-11-30 15:57:28 +00004676 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4677 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4678 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4679 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004680 // Untag the length for the loop.
4681 __ srl(a1, a1, kSmiTagSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00004682
4683 // Copy the fixed array slots.
4684 Label loop;
4685 // Set up t0 to point to the first array slot.
4686 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4687 __ bind(&loop);
4688 // Pre-decrement a2 with kPointerSize on each iteration.
4689 // Pre-decrement in order to skip receiver.
4690 __ Addu(a2, a2, Operand(-kPointerSize));
4691 __ lw(a3, MemOperand(a2));
4692 // Post-increment t0 with kPointerSize on each iteration.
4693 __ sw(a3, MemOperand(t0));
4694 __ Addu(t0, t0, Operand(kPointerSize));
4695 __ Subu(a1, a1, Operand(1));
4696 __ Branch(&loop, ne, a1, Operand(zero_reg));
4697
4698 // Return and remove the on-stack parameters.
4699 __ bind(&done);
4700 __ Addu(sp, sp, Operand(3 * kPointerSize));
4701 __ Ret();
4702
4703 // Do the runtime call to allocate the arguments object.
4704 __ bind(&runtime);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004705 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01004706}
4707
4708
4709void RegExpExecStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004710 // Just jump directly to the runtime if native RegExp is not selected at
4711 // compile time, or if the regexp entry in generated code has been turned
4712 // off by a runtime switch.
4713#ifdef V8_INTERPRETED_REGEXP
4714 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4715#else // V8_INTERPRETED_REGEXP
Ben Murdoch257744e2011-11-30 15:57:28 +00004716
4717 // Stack frame on entry.
4718 // sp[0]: last_match_info (expected JSArray)
4719 // sp[4]: previous index
4720 // sp[8]: subject string
4721 // sp[12]: JSRegExp object
4722
4723 static const int kLastMatchInfoOffset = 0 * kPointerSize;
4724 static const int kPreviousIndexOffset = 1 * kPointerSize;
4725 static const int kSubjectOffset = 2 * kPointerSize;
4726 static const int kJSRegExpOffset = 3 * kPointerSize;
4727
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004728 Isolate* isolate = masm->isolate();
4729
Ben Murdoch257744e2011-11-30 15:57:28 +00004730 Label runtime, invoke_regexp;
4731
4732 // Allocation of registers for this function. These are in callee save
4733 // registers and will be preserved by the call to the native RegExp code, as
4734 // this code is called using the normal C calling convention. When calling
4735 // directly from generated code the native RegExp code will not do a GC and
4736 // therefore the contents of these registers are safe to use after the call.
4737 // MIPS - using s0..s2, since we are not using CEntry Stub.
4738 Register subject = s0;
4739 Register regexp_data = s1;
4740 Register last_match_info_elements = s2;
4741
4742 // Ensure that a RegExp stack is allocated.
4743 ExternalReference address_of_regexp_stack_memory_address =
4744 ExternalReference::address_of_regexp_stack_memory_address(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004745 isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00004746 ExternalReference address_of_regexp_stack_memory_size =
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004747 ExternalReference::address_of_regexp_stack_memory_size(isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00004748 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4749 __ lw(a0, MemOperand(a0, 0));
4750 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4751
4752 // Check that the first argument is a JSRegExp object.
4753 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4754 STATIC_ASSERT(kSmiTag == 0);
4755 __ JumpIfSmi(a0, &runtime);
4756 __ GetObjectType(a0, a1, a1);
4757 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4758
4759 // Check that the RegExp has been compiled (data contains a fixed array).
4760 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4761 if (FLAG_debug_code) {
4762 __ And(t0, regexp_data, Operand(kSmiTagMask));
4763 __ Check(nz,
4764 "Unexpected type for RegExp data, FixedArray expected",
4765 t0,
4766 Operand(zero_reg));
4767 __ GetObjectType(regexp_data, a0, a0);
4768 __ Check(eq,
4769 "Unexpected type for RegExp data, FixedArray expected",
4770 a0,
4771 Operand(FIXED_ARRAY_TYPE));
4772 }
4773
4774 // regexp_data: RegExp data (FixedArray)
4775 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4776 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4777 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4778
4779 // regexp_data: RegExp data (FixedArray)
4780 // Check that the number of captures fit in the static offsets vector buffer.
4781 __ lw(a2,
4782 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4783 // Calculate number of capture registers (number_of_captures + 1) * 2. This
4784 // uses the assumption that smis are 2 * their untagged value.
4785 STATIC_ASSERT(kSmiTag == 0);
4786 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4787 __ Addu(a2, a2, Operand(2)); // a2 was a smi.
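  // To spell out the arithmetic: a2 held Smi(number_of_captures), i.e.
  // number_of_captures * 2, so adding 2 yields (number_of_captures + 1) * 2,
  // the required number of capture registers.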
4788 // Check that the static offsets vector buffer is large enough.
4789 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4790
4791 // a2: Number of capture registers
4792 // regexp_data: RegExp data (FixedArray)
4793 // Check that the second argument is a string.
4794 __ lw(subject, MemOperand(sp, kSubjectOffset));
4795 __ JumpIfSmi(subject, &runtime);
4796 __ GetObjectType(subject, a0, a0);
4797 __ And(a0, a0, Operand(kIsNotStringMask));
4798 STATIC_ASSERT(kStringTag == 0);
4799 __ Branch(&runtime, ne, a0, Operand(zero_reg));
4800
4801 // Get the length of the string to r3.
4802 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4803
4804 // a2: Number of capture registers
4805 // a3: Length of subject string as a smi
4806 // subject: Subject string
4807 // regexp_data: RegExp data (FixedArray)
4808 // Check that the third argument is a positive smi less than the subject
4809 // string length. A negative value will be greater (unsigned comparison).
4810 __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004811 __ JumpIfNotSmi(a0, &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00004812 __ Branch(&runtime, ls, a3, Operand(a0));
4813
4814 // a2: Number of capture registers
4815 // subject: Subject string
4816 // regexp_data: RegExp data (FixedArray)
4817 // Check that the fourth object is a JSArray object.
4818 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4819 __ JumpIfSmi(a0, &runtime);
4820 __ GetObjectType(a0, a1, a1);
4821 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4822 // Check that the JSArray is in fast case.
4823 __ lw(last_match_info_elements,
4824 FieldMemOperand(a0, JSArray::kElementsOffset));
4825 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4826 __ Branch(&runtime, ne, a0, Operand(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004827 isolate->factory()->fixed_array_map()));
Ben Murdoch257744e2011-11-30 15:57:28 +00004828 // Check that the last match info has space for the capture registers and the
4829 // additional information.
4830 __ lw(a0,
4831 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4832 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4833 __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4834 __ Branch(&runtime, gt, a2, Operand(at));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004835
4836 // Reset offset for possibly sliced string.
4837 __ mov(t0, zero_reg);
Ben Murdoch257744e2011-11-30 15:57:28 +00004838 // subject: Subject string
4839 // regexp_data: RegExp data (FixedArray)
4840 // Check the representation and encoding of the subject string.
4841 Label seq_string;
4842 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4843 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004844 // First check for flat string. None of the following string type tests will
4845 // succeed if subject is not a string or a short external string.
4846 __ And(a1,
4847 a0,
4848 Operand(kIsNotStringMask |
4849 kStringRepresentationMask |
4850 kShortExternalStringMask));
Ben Murdoch257744e2011-11-30 15:57:28 +00004851 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004852 __ Branch(&seq_string, eq, a1, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00004853
4854 // subject: Subject string
4855 // a0: instance type of Subject string
4856 // regexp_data: RegExp data (FixedArray)
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004857 // a1: whether subject is a string and, if so, its string representation
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004858 // Check for flat cons string or sliced string.
Ben Murdoch257744e2011-11-30 15:57:28 +00004859 // A flat cons string is a cons string where the second part is the empty
4860 // string. In that case the subject string is just the first part of the cons
4861 // string. Also in this case the first part of the cons string is known to be
4862 // a sequential string or an external string.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004863 // In the case of a sliced string its offset has to be taken into account.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004864 Label cons_string, external_string, check_encoding;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004865 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4866 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004867 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
4868 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004869 __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004870 __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
4871
4872 // Catch non-string subject or short external string.
4873 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
4874 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
4875 __ Branch(&runtime, ne, at, Operand(zero_reg));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004876
4877 // String is sliced.
4878 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4879 __ sra(t0, t0, kSmiTagSize);
4880 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4881 // t0: offset of sliced string, untagged.
4882 __ jmp(&check_encoding);
4883 // String is a cons string, check whether it is flat.
4884 __ bind(&cons_string);
Ben Murdoch257744e2011-11-30 15:57:28 +00004885 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4886 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4887 __ Branch(&runtime, ne, a0, Operand(a1));
4888 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004889 // Is first part of cons or parent of slice a flat string?
4890 __ bind(&check_encoding);
Ben Murdoch257744e2011-11-30 15:57:28 +00004891 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4892 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00004893 STATIC_ASSERT(kSeqStringTag == 0);
4894 __ And(at, a0, Operand(kStringRepresentationMask));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004895 __ Branch(&external_string, ne, at, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00004896
4897 __ bind(&seq_string);
4898 // subject: Subject string
4899 // regexp_data: RegExp data (FixedArray)
4900 // a0: Instance type of subject string
4901 STATIC_ASSERT(kStringEncodingMask == 4);
4902 STATIC_ASSERT(kAsciiStringTag == 4);
4903 STATIC_ASSERT(kTwoByteStringTag == 0);
4904 // Find the code object based on the assumptions above.
4905 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
4906 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
4907 __ sra(a3, a0, 2); // a3 is 1 for ascii, 0 for UC16 (used below).
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004908 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
4909 __ movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
Ben Murdoch257744e2011-11-30 15:57:28 +00004910
4911 // Check that the irregexp code has been generated for the actual string
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004912 // encoding. If it has, the field contains a code object; otherwise it contains
4913 // a smi (code flushing support).
4914 __ JumpIfSmi(t9, &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00004915
4916 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4917 // t9: code
4918 // subject: Subject string
4919 // regexp_data: RegExp data (FixedArray)
4920 // Load used arguments before starting to push arguments for call to native
4921 // RegExp code to avoid handling changing stack height.
4922 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
4923 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
4924
4925 // a1: previous index
4926 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4927 // t9: code
4928 // subject: Subject string
4929 // regexp_data: RegExp data (FixedArray)
4930 // All checks done. Now push arguments for native regexp code.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004931 __ IncrementCounter(isolate->counters()->regexp_entry_native(),
Ben Murdoch257744e2011-11-30 15:57:28 +00004932 1, a0, a2);
4933
4934 // Isolates: note we add an additional parameter here (isolate pointer).
4935 static const int kRegExpExecuteArguments = 8;
4936 static const int kParameterRegisters = 4;
4937 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4938
4939 // Stack pointer now points to cell where return address is to be written.
4940 // Arguments are before that on the stack or in registers, meaning we
4941 // treat the return address as argument 5. Thus every argument after that
4942 // needs to be shifted back by 1. Since DirectCEntryStub will handle
4943 // allocating space for the c argument slots, we don't need to calculate
4944 // that into the argument positions on the stack. This is how the stack will
4945 // look (sp meaning the value of sp at this moment):
4946 // [sp + 4] - Argument 8
4947 // [sp + 3] - Argument 7
4948 // [sp + 2] - Argument 6
4949 // [sp + 1] - Argument 5
4950 // [sp + 0] - saved ra
4951
4952 // Argument 8: Pass current isolate address.
4953 // CFunctionArgumentOperand handles MIPS stack argument slots.
4954 __ li(a0, Operand(ExternalReference::isolate_address()));
4955 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
4956
4957 // Argument 7: Indicate that this is a direct call from JavaScript.
4958 __ li(a0, Operand(1));
4959 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
4960
4961 // Argument 6: Start (high end) of backtracking stack memory area.
4962 __ li(a0, Operand(address_of_regexp_stack_memory_address));
4963 __ lw(a0, MemOperand(a0, 0));
4964 __ li(a2, Operand(address_of_regexp_stack_memory_size));
4965 __ lw(a2, MemOperand(a2, 0));
4966 __ addu(a0, a0, a2);
4967 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
4968
4969 // Argument 5: static offsets vector buffer.
4970 __ li(a0, Operand(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004971 ExternalReference::address_of_static_offsets_vector(isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004972 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
4973
4974 // For arguments 4 and 3 get string length, calculate start of string data
4975 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004976 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
Ben Murdoch257744e2011-11-30 15:57:28 +00004977 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004978 // Load the length from the original subject string from the previous stack
4979 // frame. Therefore we have to use fp, which points exactly to two pointer
4980 // sizes below the previous sp. (Because creating a new stack frame pushes
4981 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
Ben Murdoch589d6972011-11-30 16:04:58 +00004982 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004983 // If slice offset is not 0, load the length from the original sliced string.
4984 // Argument 4, a3: End of string data
4985 // Argument 3, a2: Start of string data
4986 // Prepare start and end index of the input.
4987 __ sllv(t1, t0, a3);
4988 __ addu(t0, t2, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00004989 __ sllv(t1, a1, a3);
4990 __ addu(a2, t0, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00004991
Ben Murdoch589d6972011-11-30 16:04:58 +00004992 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004993 __ sra(t2, t2, kSmiTagSize);
4994 __ sllv(t1, t2, a3);
4995 __ addu(a3, t0, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00004996 // Argument 2 (a1): Previous index.
4997 // Already there
4998
4999 // Argument 1 (a0): Subject string.
Ben Murdoch589d6972011-11-30 16:04:58 +00005000 __ mov(a0, subject);
Ben Murdoch257744e2011-11-30 15:57:28 +00005001
5002 // Locate the code entry and call it.
5003 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
5004 DirectCEntryStub stub;
5005 stub.GenerateCall(masm, t9);
5006
5007 __ LeaveExitFrame(false, no_reg);
5008
5009 // v0: result
5010 // subject: subject string (callee saved)
5011 // regexp_data: RegExp data (callee saved)
5012 // last_match_info_elements: Last match info elements (callee saved)
5013
5014 // Check the result.
5015
5016 Label success;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005017 __ Branch(&success, eq,
Ben Murdoch589d6972011-11-30 16:04:58 +00005018 v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
Ben Murdoch257744e2011-11-30 15:57:28 +00005019 Label failure;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005020 __ Branch(&failure, eq,
Ben Murdoch589d6972011-11-30 16:04:58 +00005021 v0, Operand(NativeRegExpMacroAssembler::FAILURE));
Ben Murdoch257744e2011-11-30 15:57:28 +00005022 // If it is not an exception, it can only be a retry. Handle that in the runtime system.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005023 __ Branch(&runtime, ne,
Ben Murdoch589d6972011-11-30 16:04:58 +00005024 v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
Ben Murdoch257744e2011-11-30 15:57:28 +00005025 // Result must now be exception. If there is no pending exception already, a
5026 // stack overflow (on the backtrack stack) was detected in RegExp code, but
5027 // the exception has not been created yet. Handle that in the runtime system.
5028 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005029 __ li(a1, Operand(isolate->factory()->the_hole_value()));
Ben Murdoch589d6972011-11-30 16:04:58 +00005030 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005031 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00005032 __ lw(v0, MemOperand(a2, 0));
Ben Murdoch589d6972011-11-30 16:04:58 +00005033 __ Branch(&runtime, eq, v0, Operand(a1));
Ben Murdoch257744e2011-11-30 15:57:28 +00005034
5035 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
5036
5037 // Check if the exception is a termination. If so, throw as uncatchable.
5038 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5039 Label termination_exception;
Ben Murdoch589d6972011-11-30 16:04:58 +00005040 __ Branch(&termination_exception, eq, v0, Operand(a0));
Ben Murdoch257744e2011-11-30 15:57:28 +00005041
Ben Murdoch589d6972011-11-30 16:04:58 +00005042 __ Throw(v0); // Expects thrown value in v0.
Ben Murdoch257744e2011-11-30 15:57:28 +00005043
5044 __ bind(&termination_exception);
5045 __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
5046
5047 __ bind(&failure);
5048 // For failure and exception return null.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005049 __ li(v0, Operand(isolate->factory()->null_value()));
Ben Murdoch257744e2011-11-30 15:57:28 +00005050 __ Addu(sp, sp, Operand(4 * kPointerSize));
5051 __ Ret();
5052
5053 // Process the result from the native regexp code.
5054 __ bind(&success);
5055 __ lw(a1,
5056 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
5057 // Calculate number of capture registers (number_of_captures + 1) * 2.
5058 STATIC_ASSERT(kSmiTag == 0);
5059 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5060 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5061
5062 // a1: number of capture registers
5063 // subject: subject string
5064 // Store the capture count.
5065 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
5066 __ sw(a2, FieldMemOperand(last_match_info_elements,
5067 RegExpImpl::kLastCaptureCountOffset));
5068 // Store last subject and last input.
Ben Murdoch257744e2011-11-30 15:57:28 +00005069 __ sw(subject,
5070 FieldMemOperand(last_match_info_elements,
5071 RegExpImpl::kLastSubjectOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005072 __ mov(a2, subject);
5073 __ RecordWriteField(last_match_info_elements,
5074 RegExpImpl::kLastSubjectOffset,
5075 a2,
5076 t3,
5077 kRAHasNotBeenSaved,
5078 kDontSaveFPRegs);
Ben Murdoch257744e2011-11-30 15:57:28 +00005079 __ sw(subject,
5080 FieldMemOperand(last_match_info_elements,
5081 RegExpImpl::kLastInputOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005082 __ RecordWriteField(last_match_info_elements,
5083 RegExpImpl::kLastInputOffset,
5084 subject,
5085 t3,
5086 kRAHasNotBeenSaved,
5087 kDontSaveFPRegs);
Ben Murdoch257744e2011-11-30 15:57:28 +00005088
5089 // Get the static offsets vector filled by the native regexp code.
5090 ExternalReference address_of_static_offsets_vector =
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005091 ExternalReference::address_of_static_offsets_vector(isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00005092 __ li(a2, Operand(address_of_static_offsets_vector));
5093
5094 // a1: number of capture registers
5095 // a2: offsets vector
5096 Label next_capture, done;
5097 // Capture register counter starts from number of capture registers and
5098 // counts down until wrapping after zero.
5099 __ Addu(a0,
5100 last_match_info_elements,
5101 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
5102 __ bind(&next_capture);
5103 __ Subu(a1, a1, Operand(1));
5104 __ Branch(&done, lt, a1, Operand(zero_reg));
5105 // Read the value from the static offsets vector buffer.
5106 __ lw(a3, MemOperand(a2, 0));
5107 __ addiu(a2, a2, kPointerSize);
5108 // Store the smi value in the last match info.
5109 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5110 __ sw(a3, MemOperand(a0, 0));
5111 __ Branch(&next_capture, USE_DELAY_SLOT);
5112 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5113
5114 __ bind(&done);
5115
5116 // Return last match info.
5117 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5118 __ Addu(sp, sp, Operand(4 * kPointerSize));
5119 __ Ret();
5120
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005121 // External string. Short external strings have already been ruled out.
5122 // a0: scratch
5123 __ bind(&external_string);
5124 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5125 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5126 if (FLAG_debug_code) {
5127 // Assert that we do not have a cons or slice (indirect strings) here.
5128 // Sequential strings have already been ruled out.
5129 __ And(at, a0, Operand(kIsIndirectStringMask));
5130 __ Assert(eq,
5131 "external string expected, but not found",
5132 at,
5133 Operand(zero_reg));
5134 }
5135 __ lw(subject,
5136 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
5137 // Move the pointer so that offset-wise, it looks like a sequential string.
5138 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5139 __ Subu(subject,
5140 subject,
5141 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
5142 __ jmp(&seq_string);
5143
Ben Murdoch257744e2011-11-30 15:57:28 +00005144 // Do the runtime call to execute the regexp.
5145 __ bind(&runtime);
5146 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5147#endif // V8_INTERPRETED_REGEXP
Steve Block44f0eee2011-05-26 01:26:41 +01005148}
5149
5150
5151void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005152 const int kMaxInlineLength = 100;
5153 Label slowcase;
5154 Label done;
5155 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5156 STATIC_ASSERT(kSmiTag == 0);
5157 STATIC_ASSERT(kSmiTagSize == 1);
5158 __ JumpIfNotSmi(a1, &slowcase);
5159 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5160 // Smi-tagging is equivalent to multiplying by 2.
5161 // Allocate RegExpResult followed by FixedArray with size in a2.
5162 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5163 // Elements: [Map][Length][..elements..]
5164 // Size of JSArray with two in-object properties and the header of a
5165 // FixedArray.
5166 int objects_size =
5167 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
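  // The allocation size in words is therefore objects_size plus the number
  // of elements; e.g. (illustrative) a result of length 3 needs
  // objects_size + 3 words.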
5168 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5169 __ Addu(a2, t1, Operand(objects_size));
5170 __ AllocateInNewSpace(
5171 a2, // In: Size, in words.
5172 v0, // Out: Start of allocation (tagged).
5173 a3, // Scratch register.
5174 t0, // Scratch register.
5175 &slowcase,
5176 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5177 // v0: Start of allocated area, object-tagged.
5178 // a1: Number of elements in array, as smi.
5179 // t1: Number of elements, untagged.
5180
5181 // Set JSArray map to global.regexp_result_map().
5182 // Set empty properties FixedArray.
5183 // Set elements to point to FixedArray allocated right after the JSArray.
5184 // Interleave operations for better latency.
5185 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
5186 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5187 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5188 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
5189 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
5190 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
5191 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
5192 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
5193
5194 // Set input, index and length fields from arguments.
5195 __ lw(a1, MemOperand(sp, kPointerSize * 0));
5196 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
5197 __ lw(a1, MemOperand(sp, kPointerSize * 1));
5198 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
5199 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5200 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
5201
5202 // Fill out the elements FixedArray.
5203 // v0: JSArray, tagged.
5204 // a3: FixedArray, tagged.
5205 // t1: Number of elements in array, untagged.
5206
5207 // Set map.
5208 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5209 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
5210 // Set FixedArray length.
5211 __ sll(t2, t1, kSmiTagSize);
5212 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
5213 // Fill contents of fixed-array with the-hole.
5214 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
5215 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5216 // Fill fixed array elements with hole.
5217 // v0: JSArray, tagged.
5218 // a2: the hole.
5219 // a3: Start of elements in FixedArray.
5220 // t1: Number of elements to fill.
5221 Label loop;
5222 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
5223 __ addu(t1, t1, a3); // Point past last element to store.
5224 __ bind(&loop);
5225 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
5226 __ sw(a2, MemOperand(a3));
5227 __ Branch(&loop, USE_DELAY_SLOT);
5228 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
5229
5230 __ bind(&done);
5231 __ Addu(sp, sp, Operand(3 * kPointerSize));
5232 __ Ret();
5233
5234 __ bind(&slowcase);
5235 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01005236}
5237
5238
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005239void CallFunctionStub::FinishCode(Handle<Code> code) {
5240 code->set_has_function_cache(false);
5241}
5242
5243
5244void CallFunctionStub::Clear(Heap* heap, Address address) {
5245 UNREACHABLE();
5246}
5247
5248
5249Object* CallFunctionStub::GetCachedValue(Address address) {
5250 UNREACHABLE();
5251 return NULL;
5252}
5253
5254
Steve Block44f0eee2011-05-26 01:26:41 +01005255void CallFunctionStub::Generate(MacroAssembler* masm) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005256 // a1 : the function to call
5257 Label slow, non_function;
Ben Murdoch257744e2011-11-30 15:57:28 +00005258
5259 // The receiver might implicitly be the global object. This is
5260 // indicated by passing the hole as the receiver to the call
5261 // function stub.
5262 if (ReceiverMightBeImplicit()) {
5263 Label call;
5264 // Get the receiver from the stack.
5265 // function, receiver [, arguments]
5266 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
5267 // Call as function is indicated with the hole.
5268 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5269 __ Branch(&call, ne, t0, Operand(at));
5270 // Patch the receiver on the stack with the global receiver object.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005271 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5272 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
5273 __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00005274 __ bind(&call);
5275 }
5276
Ben Murdoch257744e2011-11-30 15:57:28 +00005277 // Check that the function is really a JavaScript function.
5278 // a1: pushed function (to be verified)
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005279 __ JumpIfSmi(a1, &non_function);
Ben Murdoch257744e2011-11-30 15:57:28 +00005280 // Get the map of the function object.
5281 __ GetObjectType(a1, a2, a2);
5282 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
5283
5284 // Fast-case: Invoke the function now.
5285 // a1: pushed function
5286 ParameterCount actual(argc_);
5287
5288 if (ReceiverMightBeImplicit()) {
5289 Label call_as_function;
5290 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5291 __ Branch(&call_as_function, eq, t0, Operand(at));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005292 __ InvokeFunction(a1,
5293 actual,
5294 JUMP_FUNCTION,
5295 NullCallWrapper(),
5296 CALL_AS_METHOD);
Ben Murdoch257744e2011-11-30 15:57:28 +00005297 __ bind(&call_as_function);
5298 }
5299 __ InvokeFunction(a1,
5300 actual,
5301 JUMP_FUNCTION,
5302 NullCallWrapper(),
5303 CALL_AS_FUNCTION);
5304
5305 // Slow-case: Non-function called.
5306 __ bind(&slow);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005307 // Check for function proxy.
5308 __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
5309 __ push(a1); // Put proxy as additional argument.
5310 __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5311 __ li(a2, Operand(0, RelocInfo::NONE));
5312 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5313 __ SetCallKind(t1, CALL_AS_METHOD);
5314 {
5315 Handle<Code> adaptor =
5316 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5317 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5318 }
5319
Ben Murdoch257744e2011-11-30 15:57:28 +00005320 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5321 // of the original receiver from the call site).
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005322 __ bind(&non_function);
Ben Murdoch257744e2011-11-30 15:57:28 +00005323 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5324 __ li(a0, Operand(argc_)); // Set up the number of arguments.
5325 __ mov(a2, zero_reg);
5326 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005327 __ SetCallKind(t1, CALL_AS_METHOD);
Ben Murdoch257744e2011-11-30 15:57:28 +00005328 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5329 RelocInfo::CODE_TARGET);
Steve Block44f0eee2011-05-26 01:26:41 +01005330}
5331
5332
5333// Unfortunately you have to run without snapshots to see most of these
5334// names in the profile since most compare stubs end up in the snapshot.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005335void CompareStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005336 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5337 (lhs_.is(a1) && rhs_.is(a0)));
Ben Murdoch257744e2011-11-30 15:57:28 +00005338 const char* cc_name;
5339 switch (cc_) {
5340 case lt: cc_name = "LT"; break;
5341 case gt: cc_name = "GT"; break;
5342 case le: cc_name = "LE"; break;
5343 case ge: cc_name = "GE"; break;
5344 case eq: cc_name = "EQ"; break;
5345 case ne: cc_name = "NE"; break;
5346 default: cc_name = "UnknownCondition"; break;
5347 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005348 bool is_equality = cc_ == eq || cc_ == ne;
5349 stream->Add("CompareStub_%s", cc_name);
5350 stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5351 stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5352 if (strict_ && is_equality) stream->Add("_STRICT");
5353 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5354 if (!include_number_compare_) stream->Add("_NO_NUMBER");
5355 if (!include_smi_compare_) stream->Add("_NO_SMI");
Steve Block44f0eee2011-05-26 01:26:41 +01005356}
5357
5358
5359int CompareStub::MinorKey() {
Ben Murdoch257744e2011-11-30 15:57:28 +00005360 // Encode the two parameters in a unique 16 bit value.
5361 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5362 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5363 (lhs_.is(a1) && rhs_.is(a0)));
5364 return ConditionField::encode(static_cast<unsigned>(cc_))
5365 | RegisterField::encode(lhs_.is(a0))
5366 | StrictField::encode(strict_)
5367 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5368 | IncludeSmiCompareField::encode(include_smi_compare_);
Steve Block44f0eee2011-05-26 01:26:41 +01005369}
5370
5371
Ben Murdoch257744e2011-11-30 15:57:28 +00005372// StringCharCodeAtGenerator.
Steve Block44f0eee2011-05-26 01:26:41 +01005373void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005374 Label flat_string;
5375 Label ascii_string;
5376 Label got_char_code;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005377 Label sliced_string;
Ben Murdoch257744e2011-11-30 15:57:28 +00005378
Ben Murdoch257744e2011-11-30 15:57:28 +00005379 ASSERT(!t0.is(index_));
5380 ASSERT(!t0.is(result_));
5381 ASSERT(!t0.is(object_));
5382
5383 // If the receiver is a smi trigger the non-string case.
5384 __ JumpIfSmi(object_, receiver_not_string_);
5385
5386 // Fetch the instance type of the receiver into result register.
5387 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5388 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5389 // If the receiver is not a string trigger the non-string case.
5390 __ And(t0, result_, Operand(kIsNotStringMask));
5391 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5392
5393 // If the index is non-smi trigger the non-smi case.
5394 __ JumpIfNotSmi(index_, &index_not_smi_);
5395
Ben Murdoch257744e2011-11-30 15:57:28 +00005396 __ bind(&got_smi_index_);
5397
5398 // Check for index out of range.
5399 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005400 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
Ben Murdoch257744e2011-11-30 15:57:28 +00005401
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005402 __ sra(index_, index_, kSmiTagSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00005403
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005404 StringCharLoadGenerator::Generate(masm,
5405 object_,
5406 index_,
5407 result_,
5408 &call_runtime_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005409
Ben Murdoch257744e2011-11-30 15:57:28 +00005410 __ sll(result_, result_, kSmiTagSize);
5411 __ bind(&exit_);
Steve Block44f0eee2011-05-26 01:26:41 +01005412}
5413
5414
5415void StringCharCodeAtGenerator::GenerateSlow(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005416 MacroAssembler* masm,
5417 const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005418 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5419
5420 // Index is not a smi.
5421 __ bind(&index_not_smi_);
5422 // If index is a heap number, try converting it to an integer.
5423 __ CheckMap(index_,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005424 result_,
Ben Murdoch257744e2011-11-30 15:57:28 +00005425 Heap::kHeapNumberMapRootIndex,
5426 index_not_number_,
5427 DONT_DO_SMI_CHECK);
5428 call_helper.BeforeCall(masm);
5429 // Consumed by runtime conversion function:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005430 __ Push(object_, index_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005431 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5432 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5433 } else {
5434 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5435 // NumberToSmi discards numbers that are not exact integers.
5436 __ CallRuntime(Runtime::kNumberToSmi, 1);
5437 }
5438
5439 // Save the conversion result before the pop instructions below
5440 // have a chance to overwrite it.
5441
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005442 __ Move(index_, v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00005443 __ pop(object_);
5444 // Reload the instance type.
5445 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5446 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5447 call_helper.AfterCall(masm);
5448 // If index is still not a smi, it must be out of range.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005449 __ JumpIfNotSmi(index_, index_out_of_range_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005450 // Otherwise, return to the fast path.
5451 __ Branch(&got_smi_index_);
5452
5453 // Call runtime. We get here when the receiver is a string and the
5454 // index is a number, but the code for getting the actual character
5455 // is too complex (e.g., when the string needs to be flattened).
5456 __ bind(&call_runtime_);
5457 call_helper.BeforeCall(masm);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005458 __ sll(index_, index_, kSmiTagSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00005459 __ Push(object_, index_);
5460 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5461
5462 __ Move(result_, v0);
5463
5464 call_helper.AfterCall(masm);
5465 __ jmp(&exit_);
5466
5467 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
Steve Block44f0eee2011-05-26 01:26:41 +01005468}
5469
5470
5471// -------------------------------------------------------------------------
5472// StringCharFromCodeGenerator
5473
5474void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005475 // Fast case of Heap::LookupSingleCharacterStringFromCode.
5476
5477 ASSERT(!t0.is(result_));
5478 ASSERT(!t0.is(code_));
5479
5480 STATIC_ASSERT(kSmiTag == 0);
5481 STATIC_ASSERT(kSmiShiftSize == 0);
5482 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5483 __ And(t0,
5484 code_,
5485 Operand(kSmiTagMask |
5486 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5487 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
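  // Unpacking the test above: a valid code_ is a smi (low tag bit clear)
  // whose untagged value is at most String::kMaxAsciiCharCode. Since
  // kMaxAsciiCharCode + 1 is a power of two, a larger (or negative) char
  // code, or a non-smi, leaves a bit set under the mask and takes the slow
  // case.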
5488
5489 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5490 // At this point code register contains smi tagged ASCII char code.
5491 STATIC_ASSERT(kSmiTag == 0);
5492 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5493 __ Addu(result_, result_, t0);
5494 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5495 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5496 __ Branch(&slow_case_, eq, result_, Operand(t0));
5497 __ bind(&exit_);
Steve Block44f0eee2011-05-26 01:26:41 +01005498}
5499
5500
5501void StringCharFromCodeGenerator::GenerateSlow(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005502 MacroAssembler* masm,
5503 const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005504 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5505
5506 __ bind(&slow_case_);
5507 call_helper.BeforeCall(masm);
5508 __ push(code_);
5509 __ CallRuntime(Runtime::kCharFromCode, 1);
5510 __ Move(result_, v0);
5511
5512 call_helper.AfterCall(masm);
5513 __ Branch(&exit_);
5514
5515 __ Abort("Unexpected fallthrough from CharFromCode slow case");
Steve Block44f0eee2011-05-26 01:26:41 +01005516}
5517
5518
5519// -------------------------------------------------------------------------
5520// StringCharAtGenerator
5521
5522void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005523 char_code_at_generator_.GenerateFast(masm);
5524 char_from_code_generator_.GenerateFast(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01005525}
5526
5527
5528void StringCharAtGenerator::GenerateSlow(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005529 MacroAssembler* masm,
5530 const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005531 char_code_at_generator_.GenerateSlow(masm, call_helper);
5532 char_from_code_generator_.GenerateSlow(masm, call_helper);
Steve Block44f0eee2011-05-26 01:26:41 +01005533}
5534
5535
Steve Block44f0eee2011-05-26 01:26:41 +01005536void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5537 Register dest,
5538 Register src,
5539 Register count,
5540 Register scratch,
5541 bool ascii) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005542 Label loop;
5543 Label done;
5544 // This loop just copies one character at a time, as it is only used for
5545 // very short strings.
5546 if (!ascii) {
5547 __ addu(count, count, count);
5548 }
5549 __ Branch(&done, eq, count, Operand(zero_reg));
5550 __ addu(count, dest, count); // Count now points past the last dest byte.
5551
5552 __ bind(&loop);
5553 __ lbu(scratch, MemOperand(src));
5554 __ addiu(src, src, 1);
5555 __ sb(scratch, MemOperand(dest));
5556 __ addiu(dest, dest, 1);
5557 __ Branch(&loop, lt, dest, Operand(count));
5558
5559 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01005560}
5561
5562
5563enum CopyCharactersFlags {
5564 COPY_ASCII = 1,
5565 DEST_ALWAYS_ALIGNED = 2
5566};
5567
5568
5569void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5570 Register dest,
5571 Register src,
5572 Register count,
5573 Register scratch1,
5574 Register scratch2,
5575 Register scratch3,
5576 Register scratch4,
5577 Register scratch5,
5578 int flags) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005579 bool ascii = (flags & COPY_ASCII) != 0;
5580 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5581
5582 if (dest_always_aligned && FLAG_debug_code) {
5583 // Check that destination is actually word aligned if the flag says
5584 // that it is.
5585 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5586 __ Check(eq,
5587 "Destination of copy not aligned.",
5588 scratch4,
5589 Operand(zero_reg));
5590 }
5591
5592 const int kReadAlignment = 4;
5593 const int kReadAlignmentMask = kReadAlignment - 1;
5594 // Ensure that reading an entire aligned word containing the last character
5595 // of a string will not read outside the allocated area (because we pad up
5596 // to kObjectAlignment).
5597 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5598 // Assumes word reads and writes are little endian.
5599 // Nothing to do for zero characters.
5600 Label done;
5601
5602 if (!ascii) {
5603 __ addu(count, count, count);
5604 }
5605 __ Branch(&done, eq, count, Operand(zero_reg));
5606
5607 Label byte_loop;
5608 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5609 __ Subu(scratch1, count, Operand(8));
5610 __ Addu(count, dest, Operand(count));
5611 Register limit = count; // Copy until dest equals this.
5612 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5613
5614 if (!dest_always_aligned) {
5615 // Align dest by byte copying. Copies between zero and three bytes.
5616 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5617 Label dest_aligned;
5618 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5619 Label aligned_loop;
5620 __ bind(&aligned_loop);
5621 __ lbu(scratch1, MemOperand(src));
5622 __ addiu(src, src, 1);
5623 __ sb(scratch1, MemOperand(dest));
5624 __ addiu(dest, dest, 1);
5625 __ addiu(scratch4, scratch4, 1);
5626 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5627 __ bind(&dest_aligned);
5628 }
5629
5630 Label simple_loop;
5631
5632 __ And(scratch4, src, Operand(kReadAlignmentMask));
5633 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5634
5635 // Loop for src/dst that are not aligned the same way.
5636 // This loop uses lwl and lwr instructions. These instructions
5637 // depend on the endianness, and the implementation assumes little-endian.
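  // In effect this is the standard little-endian "unaligned load word"
  // idiom; one iteration is roughly (sketch):
  //   lwr scratch1, 0(src)   // bytes of the word starting at src
  //   lwl scratch1, 3(src)   // remaining bytes of the 32-bit value
  //   sw  scratch1, 0(dest)  // dest was aligned above, plain store is fine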
5638 {
5639 Label loop;
5640 __ bind(&loop);
5641 __ lwr(scratch1, MemOperand(src));
5642 __ Addu(src, src, Operand(kReadAlignment));
5643 __ lwl(scratch1, MemOperand(src, -1));
5644 __ sw(scratch1, MemOperand(dest));
5645 __ Addu(dest, dest, Operand(kReadAlignment));
5646 __ Subu(scratch2, limit, dest);
5647 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5648 }
5649
5650 __ Branch(&byte_loop);
5651
5652 // Simple loop.
5653 // Copy words from src to dest, until less than four bytes left.
5654 // Both src and dest are word aligned.
5655 __ bind(&simple_loop);
5656 {
5657 Label loop;
5658 __ bind(&loop);
5659 __ lw(scratch1, MemOperand(src));
5660 __ Addu(src, src, Operand(kReadAlignment));
5661 __ sw(scratch1, MemOperand(dest));
5662 __ Addu(dest, dest, Operand(kReadAlignment));
5663 __ Subu(scratch2, limit, dest);
5664 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5665 }
5666
5667 // Copy bytes from src to dest until dest hits limit.
5668 __ bind(&byte_loop);
5669 // Test if dest has already reached the limit.
5670 __ Branch(&done, ge, dest, Operand(limit));
5671 __ lbu(scratch1, MemOperand(src));
5672 __ addiu(src, src, 1);
5673 __ sb(scratch1, MemOperand(dest));
5674 __ addiu(dest, dest, 1);
5675 __ Branch(&byte_loop);
5676
5677 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01005678}
5679
5680
5681void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5682 Register c1,
5683 Register c2,
5684 Register scratch1,
5685 Register scratch2,
5686 Register scratch3,
5687 Register scratch4,
5688 Register scratch5,
5689 Label* not_found) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005690 // Register scratch3 is the general scratch register in this function.
5691 Register scratch = scratch3;
5692
5693 // Make sure that both characters are not digits as such strings has a
5694 // different hash algorithm. Don't try to look for these in the symbol table.
5695 Label not_array_index;
5696 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5697 __ Branch(&not_array_index,
5698 Ugreater,
5699 scratch,
5700 Operand(static_cast<int>('9' - '0')));
5701 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5702
5703 // If check failed combine both characters into single halfword.
5704 // This is required by the contract of the method: code at the
5705 // not_found branch expects this combination in c1 register.
5706 Label tmp;
5707 __ sll(scratch1, c2, kBitsPerByte);
5708 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5709 __ Or(c1, c1, scratch1);
5710 __ bind(&tmp);
5711 __ Branch(not_found,
5712 Uless_equal,
5713 scratch,
5714 Operand(static_cast<int>('9' - '0')));
5715
5716 __ bind(&not_array_index);
5717 // Calculate the two character string hash.
5718 Register hash = scratch1;
5719 StringHelper::GenerateHashInit(masm, hash, c1);
5720 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5721 StringHelper::GenerateHashGetHash(masm, hash);
5722
5723 // Collect the two characters in a register.
5724 Register chars = c1;
5725 __ sll(scratch, c2, kBitsPerByte);
5726 __ Or(chars, chars, scratch);
5727
5728 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5729 // hash: hash of two character string.
5730
5731 // Load symbol table.
5732 // Load address of first element of the symbol table.
5733 Register symbol_table = c2;
5734 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5735
5736 Register undefined = scratch4;
5737 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5738
5739 // Calculate capacity mask from the symbol table capacity.
5740 Register mask = scratch2;
5741 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5742 __ sra(mask, mask, 1);
5743 __ Addu(mask, mask, -1);
5744
5745 // Calculate untagged address of the first element of the symbol table.
5746 Register first_symbol_table_element = symbol_table;
5747 __ Addu(first_symbol_table_element, symbol_table,
5748 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5749
5750 // Registers.
5751 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5752 // hash: hash of two character string
5753 // mask: capacity mask
5754 // first_symbol_table_element: address of the first element of
5755 // the symbol table
5756 // undefined: the undefined object
5757 // scratch: -
5758
5759 // Perform a number of probes in the symbol table.
5760 static const int kProbes = 4;
5761 Label found_in_symbol_table;
5762 Label next_probe[kProbes];
5763 Register candidate = scratch5; // Scratch register contains candidate.
5764 for (int i = 0; i < kProbes; i++) {
5765 // Calculate entry in symbol table.
5766 if (i > 0) {
5767 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5768 } else {
5769 __ mov(candidate, hash);
5770 }
5771
5772 __ And(candidate, candidate, Operand(mask));
5773
5774 // Load the entry from the symbol table.
5775 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5776 __ sll(scratch, candidate, kPointerSizeLog2);
5777 __ Addu(scratch, scratch, first_symbol_table_element);
5778 __ lw(candidate, MemOperand(scratch));
5779
5780 // If entry is undefined no string with this hash can be found.
5781 Label is_string;
5782 __ GetObjectType(candidate, scratch, scratch);
5783 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5784
5785 __ Branch(not_found, eq, undefined, Operand(candidate));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005786 // Must be the hole (deleted entry).
Ben Murdoch257744e2011-11-30 15:57:28 +00005787 if (FLAG_debug_code) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005788 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
5789 __ Assert(eq, "oddball in symbol table is not undefined or the hole",
Ben Murdoch257744e2011-11-30 15:57:28 +00005790 scratch, Operand(candidate));
5791 }
5792 __ jmp(&next_probe[i]);
5793
5794 __ bind(&is_string);
5795
5796 // Check that the candidate is a non-external ASCII string. The instance
5797 // type is still in the scratch register from the CompareObjectType
5798 // operation.
5799 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5800
5801 // If length is not 2 the string is not a candidate.
5802 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5803 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5804
5805 // Check if the two characters match.
5806 // Assumes that word load is little endian.
5807 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5808 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5809 __ bind(&next_probe[i]);
5810 }
5811
5812 // No matching 2 character string found by probing.
5813 __ jmp(not_found);
5814
5815 // Scratch register contains result when we fall through to here.
5816 Register result = candidate;
5817 __ bind(&found_in_symbol_table);
5818 __ mov(v0, result);
Steve Block44f0eee2011-05-26 01:26:41 +01005819}
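
// Roughly, the probe sequence above corresponds to the following C++ sketch.
// KeyAt(), IsSeqAsciiString(), Length() and FirstTwoChars() are hypothetical
// helpers used only for illustration; kProbes and GetProbeOffset() mirror the
// code above:
//
//   // chars holds char1 | (char2 << 8); hash is the two-character hash.
//   for (int i = 0; i < kProbes; i++) {
//     uint32_t entry = (i == 0 ? hash
//                              : hash + SymbolTable::GetProbeOffset(i)) & mask;
//     Object* candidate = KeyAt(symbol_table, entry);
//     if (candidate == undefined) return NotFound();   // hash not present
//     if (candidate == the_hole) continue;             // deleted entry
//     if (IsSeqAsciiString(candidate) &&
//         Length(candidate) == 2 &&
//         FirstTwoChars(candidate) == chars) {
//       return candidate;                              // found the symbol
//     }
//   }
//   return NotFound();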
5820
5821
5822void StringHelper::GenerateHashInit(MacroAssembler* masm,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005823 Register hash,
5824 Register character) {
5825 // hash = character + (character << 10);
5826 __ sll(hash, character, 10);
Ben Murdoch257744e2011-11-30 15:57:28 +00005827 __ addu(hash, hash, character);
5828 // hash ^= hash >> 6;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005829 __ srl(at, hash, 6);
Ben Murdoch257744e2011-11-30 15:57:28 +00005830 __ xor_(hash, hash, at);
Steve Block44f0eee2011-05-26 01:26:41 +01005831}
5832
5833
5834void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005835 Register hash,
5836 Register character) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005837 // hash += character;
5838 __ addu(hash, hash, character);
5839 // hash += hash << 10;
5840 __ sll(at, hash, 10);
5841 __ addu(hash, hash, at);
5842 // hash ^= hash >> 6;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005843 __ srl(at, hash, 6);
Ben Murdoch257744e2011-11-30 15:57:28 +00005844 __ xor_(hash, hash, at);
Steve Block44f0eee2011-05-26 01:26:41 +01005845}
5846
5847
5848void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005849 Register hash) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005850 // hash += hash << 3;
5851 __ sll(at, hash, 3);
5852 __ addu(hash, hash, at);
5853 // hash ^= hash >> 11;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005854 __ srl(at, hash, 11);
Ben Murdoch257744e2011-11-30 15:57:28 +00005855 __ xor_(hash, hash, at);
5856 // hash += hash << 15;
5857 __ sll(at, hash, 15);
5858 __ addu(hash, hash, at);
5859
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005860 uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
5861 __ li(at, Operand(kHashShiftCutOffMask));
5862 __ and_(hash, hash, at);
5863
Ben Murdoch257744e2011-11-30 15:57:28 +00005864 // if (hash == 0) hash = 27;
5865 __ ori(at, zero_reg, 27);
5866 __ movz(hash, at, hash);
Steve Block44f0eee2011-05-26 01:26:41 +01005867}
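
// The three hash helpers above compute the same value as this C++ sketch for
// a two-character string (a minimal illustration; String::kHashShift is the
// same constant used by the generated code above):
//
//   static uint32_t TwoCharHashSketch(uint32_t c1, uint32_t c2) {
//     uint32_t hash = c1 + (c1 << 10);        // GenerateHashInit
//     hash ^= hash >> 6;
//     hash += c2;                             // GenerateHashAddCharacter
//     hash += hash << 10;
//     hash ^= hash >> 6;
//     hash += hash << 3;                      // GenerateHashGetHash
//     hash ^= hash >> 11;
//     hash += hash << 15;
//     hash &= (1u << (32 - String::kHashShift)) - 1;
//     return (hash == 0) ? 27 : hash;
//   }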
5868
5869
5870void SubStringStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005871 Label sub_string_runtime;
5872 // Stack frame on entry.
5873 // ra: return address
5874 // sp[0]: to
5875 // sp[4]: from
5876 // sp[8]: string
5877
5878 // This stub is called from the native-call %_SubString(...), so
5879 // nothing can be assumed about the arguments. It is tested that:
5880 // "string" is a sequential string,
5881 // both "from" and "to" are smis, and
5882 // 0 <= from <= to <= string.length.
5883 // If any of these assumptions fail, we call the runtime system.
5884
5885 static const int kToOffset = 0 * kPointerSize;
5886 static const int kFromOffset = 1 * kPointerSize;
5887 static const int kStringOffset = 2 * kPointerSize;
5888
5889 Register to = t2;
5890 Register from = t3;
5891
5892 // Check bounds and smi-ness.
5893 __ lw(to, MemOperand(sp, kToOffset));
5894 __ lw(from, MemOperand(sp, kFromOffset));
5895 STATIC_ASSERT(kFromOffset == kToOffset + 4);
5896 STATIC_ASSERT(kSmiTag == 0);
5897 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5898
5899 __ JumpIfNotSmi(from, &sub_string_runtime);
5900 __ JumpIfNotSmi(to, &sub_string_runtime);
5901
5902 __ sra(a3, from, kSmiTagSize); // Remove smi tag.
5903 __ sra(t5, to, kSmiTagSize); // Remove smi tag.
5904
5905 // a3: from index (untagged smi)
5906 // t5: to index (untagged smi)
5907
5908 __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
5909
5910 __ subu(a2, t5, a3);
5911 __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
5912
5913 // Special handling of sub-strings of length 1 and 2. One character strings
5914 // are handled in the runtime system (looked up in the single character
Ben Murdoch589d6972011-11-30 16:04:58 +00005915 // cache). Two character strings are looked up in the symbol table in
5916 // generated code.
Ben Murdoch257744e2011-11-30 15:57:28 +00005917 __ Branch(&sub_string_runtime, lt, a2, Operand(2));
5918
5919 // Both to and from are smis.
5920
5921 // a2: result string length
5922 // a3: from index (untagged smi)
5923 // t2: (a.k.a. to): to (smi)
5924 // t3: (a.k.a. from): from offset (smi)
5925 // t5: to index (untagged smi)
5926
5927 // Make sure first argument is a sequential (or flat) string.
Ben Murdoch589d6972011-11-30 16:04:58 +00005928 __ lw(v0, MemOperand(sp, kStringOffset));
5929 __ Branch(&sub_string_runtime, eq, v0, Operand(kSmiTagMask));
Ben Murdoch257744e2011-11-30 15:57:28 +00005930
Ben Murdoch589d6972011-11-30 16:04:58 +00005931 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00005932 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
Ben Murdoch589d6972011-11-30 16:04:58 +00005933 __ And(t4, v0, Operand(kIsNotStringMask));
Ben Murdoch257744e2011-11-30 15:57:28 +00005934
5935 __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
5936
Ben Murdoch589d6972011-11-30 16:04:58 +00005937 // Short-cut for the case of trivial substring.
5938 Label return_v0;
5939 // v0: original string
5940 // a2: result string length
5941 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
5942 __ sra(t0, t0, 1);
5943 __ Branch(&return_v0, eq, a2, Operand(t0));
5944
5945 Label create_slice;
5946 if (FLAG_string_slices) {
5947 __ Branch(&create_slice, ge, a2, Operand(SlicedString::kMinLength));
5948 }
5949
5950 // v0: original string
Ben Murdoch257744e2011-11-30 15:57:28 +00005951 // a1: instance type
5952 // a2: result string length
5953 // a3: from index (untagged smi)
Ben Murdoch257744e2011-11-30 15:57:28 +00005954 // t2: (a.k.a. to): to (smi)
5955 // t3: (a.k.a. from): from offset (smi)
5956 // t5: to index (untagged smi)
5957
5958 Label seq_string;
5959 __ And(t0, a1, Operand(kStringRepresentationMask));
5960 STATIC_ASSERT(kSeqStringTag < kConsStringTag);
5961 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
Ben Murdoch589d6972011-11-30 16:04:58 +00005962 STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
Ben Murdoch257744e2011-11-30 15:57:28 +00005963
Ben Murdoch589d6972011-11-30 16:04:58 +00005964 // Slices and external strings go to runtime.
Ben Murdoch257744e2011-11-30 15:57:28 +00005965 __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
5966
5967 // Sequential strings are handled directly.
5968 __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
5969
5970 // Cons string. Try to recurse (once) on the first substring.
5971 // (This adds a little more generality than necessary to handle flattened
5972 // cons strings, but not much).
Ben Murdoch589d6972011-11-30 16:04:58 +00005973 __ lw(v0, FieldMemOperand(v0, ConsString::kFirstOffset));
5974 __ lw(t0, FieldMemOperand(v0, HeapObject::kMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00005975 __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5976 STATIC_ASSERT(kSeqStringTag == 0);
Ben Murdoch589d6972011-11-30 16:04:58 +00005977 // Cons, slices and external strings go to runtime.
Ben Murdoch257744e2011-11-30 15:57:28 +00005978 __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
5979
5980 // Definitely a sequential string.
5981 __ bind(&seq_string);
5982
Ben Murdoch589d6972011-11-30 16:04:58 +00005983 // v0: original string
Ben Murdoch257744e2011-11-30 15:57:28 +00005984 // a1: instance type
5985 // a2: result string length
5986 // a3: from index (untagged smi)
Ben Murdoch257744e2011-11-30 15:57:28 +00005987 // t2: (a.k.a. to): to (smi)
5988 // t3: (a.k.a. from): from offset (smi)
5989 // t5: to index (untagged smi)
5990
Ben Murdoch589d6972011-11-30 16:04:58 +00005991 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00005992 __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
5993 to = no_reg;
5994
Ben Murdoch589d6972011-11-30 16:04:58 +00005995 // v0: original string or left hand side of the original cons string.
Ben Murdoch257744e2011-11-30 15:57:28 +00005996 // a1: instance type
5997 // a2: result string length
5998 // a3: from index (untagged smi)
Ben Murdoch257744e2011-11-30 15:57:28 +00005999 // t3: (a.k.a. from): from offset (smi)
6000 // t5: to index (untagged smi)
6001
6002 // Check for flat ASCII string.
6003 Label non_ascii_flat;
6004 STATIC_ASSERT(kTwoByteStringTag == 0);
6005
6006 __ And(t4, a1, Operand(kStringEncodingMask));
6007 __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
6008
6009 Label result_longer_than_two;
6010 __ Branch(&result_longer_than_two, gt, a2, Operand(2));
6011
6012 // Sub string of length 2 requested.
6013 // Get the two characters forming the sub string.
Ben Murdoch589d6972011-11-30 16:04:58 +00006014 __ Addu(v0, v0, Operand(a3));
6015 __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6016 __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
Ben Murdoch257744e2011-11-30 15:57:28 +00006017
6018 // Try to look up the two character string in the symbol table.
6019 Label make_two_character_string;
6020 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6021 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
6022 Counters* counters = masm->isolate()->counters();
Ben Murdoch589d6972011-11-30 16:04:58 +00006023 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00006024
6025 // a2: result string length.
6026 // a3: two characters combined into halfword in little endian byte order.
6027 __ bind(&make_two_character_string);
6028 __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
6029 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
Ben Murdoch589d6972011-11-30 16:04:58 +00006030 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00006031
6032 __ bind(&result_longer_than_two);
6033
Ben Murdoch589d6972011-11-30 16:04:58 +00006034 // Locate 'from' character of string.
6035 __ Addu(t1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6036 __ sra(t4, from, 1);
6037 __ Addu(t1, t1, t4);
6038
Ben Murdoch257744e2011-11-30 15:57:28 +00006039 // Allocate the result.
6040 __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
6041
Ben Murdoch589d6972011-11-30 16:04:58 +00006042 // v0: result string
6043 // a2: result string length
Ben Murdoch257744e2011-11-30 15:57:28 +00006044 // a3: from index (untagged smi)
Ben Murdoch589d6972011-11-30 16:04:58 +00006045 // t1: first character of substring to copy
Ben Murdoch257744e2011-11-30 15:57:28 +00006046 // t3: (a.k.a. from): from offset (smi)
6047 // Locate first character of result.
6048 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
Ben Murdoch257744e2011-11-30 15:57:28 +00006049
Ben Murdoch589d6972011-11-30 16:04:58 +00006050 // v0: result string
6051 // a1: first character of result string
6052 // a2: result string length
6053 // t1: first character of substring to copy
Ben Murdoch257744e2011-11-30 15:57:28 +00006054 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
6055 StringHelper::GenerateCopyCharactersLong(
6056 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
Ben Murdoch589d6972011-11-30 16:04:58 +00006057 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00006058
6059 __ bind(&non_ascii_flat);
Ben Murdoch589d6972011-11-30 16:04:58 +00006060 // a2: result string length
6061 // t1: string
Ben Murdoch257744e2011-11-30 15:57:28 +00006062 // t3: (a.k.a. from): from offset (smi)
6063 // Check for flat two byte string.
6064
Ben Murdoch589d6972011-11-30 16:04:58 +00006065 // Locate 'from' character of string.
6066 __ Addu(t1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6067 // As "from" is a smi it is 2 times the value which matches the size of a two
6068 // byte character.
6069 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6070 __ Addu(t1, t1, Operand(from));
6071
Ben Murdoch257744e2011-11-30 15:57:28 +00006072 // Allocate the result.
6073 __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
6074
Ben Murdoch589d6972011-11-30 16:04:58 +00006075 // v0: result string
6076 // a2: result string length
6077 // t1: first character of substring to copy
Ben Murdoch257744e2011-11-30 15:57:28 +00006078 // Locate first character of result.
6079 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
Ben Murdoch589d6972011-11-30 16:04:58 +00006080
Ben Murdoch257744e2011-11-30 15:57:28 +00006081 from = no_reg;
6082
6083 // v0: result string.
6084 // a1: first character of result.
6085 // a2: result length.
Ben Murdoch589d6972011-11-30 16:04:58 +00006086 // t1: first character of substring to copy.
Ben Murdoch257744e2011-11-30 15:57:28 +00006087 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
6088 StringHelper::GenerateCopyCharactersLong(
6089 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
Ben Murdoch589d6972011-11-30 16:04:58 +00006090 __ jmp(&return_v0);
6091
6092 if (FLAG_string_slices) {
6093 __ bind(&create_slice);
6094 // v0: original string
6095 // a1: instance type
6096 // a2: length
6097 // a3: from index (untagged smi)
6098 // t2 (a.k.a. to): to (smi)
6099 // t3 (a.k.a. from): from offset (smi)
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006100 Label allocate_slice, sliced_string, seq_or_external_string;
6101 // If the string is not indirect, it can only be sequential or external.
Ben Murdoch589d6972011-11-30 16:04:58 +00006102 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6103 STATIC_ASSERT(kIsIndirectStringMask != 0);
6104 __ And(t4, a1, Operand(kIsIndirectStringMask));
6105 // Not an indirect string: it is either sequential or external.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006106 __ Branch(&seq_or_external_string, eq, t4, Operand(zero_reg));
Ben Murdoch589d6972011-11-30 16:04:58 +00006107
6108 __ And(t4, a1, Operand(kSlicedNotConsMask));
6109 __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
6110 // Cons string. Check whether it is flat, then fetch first part.
6111 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
6112 __ LoadRoot(t5, Heap::kEmptyStringRootIndex);
6113 __ Branch(&sub_string_runtime, ne, t1, Operand(t5));
6114 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
6115 __ jmp(&allocate_slice);
6116
6117 __ bind(&sliced_string);
6118 // Sliced string. Fetch parent and correct start index by offset.
6119 __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6120 __ addu(t3, t3, t1);
6121 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6122 __ jmp(&allocate_slice);
6123
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006124 __ bind(&seq_or_external_string);
6125 // Sequential or external string. Just move string to the correct register.
Ben Murdoch589d6972011-11-30 16:04:58 +00006126 __ mov(t1, v0);
6127
6128 __ bind(&allocate_slice);
6129 // a1: instance type of original string
6130 // a2: length
6131 // t1: underlying subject string
6132 // t3 (a.k.a. from): from offset (smi)
6133 // Allocate new sliced string. At this point we do not reload the instance
6134 // type including the string encoding because we simply rely on the info
6135 // provided by the original string. It does not matter if the original
6136 // string's encoding is wrong because we always have to recheck encoding of
6137 // the newly created string's parent anyways due to externalized strings.
6138 Label two_byte_slice, set_slice_header;
6139 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6140 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6141 __ And(t4, a1, Operand(kStringEncodingMask));
6142 __ Branch(&two_byte_slice, eq, t4, Operand(zero_reg));
6143 __ AllocateAsciiSlicedString(v0, a2, a3, t0, &sub_string_runtime);
6144 __ jmp(&set_slice_header);
6145 __ bind(&two_byte_slice);
6146 __ AllocateTwoByteSlicedString(v0, a2, a3, t0, &sub_string_runtime);
6147 __ bind(&set_slice_header);
6148 __ sw(t3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6149 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6150 }
6151
6152 __ bind(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00006153 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6154 __ Addu(sp, sp, Operand(3 * kPointerSize));
6155 __ Ret();
6156
6157 // Just jump to runtime to create the sub string.
6158 __ bind(&sub_string_runtime);
6159 __ TailCallRuntime(Runtime::kSubString, 3, 1);
6160}
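
// The decision tree implemented by this stub, as a hedged C++ sketch.
// MakeTwoCharString, MakeSlicedString and CopyToFlatString are placeholder
// names for the code paths above, not real runtime functions:
//
//   if (!from->IsSmi() || !to->IsSmi() || from_index < 0 ||
//       from_index > to_index) return Runtime();           // slow path
//   int length = to_index - from_index;
//   if (length < 2) return Runtime();                      // 1-char cache
//   if (length == string->length()) return string;         // trivial case
//   if (FLAG_string_slices && length >= SlicedString::kMinLength) {
//     return MakeSlicedString(string, from_index, length); // parent + offset
//   }
//   if (length == 2) return MakeTwoCharString(...);        // symbol table
//   return CopyToFlatString(string, from_index, length);   // character copy
//   // Unhandled representations and failed allocations fall back to Runtime().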
6161
6162
6163void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6164 Register left,
6165 Register right,
6166 Register scratch1,
6167 Register scratch2,
6168 Register scratch3) {
6169 Register length = scratch1;
6170
6171 // Compare lengths.
6172 Label strings_not_equal, check_zero_length;
6173 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6174 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6175 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6176 __ bind(&strings_not_equal);
6177 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6178 __ Ret();
6179
6180 // Check if the length is zero.
6181 Label compare_chars;
6182 __ bind(&check_zero_length);
6183 STATIC_ASSERT(kSmiTag == 0);
6184 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6185 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6186 __ Ret();
6187
6188 // Compare characters.
6189 __ bind(&compare_chars);
6190
6191 GenerateAsciiCharsCompareLoop(masm,
6192 left, right, length, scratch2, scratch3, v0,
6193 &strings_not_equal);
6194
6195 // Characters are equal.
6196 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6197 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01006198}
6199
6200
6201void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Steve Block44f0eee2011-05-26 01:26:41 +01006202 Register left,
Ben Murdoch257744e2011-11-30 15:57:28 +00006203 Register right,
Steve Block44f0eee2011-05-26 01:26:41 +01006204 Register scratch1,
6205 Register scratch2,
6206 Register scratch3,
6207 Register scratch4) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006208 Label result_not_equal, compare_lengths;
6209 // Find minimum length and length difference.
6210 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6211 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6212 __ Subu(scratch3, scratch1, Operand(scratch2));
6213 Register length_delta = scratch3;
6214 __ slt(scratch4, scratch2, scratch1);
6215 __ movn(scratch1, scratch2, scratch4);
6216 Register min_length = scratch1;
6217 STATIC_ASSERT(kSmiTag == 0);
6218 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6219
6220 // Compare loop.
6221 GenerateAsciiCharsCompareLoop(masm,
6222 left, right, min_length, scratch2, scratch4, v0,
6223 &result_not_equal);
6224
6225 // Compare lengths - strings up to min-length are equal.
6226 __ bind(&compare_lengths);
6227 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6228 // Use length_delta as result if it's zero.
6229 __ mov(scratch2, length_delta);
6230 __ mov(scratch4, zero_reg);
6231 __ mov(v0, zero_reg);
6232
6233 __ bind(&result_not_equal);
6234 // Conditionally update the result based either on length_delta or
6235 // the last comparison performed in the loop above.
6236 Label ret;
6237 __ Branch(&ret, eq, scratch2, Operand(scratch4));
6238 __ li(v0, Operand(Smi::FromInt(GREATER)));
6239 __ Branch(&ret, gt, scratch2, Operand(scratch4));
6240 __ li(v0, Operand(Smi::FromInt(LESS)));
6241 __ bind(&ret);
6242 __ Ret();
6243}
6244
6245
6246void StringCompareStub::GenerateAsciiCharsCompareLoop(
6247 MacroAssembler* masm,
6248 Register left,
6249 Register right,
6250 Register length,
6251 Register scratch1,
6252 Register scratch2,
6253 Register scratch3,
6254 Label* chars_not_equal) {
6255 // Change index to run from -length to -1 by adding length to string
6256 // start. This means that loop ends when index reaches zero, which
6257 // doesn't need an additional compare.
6258 __ SmiUntag(length);
6259 __ Addu(scratch1, length,
6260 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6261 __ Addu(left, left, Operand(scratch1));
6262 __ Addu(right, right, Operand(scratch1));
6263 __ Subu(length, zero_reg, length);
6264 Register index = length; // index = -length;
6265
6266
6267 // Compare loop.
6268 Label loop;
6269 __ bind(&loop);
6270 __ Addu(scratch3, left, index);
6271 __ lbu(scratch1, MemOperand(scratch3));
6272 __ Addu(scratch3, right, index);
6273 __ lbu(scratch2, MemOperand(scratch3));
6274 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6275 __ Addu(index, index, 1);
6276 __ Branch(&loop, ne, index, Operand(zero_reg));
Steve Block44f0eee2011-05-26 01:26:41 +01006277}
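
// The negative-index trick used above, shown as a C++ sketch (illustration
// only; left_chars/right_chars stand for the first character addresses):
//
//   const uint8_t* left_end = left_chars + length;    // one past the end
//   const uint8_t* right_end = right_chars + length;
//   for (int index = -length; index != 0; index++) {  // ends exactly at 0
//     if (left_end[index] != right_end[index]) return false;  // not equal
//   }
//   return true;  // all min-length characters matched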
6278
6279
6280void StringCompareStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006281 Label runtime;
6282
6283 Counters* counters = masm->isolate()->counters();
6284
6285 // Stack frame on entry.
6286 // sp[0]: right string
6287 // sp[4]: left string
6288 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6289 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6290
6291 Label not_same;
6292 __ Branch(&not_same, ne, a0, Operand(a1));
6293 STATIC_ASSERT(EQUAL == 0);
6294 STATIC_ASSERT(kSmiTag == 0);
6295 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6296 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6297 __ Addu(sp, sp, Operand(2 * kPointerSize));
6298 __ Ret();
6299
6300 __ bind(&not_same);
6301
6302 // Check that both objects are sequential ASCII strings.
6303 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6304
6305 // Compare flat ASCII strings natively. Remove arguments from stack first.
6306 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6307 __ Addu(sp, sp, Operand(2 * kPointerSize));
6308 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6309
6310 __ bind(&runtime);
6311 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01006312}
6313
6314
6315void StringAddStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006316 Label string_add_runtime, call_builtin;
6317 Builtins::JavaScript builtin_id = Builtins::ADD;
6318
6319 Counters* counters = masm->isolate()->counters();
6320
6321 // Stack on entry:
6322 // sp[0]: second argument (right).
6323 // sp[4]: first argument (left).
6324
6325 // Load the two arguments.
6326 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6327 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6328
6329 // Make sure that both arguments are strings if not known in advance.
6330 if (flags_ == NO_STRING_ADD_FLAGS) {
6331 __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
6332 // Load instance types.
6333 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6334 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6335 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6336 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6337 STATIC_ASSERT(kStringTag == 0);
6338 // If either is not a string, go to runtime.
6339 __ Or(t4, t0, Operand(t1));
6340 __ And(t4, t4, Operand(kIsNotStringMask));
6341 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
6342 } else {
6343 // Here at least one of the arguments is definitely a string.
6344 // We convert the one that is not known to be a string.
6345 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6346 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6347 GenerateConvertArgument(
6348 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6349 builtin_id = Builtins::STRING_ADD_RIGHT;
6350 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6351 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6352 GenerateConvertArgument(
6353 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6354 builtin_id = Builtins::STRING_ADD_LEFT;
6355 }
6356 }
6357
6358 // Both arguments are strings.
6359 // a0: first string
6360 // a1: second string
6361 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6362 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6363 {
6364 Label strings_not_empty;
6365 // Check if either of the strings is empty. In that case return the other.
6366 // These tests use a zero-length check on the string length, which is a Smi.
6367 // Assert that Smi::FromInt(0) is really 0.
6368 STATIC_ASSERT(kSmiTag == 0);
6369 ASSERT(Smi::FromInt(0) == 0);
6370 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6371 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6372 __ mov(v0, a0); // Assume we'll return first string (from a0).
6373 __ movz(v0, a1, a2); // If first is empty, return second (from a1).
6374 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6375 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6376 __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6377 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6378
6379 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6380 __ Addu(sp, sp, Operand(2 * kPointerSize));
6381 __ Ret();
6382
6383 __ bind(&strings_not_empty);
6384 }
6385
6386 // Untag both string-lengths.
6387 __ sra(a2, a2, kSmiTagSize);
6388 __ sra(a3, a3, kSmiTagSize);
6389
6390 // Both strings are non-empty.
6391 // a0: first string
6392 // a1: second string
6393 // a2: length of first string
6394 // a3: length of second string
6395 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6396 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6397 // Look at the length of the result of adding the two strings.
6398 Label string_add_flat_result, longer_than_two;
6399 // Adding two lengths can't overflow.
6400 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6401 __ Addu(t2, a2, Operand(a3));
6402 // Use the symbol table when adding two one character strings, as it
6403 // helps later optimizations to return a symbol here.
6404 __ Branch(&longer_than_two, ne, t2, Operand(2));
6405
6406 // Check that both strings are non-external ASCII strings.
6407 if (flags_ != NO_STRING_ADD_FLAGS) {
6408 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6409 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6410 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6411 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6412 }
6413 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6414 &string_add_runtime);
6415
6416 // Get the two characters forming the sub string.
6417 __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6418 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6419
6420 // Try to look up the two character string in the symbol table. If it is
6421 // not found, just allocate a new one.
6422 Label make_two_character_string;
6423 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6424 masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
6425 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6426 __ Addu(sp, sp, Operand(2 * kPointerSize));
6427 __ Ret();
6428
6429 __ bind(&make_two_character_string);
6430 // The resulting string has length 2, and the first characters of the two
6431 // strings are combined into a single halfword in the a2 register.
6432 // So we can fill the resulting string with a single halfword store
6433 // instruction (which assumes that the processor is in little-endian mode)
6434 // instead of two loops.
6435 __ li(t2, Operand(2));
6436 __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
6437 __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6438 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6439 __ Addu(sp, sp, Operand(2 * kPointerSize));
6440 __ Ret();
6441
6442 __ bind(&longer_than_two);
6443 // Check if resulting string will be flat.
6444 __ Branch(&string_add_flat_result, lt, t2,
6445 Operand(String::kMinNonFlatLength));
6446 // Handle exceptionally long strings in the runtime system.
6447 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6448 ASSERT(IsPowerOf2(String::kMaxLength + 1));
6449 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
6450 __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
6451
6452 // If result is not supposed to be flat, allocate a cons string object.
6453 // If both strings are ASCII the result is an ASCII cons string.
6454 if (flags_ != NO_STRING_ADD_FLAGS) {
6455 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6456 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6457 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6458 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6459 }
6460 Label non_ascii, allocated, ascii_data;
6461 STATIC_ASSERT(kTwoByteStringTag == 0);
6462 // Branch to non_ascii if either string-encoding field is zero (non-ascii).
6463 __ And(t4, t0, Operand(t1));
6464 __ And(t4, t4, Operand(kStringEncodingMask));
6465 __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6466
6467 // Allocate an ASCII cons string.
6468 __ bind(&ascii_data);
6469 __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
6470 __ bind(&allocated);
6471 // Fill the fields of the cons string.
6472 __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
6473 __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
6474 __ mov(v0, t3);
6475 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6476 __ Addu(sp, sp, Operand(2 * kPointerSize));
6477 __ Ret();
6478
6479 __ bind(&non_ascii);
6480 // At least one of the strings is two-byte. Check whether it happens
6481 // to contain only ASCII characters.
6482 // t0: first instance type.
6483 // t1: second instance type.
6484 // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
6485 __ And(at, t0, Operand(kAsciiDataHintMask));
6486 __ and_(at, at, t1);
6487 __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6488
6489 __ xor_(t0, t0, t1);
6490 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6491 __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6492 __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6493
6494 // Allocate a two byte cons string.
6495 __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
6496 __ Branch(&allocated);
6497
6498 // Handle creating a flat result. First check that both strings are
6499 // sequential and that they have the same encoding.
6500 // a0: first string
6501 // a1: second string
6502 // a2: length of first string
6503 // a3: length of second string
6504 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6505 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6506 // t2: sum of lengths.
6507 __ bind(&string_add_flat_result);
6508 if (flags_ != NO_STRING_ADD_FLAGS) {
6509 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6510 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6511 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6512 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6513 }
6514 // Check that both strings are sequential, meaning that we
6515 // branch to runtime if either string tag is non-zero.
6516 STATIC_ASSERT(kSeqStringTag == 0);
6517 __ Or(t4, t0, Operand(t1));
6518 __ And(t4, t4, Operand(kStringRepresentationMask));
6519 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
6520
6521 // Now check if both strings have the same encoding (ASCII/Two-byte).
6522 // a0: first string
6523 // a1: second string
6524 // a2: length of first string
6525 // a3: length of second string
6526 // t0: first string instance type
6527 // t1: second string instance type
6528 // t2: sum of lengths.
6529 Label non_ascii_string_add_flat_result;
6530 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
6531 __ xor_(t3, t1, t0);
6532 __ And(t3, t3, Operand(kStringEncodingMask));
6533 __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
6534 // And see if it's ASCII (0) or two-byte (1).
6535 __ And(t3, t0, Operand(kStringEncodingMask));
6536 __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
6537
6538 // Both strings are sequential ASCII strings. We also know that they are
6539 // short (since the sum of the lengths is less than kMinNonFlatLength).
6540 // t2: length of resulting flat string
6541 __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
6542 // Locate first character of result.
6543 __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6544 // Locate first character of first argument.
6545 __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6546 // a0: first character of first string.
6547 // a1: second string.
6548 // a2: length of first string.
6549 // a3: length of second string.
6550 // t2: first character of result.
6551 // t3: result string.
6552 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
6553
6554 // Load second argument and locate first character.
6555 __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6556 // a1: first character of second string.
6557 // a3: length of second string.
6558 // t2: next character of result.
6559 // t3: result string.
6560 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6561 __ mov(v0, t3);
6562 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6563 __ Addu(sp, sp, Operand(2 * kPointerSize));
6564 __ Ret();
6565
6566 __ bind(&non_ascii_string_add_flat_result);
6567 // Both strings are sequential two byte strings.
6568 // a0: first string.
6569 // a1: second string.
6570 // a2: length of first string.
6571 // a3: length of second string.
6572 // t2: sum of length of strings.
6573 __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
6574 // a0: first string.
6575 // a1: second string.
6576 // a2: length of first string.
6577 // a3: length of second string.
6578 // t3: result string.
6579
6580 // Locate first character of result.
6581 __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6582 // Locate first character of first argument.
6583 __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6584
6585 // a0: first character of first string.
6586 // a1: second string.
6587 // a2: length of first string.
6588 // a3: length of second string.
6589 // t2: first character of result.
6590 // t3: result string.
6591 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
6592
6593 // Locate first character of second argument.
6594 __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6595
6596 // a1: first character of second string.
6597 // a3: length of second string.
6598 // t2: next character of result (after copy of first string).
6599 // t3: result string.
6600 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6601
6602 __ mov(v0, t3);
6603 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6604 __ Addu(sp, sp, Operand(2 * kPointerSize));
6605 __ Ret();
6606
6607 // Just jump to runtime to add the two strings.
6608 __ bind(&string_add_runtime);
6609 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6610
6611 if (call_builtin.is_linked()) {
6612 __ bind(&call_builtin);
6613 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6614 }
6615}
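
// Length-based dispatch performed by this stub, as a hedged C++ sketch.
// LookupTwoCharSymbol, AllocateFlat and AllocateCons name the paths taken
// above, not actual runtime entry points:
//
//   if (left->length() == 0) return right;
//   if (right->length() == 0) return left;
//   int length = left->length() + right->length();
//   if (length == 2) return LookupTwoCharSymbol(left, right);  // symbol table
//   if (length < String::kMinNonFlatLength) {
//     return AllocateFlat(left, right);      // copy both character sequences
//   }
//   if (length > String::kMaxLength) return Runtime();
//   return AllocateCons(left, right);        // two pointers, no copying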
6616
6617
6618void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6619 int stack_offset,
6620 Register arg,
6621 Register scratch1,
6622 Register scratch2,
6623 Register scratch3,
6624 Register scratch4,
6625 Label* slow) {
6626 // First check if the argument is already a string.
6627 Label not_string, done;
6628 __ JumpIfSmi(arg, &not_string);
6629 __ GetObjectType(arg, scratch1, scratch1);
6630 __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6631
6632 // Check the number to string cache.
6633 Label not_cached;
6634 __ bind(&not_string);
6635 // Puts the cached result into scratch1.
6636 NumberToStringStub::GenerateLookupNumberStringCache(masm,
6637 arg,
6638 scratch1,
6639 scratch2,
6640 scratch3,
6641 scratch4,
6642 false,
6643 &not_cached);
6644 __ mov(arg, scratch1);
6645 __ sw(arg, MemOperand(sp, stack_offset));
6646 __ jmp(&done);
6647
6648 // Check if the argument is a safe string wrapper.
6649 __ bind(&not_cached);
6650 __ JumpIfSmi(arg, slow);
6651 __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6652 __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6653 __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6654 __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6655 __ And(scratch2, scratch2, scratch4);
6656 __ Branch(slow, ne, scratch2, Operand(scratch4));
6657 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6658 __ sw(arg, MemOperand(sp, stack_offset));
6659
6660 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01006661}
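
// What the argument conversion above amounts to, sketched in C++. The helper
// names (NumberToStringCacheLookup, IsSafeStringWrapper, ValueOf) are
// illustrative stand-ins for the checks performed above:
//
//   if (arg->IsString()) return arg;                        // already a string
//   if (NumberToStringCacheLookup(arg, &cached)) return cached;
//   if (arg->IsJSValue() && IsSafeStringWrapper(arg)) {
//     return ValueOf(arg);   // unwrap the String inside the JSValue
//   }
//   // Otherwise fall through to the JavaScript builtin (slow path).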
6662
6663
6664void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006665 ASSERT(state_ == CompareIC::SMIS);
6666 Label miss;
6667 __ Or(a2, a1, a0);
6668 __ JumpIfNotSmi(a2, &miss);
6669
6670 if (GetCondition() == eq) {
6671 // For equality we do not care about the sign of the result.
6672 __ Subu(v0, a0, a1);
6673 } else {
6674 // Untag before subtracting to avoid handling overflow.
6675 __ SmiUntag(a1);
6676 __ SmiUntag(a0);
6677 __ Subu(v0, a1, a0);
6678 }
6679 __ Ret();
6680
6681 __ bind(&miss);
6682 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006683}
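
// Equivalent logic in C++ form (sketch only): for equality only the zero /
// non-zero distinction matters, so the raw tagged values can be subtracted;
// for ordering the operands are untagged first so the subtraction cannot
// overflow.
//
//   if (!lhs->IsSmi() || !rhs->IsSmi()) return Miss();
//   if (cc == eq) return raw(rhs) - raw(lhs);   // 0 iff identical smis
//   return Smi::cast(lhs)->value() - Smi::cast(rhs)->value();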
6684
6685
6686void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006687 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6688
6689 Label generic_stub;
6690 Label unordered;
6691 Label miss;
6692 __ And(a2, a1, Operand(a0));
6693 __ JumpIfSmi(a2, &generic_stub);
6694
6695 __ GetObjectType(a0, a2, a2);
6696 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6697 __ GetObjectType(a1, a2, a2);
6698 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6699
6700 // Inlining the double comparison and falling back to the general compare
6701 // stub if NaN is involved or FPU is unsupported.
6702 if (CpuFeatures::IsSupported(FPU)) {
6703 CpuFeatures::Scope scope(FPU);
6704
6705 // Load left and right operand.
6706 __ Subu(a2, a1, Operand(kHeapObjectTag));
6707 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6708 __ Subu(a2, a0, Operand(kHeapObjectTag));
6709 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6710
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006711 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6712 Label fpu_eq, fpu_lt;
6713 // Test if equal, and also handle the unordered/NaN case.
6714 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006715
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006716 // Test if less (unordered case is already handled).
6717 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006718
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006719 // Otherwise it's greater, so just fall thru, and return.
6720 __ Ret(USE_DELAY_SLOT);
6721 __ li(v0, Operand(GREATER)); // In delay slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00006722
Ben Murdoch257744e2011-11-30 15:57:28 +00006723 __ bind(&fpu_eq);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006724 __ Ret(USE_DELAY_SLOT);
6725 __ li(v0, Operand(EQUAL)); // In delay slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00006726
6727 __ bind(&fpu_lt);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006728 __ Ret(USE_DELAY_SLOT);
6729 __ li(v0, Operand(LESS)); // In delay slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00006730
6731 __ bind(&unordered);
6732 }
6733
6734 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6735 __ bind(&generic_stub);
6736 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6737
6738 __ bind(&miss);
6739 GenerateMiss(masm);
6740}
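
// The FPU fast path above boils down to this sketch (NaN operands take the
// unordered exit and end up in the generic CompareStub):
//
//   double x = HeapNumber::cast(lhs)->value();
//   double y = HeapNumber::cast(rhs)->value();
//   if (isnan(x) || isnan(y)) return GenericCompare();   // unordered
//   if (x == y) return EQUAL;                            // 0
//   return (x < y) ? LESS : GREATER;                     // -1 or 1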
6741
6742
6743void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6744 ASSERT(state_ == CompareIC::SYMBOLS);
6745 Label miss;
6746
6747 // Registers containing left and right operands respectively.
6748 Register left = a1;
6749 Register right = a0;
6750 Register tmp1 = a2;
6751 Register tmp2 = a3;
6752
6753 // Check that both operands are heap objects.
6754 __ JumpIfEitherSmi(left, right, &miss);
6755
6756 // Check that both operands are symbols.
6757 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6758 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6759 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6760 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6761 STATIC_ASSERT(kSymbolTag != 0);
6762 __ And(tmp1, tmp1, Operand(tmp2));
6763 __ And(tmp1, tmp1, kIsSymbolMask);
6764 __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6765 // Make sure a0 is non-zero. At this point input operands are
6766 // guaranteed to be non-zero.
6767 ASSERT(right.is(a0));
6768 STATIC_ASSERT(EQUAL == 0);
6769 STATIC_ASSERT(kSmiTag == 0);
6770 __ mov(v0, right);
6771 // Symbols are compared by identity.
6772 __ Ret(ne, left, Operand(right));
6773 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6774 __ Ret();
6775
6776 __ bind(&miss);
6777 GenerateMiss(masm);
6778}
6779
6780
6781void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6782 ASSERT(state_ == CompareIC::STRINGS);
6783 Label miss;
6784
6785 // Registers containing left and right operands respectively.
6786 Register left = a1;
6787 Register right = a0;
6788 Register tmp1 = a2;
6789 Register tmp2 = a3;
6790 Register tmp3 = t0;
6791 Register tmp4 = t1;
6792 Register tmp5 = t2;
6793
6794 // Check that both operands are heap objects.
6795 __ JumpIfEitherSmi(left, right, &miss);
6796
6797 // Check that both operands are strings. This leaves the instance
6798 // types loaded in tmp1 and tmp2.
6799 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6800 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6801 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6802 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6803 STATIC_ASSERT(kNotStringTag != 0);
6804 __ Or(tmp3, tmp1, tmp2);
6805 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6806 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6807
6808 // Fast check for identical strings.
6809 Label left_ne_right;
6810 STATIC_ASSERT(EQUAL == 0);
6811 STATIC_ASSERT(kSmiTag == 0);
6812 __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
6813 __ mov(v0, zero_reg); // In the delay slot.
6814 __ Ret();
6815 __ bind(&left_ne_right);
6816
6817 // Handle not identical strings.
6818
6819 // Check that both strings are symbols. If they are, we're done
6820 // because we already know they are not identical.
6821 ASSERT(GetCondition() == eq);
6822 STATIC_ASSERT(kSymbolTag != 0);
6823 __ And(tmp3, tmp1, Operand(tmp2));
6824 __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6825 Label is_symbol;
6826 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
6827 __ mov(v0, a0); // In the delay slot.
6828 // Make sure a0 is non-zero. At this point input operands are
6829 // guaranteed to be non-zero.
6830 ASSERT(right.is(a0));
6831 __ Ret();
6832 __ bind(&is_symbol);
6833
6834 // Check that both strings are sequential ASCII.
6835 Label runtime;
6836 __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
6837 &runtime);
6838
6839 // Compare flat ASCII strings. Returns when done.
6840 StringCompareStub::GenerateFlatAsciiStringEquals(
6841 masm, left, right, tmp1, tmp2, tmp3);
6842
6843 // Handle more complex cases in runtime.
6844 __ bind(&runtime);
6845 __ Push(left, right);
6846 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6847
6848 __ bind(&miss);
6849 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006850}
6851
6852
6853void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006854 ASSERT(state_ == CompareIC::OBJECTS);
6855 Label miss;
6856 __ And(a2, a1, Operand(a0));
6857 __ JumpIfSmi(a2, &miss);
6858
6859 __ GetObjectType(a0, a2, a2);
6860 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6861 __ GetObjectType(a1, a2, a2);
6862 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6863
6864 ASSERT(GetCondition() == eq);
6865 __ Subu(v0, a0, Operand(a1));
6866 __ Ret();
6867
6868 __ bind(&miss);
6869 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006870}
6871
6872
6873void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006874 __ Push(a1, a0);
6875 __ push(ra);
6876
6877 // Call the runtime system in a fresh internal frame.
6878 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6879 masm->isolate());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006880 {
6881 FrameScope scope(masm, StackFrame::INTERNAL);
6882 __ Push(a1, a0);
6883 __ li(t0, Operand(Smi::FromInt(op_)));
6884 __ push(t0);
6885 __ CallExternalReference(miss, 3);
6886 }
Ben Murdoch257744e2011-11-30 15:57:28 +00006887 // Compute the entry point of the rewritten stub.
6888 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
6889 // Restore registers.
6890 __ pop(ra);
6891 __ pop(a0);
6892 __ pop(a1);
6893 __ Jump(a2);
6894}
6895
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00006896
Ben Murdoch257744e2011-11-30 15:57:28 +00006897void DirectCEntryStub::Generate(MacroAssembler* masm) {
6898 // No need to pop or drop anything, LeaveExitFrame will restore the old
6899 // stack, thus dropping the allocated space for the return value.
6900 // The saved ra is after the reserved stack space for the 4 args.
6901 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
6902
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006903 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006904 // In case of an error the return address may point to a memory area
6905 // filled with kZapValue by the GC.
6906 // Dereference the address and check for this.
6907 __ lw(t0, MemOperand(t9));
6908 __ Assert(ne, "Received invalid return address.", t0,
6909 Operand(reinterpret_cast<uint32_t>(kZapValue)));
6910 }
6911 __ Jump(t9);
Steve Block44f0eee2011-05-26 01:26:41 +01006912}
6913
6914
Ben Murdoch257744e2011-11-30 15:57:28 +00006915void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6916 ExternalReference function) {
6917 __ li(t9, Operand(function));
6918 this->GenerateCall(masm, t9);
6919}
6920
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00006921
Ben Murdoch257744e2011-11-30 15:57:28 +00006922void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6923 Register target) {
6924 __ Move(t9, target);
6925 __ AssertStackIsAligned();
6926 // Allocate space for arg slots.
6927 __ Subu(sp, sp, kCArgsSlotsSize);
6928
6929 // Block the trampoline pool through the whole function to make sure the
6930 // number of generated instructions is constant.
6931 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
6932
6933 // We need to get the current 'pc' value, which is not available on MIPS.
6934 Label find_ra;
6935 masm->bal(&find_ra); // ra = pc + 8.
6936 masm->nop(); // Branch delay slot nop.
6937 masm->bind(&find_ra);
6938
6939 const int kNumInstructionsToJump = 6;
6940 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
6941 // Push return address (accessible to GC through exit frame pc).
6942 // This spot for ra was reserved in EnterExitFrame.
6943 masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
6944 masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6945 RelocInfo::CODE_TARGET), true);
6946 // Call the function.
6947 masm->Jump(t9);
6948 // Make sure the stored 'ra' points to this position.
6949 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
6950}
6951
6952
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006953void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
6954 Label* miss,
6955 Label* done,
6956 Register receiver,
6957 Register properties,
6958 Handle<String> name,
6959 Register scratch0) {
6960 // If the names of the slots in the range 1 to kProbes - 1 for the hash
Ben Murdoch257744e2011-11-30 15:57:28 +00006961 // value are not equal to the name, and the kProbes-th slot is not used (its
6962 // name is the undefined value), the hash table is guaranteed not to contain
6963 // the property. This holds even if some slots represent deleted properties
6964 // (their names are the null value).
6965 for (int i = 0; i < kInlinedProbes; i++) {
6966 // scratch0 points to properties hash.
6967 // Compute the masked index: (hash + i + i * i) & mask.
6968 Register index = scratch0;
6969 // Capacity is smi 2^n.
6970 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
6971 __ Subu(index, index, Operand(1));
6972 __ And(index, index, Operand(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006973 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
Ben Murdoch257744e2011-11-30 15:57:28 +00006974
6975 // Scale the index by multiplying by the entry size.
6976 ASSERT(StringDictionary::kEntrySize == 3);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006977 __ sll(at, index, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00006978 __ Addu(index, index, at);
6979
6980 Register entity_name = scratch0;
6981 // Having undefined at this place means the name is not contained.
6982 ASSERT_EQ(kSmiTagSize, 1);
6983 Register tmp = properties;
Ben Murdoch257744e2011-11-30 15:57:28 +00006984 __ sll(scratch0, index, 1);
6985 __ Addu(tmp, properties, scratch0);
6986 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
6987
6988 ASSERT(!tmp.is(entity_name));
6989 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
6990 __ Branch(done, eq, entity_name, Operand(tmp));
6991
6992 if (i != kInlinedProbes - 1) {
6993 // Stop if found the property.
6994 __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
6995
6996 // Check if the entry name is not a symbol.
6997 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
6998 __ lbu(entity_name,
6999 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
7000 __ And(scratch0, entity_name, Operand(kIsSymbolMask));
7001 __ Branch(miss, eq, scratch0, Operand(zero_reg));
7002
7003 // Restore the properties.
7004 __ lw(properties,
7005 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7006 }
7007 }
7008
7009 const int spill_mask =
7010 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007011 a2.bit() | a1.bit() | a0.bit() | v0.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00007012
7013 __ MultiPush(spill_mask);
7014 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7015 __ li(a1, Operand(Handle<String>(name)));
7016 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007017 __ CallStub(&stub);
7018 __ mov(at, v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00007019 __ MultiPop(spill_mask);
7020
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007021 __ Branch(done, eq, at, Operand(zero_reg));
7022 __ Branch(miss, ne, at, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00007023}
7024
7025
7026// Probe the string dictionary in the |elements| register. Jump to the
7027// |done| label if a property with the given name is found. Jump to
7028// the |miss| label otherwise.
7029// If lookup was successful |scratch2| will be equal to elements + 4 * index.
7030void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7031 Label* miss,
7032 Label* done,
7033 Register elements,
7034 Register name,
7035 Register scratch1,
7036 Register scratch2) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007037 ASSERT(!elements.is(scratch1));
7038 ASSERT(!elements.is(scratch2));
7039 ASSERT(!name.is(scratch1));
7040 ASSERT(!name.is(scratch2));
7041
Ben Murdoch257744e2011-11-30 15:57:28 +00007042 // Assert that name contains a string.
7043 if (FLAG_debug_code) __ AbortIfNotString(name);
7044
7045 // Compute the capacity mask.
7046 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
7047 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
7048 __ Subu(scratch1, scratch1, Operand(1));
7049
7050 // Generate an unrolled loop that performs a few probes before
7051 // giving up. Measurements done on Gmail indicate that 2 probes
7052 // cover ~93% of loads from dictionaries.
7053 for (int i = 0; i < kInlinedProbes; i++) {
7054 // Compute the masked index: (hash + i + i * i) & mask.
7055 __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7056 if (i > 0) {
7057 // Add the probe offset (i + i * i) left shifted to avoid right shifting
7058 // the hash in a separate instruction. The value hash + i + i * i is right
7059 // shifted in the following And instruction.
7060 ASSERT(StringDictionary::GetProbeOffset(i) <
7061 1 << (32 - String::kHashFieldOffset));
7062 __ Addu(scratch2, scratch2, Operand(
7063 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7064 }
7065 __ srl(scratch2, scratch2, String::kHashShift);
7066 __ And(scratch2, scratch1, scratch2);
7067
7068 // Scale the index by multiplying by the entry size.
7069 ASSERT(StringDictionary::kEntrySize == 3);
7070 // scratch2 = scratch2 * 3.
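    // Computed as (scratch2 << 1) + scratch2 to avoid a multiply instruction.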
7071
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007072 __ sll(at, scratch2, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00007073 __ Addu(scratch2, scratch2, at);
7074
7075 // Check if the key is identical to the name.
7076 __ sll(at, scratch2, 2);
7077 __ Addu(scratch2, elements, at);
7078 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
7079 __ Branch(done, eq, name, Operand(at));
7080 }
7081
7082 const int spill_mask =
7083 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007084 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
Ben Murdoch257744e2011-11-30 15:57:28 +00007085 ~(scratch1.bit() | scratch2.bit());
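  // scratch1 and scratch2 need not be preserved; in particular, scratch2 must
  // carry the result (elements + 4 * index) past the MultiPop below, so it
  // must not be restored.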
7086
7087 __ MultiPush(spill_mask);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007088 if (name.is(a0)) {
7089 ASSERT(!elements.is(a1));
7090 __ Move(a1, name);
7091 __ Move(a0, elements);
7092 } else {
7093 __ Move(a0, elements);
7094 __ Move(a1, name);
7095 }
Ben Murdoch257744e2011-11-30 15:57:28 +00007096 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
7097 __ CallStub(&stub);
7098 __ mov(scratch2, a2);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007099 __ mov(at, v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00007100 __ MultiPop(spill_mask);
7101
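  // A non-zero result in v0 (copied to at) means the name was found; in that
  // case scratch2 holds elements + 4 * index, as documented above. Zero means
  // the lookup failed.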
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007102 __ Branch(done, ne, at, Operand(zero_reg));
7103 __ Branch(miss, eq, at, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00007104}
7105
7106
7107void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007108 // This stub overrides SometimesSetsUpAFrame() to return false. That means
7109 // we cannot call anything that could cause a GC from this stub.
Ben Murdoch257744e2011-11-30 15:57:28 +00007110 // Registers:
7111 // dictionary (a0): StringDictionary to probe.
7112 // key (a1): the name to look up.
7113 // index (a2): will hold the index of the entry if the lookup
7114 // is successful.
7115 // result (v0): holds the result of the lookup.
7116 // Returns:
7117 // result is zero if the lookup failed, non-zero otherwise.
7118
7119 Register result = v0;
7120 Register dictionary = a0;
7121 Register key = a1;
7122 Register index = a2;
7123 Register mask = a3;
7124 Register hash = t0;
7125 Register undefined = t1;
7126 Register entry_key = t2;
7127
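  // Note: each dictionary entry spans kEntrySize (3) pointers and the keys
  // start at kElementsStartOffset, so the probe below scales the masked index
  // by 3 and then by the pointer size before loading the key.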
7128 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7129
7130 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
7131 __ sra(mask, mask, kSmiTagSize);
7132 __ Subu(mask, mask, Operand(1));
7133
7134 __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
7135
7136 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7137
7138 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7139 // Compute the masked index: (hash + i + i * i) & mask.
7140 // Capacity is smi 2^n.
7141 // The capacity is a smi and is always a power of two.
7142 // Add the probe offset (i + i * i) left shifted to avoid right shifting
7143 // the hash in a separate instruction. The value hash + i + i * i is
7144 // right shifted in the srl instruction that follows.
7145 ASSERT(StringDictionary::GetProbeOffset(i) <
7146 1 << (32 - String::kHashFieldOffset));
7147 __ Addu(index, hash, Operand(
7148 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7149 } else {
7150 __ mov(index, hash);
7151 }
7152 __ srl(index, index, String::kHashShift);
7153 __ And(index, mask, index);
7154
7155 // Scale the index by multiplying by the entry size.
7156 ASSERT(StringDictionary::kEntrySize == 3);
7157 // index *= 3.
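    // Computed as index + (index << 1) to avoid a multiply instruction.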
7158 __ mov(at, index);
7159 __ sll(index, index, 1);
7160 __ Addu(index, index, at);
7161
7162
7163 ASSERT_EQ(kSmiTagSize, 1);
7164 __ sll(index, index, 2);
7165 __ Addu(index, index, dictionary);
7166 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
7167
7168 // Finding undefined here means the name is not in the dictionary.
7169 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
7170
7171 // Stop probing if we found the property.
7172 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
7173
7174 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7175 // Check if the entry name is not a symbol.
7176 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7177 __ lbu(entry_key,
7178 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
7179 __ And(result, entry_key, Operand(kIsSymbolMask));
7180 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
7181 }
7182 }
7183
7184 __ bind(&maybe_in_dictionary);
7185 // If we are doing negative lookup then probing failure should be
7186 // treated as a lookup success. For positive lookup probing failure
7187 // should be treated as lookup failure.
7188 if (mode_ == POSITIVE_LOOKUP) {
7189 __ mov(result, zero_reg);
7190 __ Ret();
7191 }
7192
7193 __ bind(&in_dictionary);
7194 __ li(result, 1);
7195 __ Ret();
7196
7197 __ bind(&not_in_dictionary);
7198 __ mov(result, zero_reg);
7199 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01007200}
7201
7202
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007203struct AheadOfTimeWriteBarrierStubList {
7204 Register object, value, address;
7205 RememberedSetAction action;
7206};
7207
7208
7209struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7210 // Used in RegExpExecStub.
7211 { s2, s0, t3, EMIT_REMEMBERED_SET },
7212 { s2, a2, t3, EMIT_REMEMBERED_SET },
7213 // Used in CompileArrayPushCall.
7214 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7215 // Also used in KeyedStoreIC::GenerateGeneric.
7216 { a3, t0, t1, EMIT_REMEMBERED_SET },
7217 // Used in CompileStoreGlobal.
7218 { t0, a1, a2, OMIT_REMEMBERED_SET },
7219 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7220 { a1, a2, a3, EMIT_REMEMBERED_SET },
7221 { a3, a2, a1, EMIT_REMEMBERED_SET },
7222 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7223 { a2, a1, a3, EMIT_REMEMBERED_SET },
7224 { a3, a1, a2, EMIT_REMEMBERED_SET },
7225 // KeyedStoreStubCompiler::GenerateStoreFastElement.
7226 { t0, a2, a3, EMIT_REMEMBERED_SET },
7227 // ElementsTransitionGenerator::GenerateSmiOnlyToObject
7228 // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7229 // and ElementsTransitionGenerator::GenerateDoubleToObject
7230 { a2, a3, t5, EMIT_REMEMBERED_SET },
7231 // ElementsTransitionGenerator::GenerateDoubleToObject
7232 { t2, a2, a0, EMIT_REMEMBERED_SET },
7233 { a2, t2, t5, EMIT_REMEMBERED_SET },
7234 // StoreArrayLiteralElementStub::Generate
7235 { t1, a0, t2, EMIT_REMEMBERED_SET },
7236 // Null termination.
7237 { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
7238};
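// GenerateFixedRegStubsAheadOfTime() below pregenerates one stub per entry in
// the list above, and IsPregenerated() only recognizes the register
// combinations listed there; new fixed-register RecordWrite call sites
// therefore need a matching entry.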
7239
7240
7241bool RecordWriteStub::IsPregenerated() {
7242 for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7243 !entry->object.is(no_reg);
7244 entry++) {
7245 if (object_.is(entry->object) &&
7246 value_.is(entry->value) &&
7247 address_.is(entry->address) &&
7248 remembered_set_action_ == entry->action &&
7249 save_fp_regs_mode_ == kDontSaveFPRegs) {
7250 return true;
7251 }
7252 }
7253 return false;
7254}
7255
7256
7257bool StoreBufferOverflowStub::IsPregenerated() {
7258 return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7259}
7260
7261
7262void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7263 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7264 stub1.GetCode()->set_is_pregenerated(true);
7265}
7266
7267
7268void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7269 for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7270 !entry->object.is(no_reg);
7271 entry++) {
7272 RecordWriteStub stub(entry->object,
7273 entry->value,
7274 entry->address,
7275 entry->action,
7276 kDontSaveFPRegs);
7277 stub.GetCode()->set_is_pregenerated(true);
7278 }
7279}
7280
7281
7282// Takes the input in 3 registers: address_, value_ and object_. A pointer to
7283// the value has just been written into the object, and now this stub makes
7284// sure the GC is kept informed. The word in the object where the value has
7285// been written is in the address register.
7286void RecordWriteStub::Generate(MacroAssembler* masm) {
7287 Label skip_to_incremental_noncompacting;
7288 Label skip_to_incremental_compacting;
7289
7290 // The first two branch+nop instructions are generated with labels so as to
7291 // get the offset fixed up correctly by the bind(Label*) call. We patch it
7292 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
7293 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
7294 // incremental heap marking.
7295 // See RecordWriteStub::Patch for details.
7296 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
7297 __ nop();
7298 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
7299 __ nop();
7300
7301 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7302 __ RememberedSetHelper(object_,
7303 address_,
7304 value_,
7305 save_fp_regs_mode_,
7306 MacroAssembler::kReturnAtEnd);
7307 }
7308 __ Ret();
7309
7310 __ bind(&skip_to_incremental_noncompacting);
7311 GenerateIncremental(masm, INCREMENTAL);
7312
7313 __ bind(&skip_to_incremental_compacting);
7314 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7315
7316 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7317 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7318
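  // The two offsets (0 and 2 * kInstrSize) address the two beq instructions
  // emitted at the top of this stub; each branch is followed by its nop delay
  // slot, hence the 2 * kInstrSize spacing.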
7319 PatchBranchIntoNop(masm, 0);
7320 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
7321}
7322
7323
7324void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7325 regs_.Save(masm);
7326
7327 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7328 Label dont_need_remembered_set;
7329
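    // The remembered set records old-to-new pointers, so no entry is needed
    // if the value is not in new space, or if the object's page is already
    // flagged for scan-on-scavenge (the whole page gets scanned anyway).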
7330 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7331 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7332 regs_.scratch0(),
7333 &dont_need_remembered_set);
7334
7335 __ CheckPageFlag(regs_.object(),
7336 regs_.scratch0(),
7337 1 << MemoryChunk::SCAN_ON_SCAVENGE,
7338 ne,
7339 &dont_need_remembered_set);
7340
7341 // First notify the incremental marker if necessary, then update the
7342 // remembered set.
7343 CheckNeedsToInformIncrementalMarker(
7344 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7345 InformIncrementalMarker(masm, mode);
7346 regs_.Restore(masm);
7347 __ RememberedSetHelper(object_,
7348 address_,
7349 value_,
7350 save_fp_regs_mode_,
7351 MacroAssembler::kReturnAtEnd);
7352
7353 __ bind(&dont_need_remembered_set);
7354 }
7355
7356 CheckNeedsToInformIncrementalMarker(
7357 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7358 InformIncrementalMarker(masm, mode);
7359 regs_.Restore(masm);
7360 __ Ret();
7361}
7362
7363
7364void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7365 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7366 int argument_count = 3;
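  // Three C arguments are passed: a0 = object, a1 = slot address (for
  // INCREMENTAL_COMPACTION) or the value read from the slot (for INCREMENTAL),
  // and a2 = the isolate.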
7367 __ PrepareCallCFunction(argument_count, regs_.scratch0());
7368 Register address =
7369 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7370 ASSERT(!address.is(regs_.object()));
7371 ASSERT(!address.is(a0));
7372 __ Move(address, regs_.address());
7373 __ Move(a0, regs_.object());
7374 if (mode == INCREMENTAL_COMPACTION) {
7375 __ Move(a1, address);
7376 } else {
7377 ASSERT(mode == INCREMENTAL);
7378 __ lw(a1, MemOperand(address, 0));
7379 }
7380 __ li(a2, Operand(ExternalReference::isolate_address()));
7381
7382 AllowExternalCallThatCantCauseGC scope(masm);
7383 if (mode == INCREMENTAL_COMPACTION) {
7384 __ CallCFunction(
7385 ExternalReference::incremental_evacuation_record_write_function(
7386 masm->isolate()),
7387 argument_count);
7388 } else {
7389 ASSERT(mode == INCREMENTAL);
7390 __ CallCFunction(
7391 ExternalReference::incremental_marking_record_write_function(
7392 masm->isolate()),
7393 argument_count);
7394 }
7395 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7396}
7397
7398
7399void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7400 MacroAssembler* masm,
7401 OnNoNeedToInformIncrementalMarker on_no_need,
7402 Mode mode) {
7403 Label on_black;
7404 Label need_incremental;
7405 Label need_incremental_pop_scratch;
7406
7407 // Let's look at the color of the object: if it is not black, we don't have
7408 // to inform the incremental marker.
7409 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7410
7411 regs_.Restore(masm);
7412 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7413 __ RememberedSetHelper(object_,
7414 address_,
7415 value_,
7416 save_fp_regs_mode_,
7417 MacroAssembler::kReturnAtEnd);
7418 } else {
7419 __ Ret();
7420 }
7421
7422 __ bind(&on_black);
7423
7424 // Get the value from the slot.
7425 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7426
7427 if (mode == INCREMENTAL_COMPACTION) {
7428 Label ensure_not_white;
7429
7430 __ CheckPageFlag(regs_.scratch0(), // Contains value.
7431 regs_.scratch1(), // Scratch.
7432 MemoryChunk::kEvacuationCandidateMask,
7433 eq,
7434 &ensure_not_white);
7435
7436 __ CheckPageFlag(regs_.object(),
7437 regs_.scratch1(), // Scratch.
7438 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
7439 eq,
7440 &need_incremental);
7441
7442 __ bind(&ensure_not_white);
7443 }
7444
7445 // We need extra registers for this, so we push the object and the address
7446 // register temporarily.
7447 __ Push(regs_.object(), regs_.address());
7448 __ EnsureNotWhite(regs_.scratch0(), // The value.
7449 regs_.scratch1(), // Scratch.
7450 regs_.object(), // Scratch.
7451 regs_.address(), // Scratch.
7452 &need_incremental_pop_scratch);
7453 __ Pop(regs_.object(), regs_.address());
7454
7455 regs_.Restore(masm);
7456 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7457 __ RememberedSetHelper(object_,
7458 address_,
7459 value_,
7460 save_fp_regs_mode_,
7461 MacroAssembler::kReturnAtEnd);
7462 } else {
7463 __ Ret();
7464 }
7465
7466 __ bind(&need_incremental_pop_scratch);
7467 __ Pop(regs_.object(), regs_.address());
7468
7469 __ bind(&need_incremental);
7470
7471 // Fall through when we need to inform the incremental marker.
7472}
7473
7474
7475void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7476 // ----------- S t a t e -------------
7477 // -- a0 : element value to store
7478 // -- a1 : array literal
7479 // -- a2 : map of array literal
7480 // -- a3 : element index as smi
7481 // -- t0 : array literal index in function as smi
7482 // -----------------------------------
7483
7484 Label element_done;
7485 Label double_elements;
7486 Label smi_element;
7487 Label slow_elements;
7488 Label fast_elements;
7489
7490 __ CheckFastElements(a2, t1, &double_elements);
7491 // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
7492 __ JumpIfSmi(a0, &smi_element);
7493 __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
7494
7495 // Storing into the array literal requires an elements transition. Call
7496 // into the runtime.
7497 __ bind(&slow_elements);
7498 // Push the arguments and call the runtime.
7499 __ Push(a1, a3, a0);
7500 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
7501 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
7502 __ Push(t1, t0);
7503 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7504
7505 // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
7506 __ bind(&fast_elements);
7507 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
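  // a3 holds the index as a smi (index << kSmiTagSize); shifting it left by
  // kPointerSizeLog2 - kSmiTagSize yields the byte offset index * kPointerSize.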
7508 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7509 __ Addu(t2, t1, t2);
7510 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7511 __ sw(a0, MemOperand(t2, 0));
7512 // Update the write barrier for the array store.
7513 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
7514 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
7515 __ Ret(USE_DELAY_SLOT);
7516 __ mov(v0, a0);
7517
7518 // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
7519 // FAST_ELEMENTS, and value is Smi.
7520 __ bind(&smi_element);
7521 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7522 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7523 __ Addu(t2, t1, t2);
7524 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
7525 __ Ret(USE_DELAY_SLOT);
7526 __ mov(v0, a0);
7527
7528 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
7529 __ bind(&double_elements);
7530 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7531 __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
7532 &slow_elements);
7533 __ Ret(USE_DELAY_SLOT);
7534 __ mov(v0, a0);
7535}
7536
7537
Steve Block44f0eee2011-05-26 01:26:41 +01007538#undef __
7539
7540} } // namespace v8::internal
7541
7542#endif // V8_TARGET_ARCH_MIPS