// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in a0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(a0, &check_heap_number);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&call_builtin);
  __ push(a0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(a3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  int map_index = strict_mode_ == kStrictMode
      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
      : Context::FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
  __ Push(cp, a3, t0);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // Set up the fixed slots.
  __ li(a1, Operand(Smi::FromInt(0)));
  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the previous context.
  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ Pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a3, t0);
  __ lw(a3, MemOperand(t0));
  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t1));

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(a3);
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ LoadRoot(at, expected_map_index);
    __ Assert(eq, message, a3, Operand(at));
    __ pop(a3);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  // Return new object in v0.
  __ AllocateInNewSpace(size,
                        v0,
                        a1,
                        a2,
                        &slow_case,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ lw(a1, FieldMemOperand(a3, i));
      __ sw(a1, FieldMemOperand(v0, i));
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ Addu(a2, v0, Operand(JSArray::kSize));
    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));

    // Copy the elements array.
    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
  }

  // Return and remove the on-stack parameters.
  __ Addu(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ sra(source_, source_, kSmiTagSize);
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
  // Subtract from 0 if source was negative.
  __ subu(at, zero_reg, source_);
  __ movn(source_, at, exponent);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  // Safe to use 'at' as dest reg here.
  __ Or(at, exponent, Operand(exponent_word_for_1));
  __ movn(exponent, at, source_);  // Write exp when source not 0.
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, zero_reg);
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
  __ subu(mantissa, mantissa, zeros_);
  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
  __ Or(exponent, exponent, mantissa);

  // Shift up the source chopping the top bit off.
  __ Addu(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ sllv(source_, source_, zeros_);
  // Compute lower part of fraction (last 12 bits).
  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
  // And the top (top 20 bits).
  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
  __ or_(exponent, exponent, source_);

  __ Ret();
}
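
// A worked example of the sequence above: for the untagged value 5,
//   sign bits       = 0
//   clz(5)          = 29, so the biased exponent is 31 + 1023 - 29 = 1025
//   5 << (29 + 1)   = 0x40000000 (the implicit leading 1 is shifted out)
//   exponent word   = (1025 << 20) | (0x40000000 >> 12) = 0x40140000
//   mantissa word   = 0x40000000 << 20 = 0x00000000
// which is the IEEE 754 encoding of 5.0 (0x4014000000000000).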


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(scratch1, a0, kSmiTagSize);
    __ mtc1(scratch1, f14);
    __ cvt_d_w(f14, f14);
    __ sra(scratch1, a1, kSmiTagSize);
    __ mtc1(scratch1, f12);
    __ cvt_d_w(f12, f12);
    if (destination == kCoreRegisters) {
      __ Move(a2, a3, f14);
      __ Move(a0, a1, f12);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from a0 to a3 and a2 in double format.
    __ mov(scratch1, a0);
    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
    __ push(ra);
    __ Call(stub1.GetCode());
    // Write Smi from a1 to a1 and a0 in double format.
    __ mov(scratch1, a1);
    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
    __ Call(stub2.GetCode());
    __ pop(ra);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {

  // Load right operand (a0) to f14 or a2/a3.
  LoadNumber(masm, destination,
             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (a1) to f12 or a0/a1.
  LoadNumber(masm, destination,
             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     FPURegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  __ JumpIfSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(FPU) &&
      destination == kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    // Load the double from tagged HeapNumber to double register.

    // ARM uses a workaround here because of the unaligned HeapNumber
    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
    // point in generating even more instructions.
    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
    __ lw(dst2, FieldMemOperand(object,
        HeapNumber::kValueOffset + kPointerSize));
  }
  __ Branch(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Convert smi to double using FPU instructions.
    __ SmiUntag(scratch1, object);
    __ mtc1(scratch1, dst);
    __ cvt_d_w(dst, dst);
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ Move(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 double format.
    __ mov(scratch1, object);
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(ra);
    __ Call(stub.GetCode());
    __ pop(ra);
  }

  __ bind(&done);
}


void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               FPURegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label is_smi;
  Label done;
  Label not_in_int32_range;

  __ JumpIfSmi(object, &is_smi);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
  __ ConvertToInt32(object,
                    dst,
                    scratch1,
                    scratch2,
                    double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,
                                 scratch1,
                                 scratch2,
                                 scratch3);

  __ jmp(&done);

  __ bind(&is_smi);
  __ SmiUntag(dst, object);
  __ bind(&done);
}


void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             FPURegister double_dst,
                                             Register dst1,
                                             Register dst2,
                                             Register scratch2,
                                             FPURegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  Label done;

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(int_scratch, single_scratch);
    __ cvt_d_w(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |       dst2        |       dst1        |
    // | s |    exp    |        mantissa       |

    // Check for zero.
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);
    __ Branch(&done, eq, int_scratch, Operand(zero_reg));

    // Preload the sign of the value.
    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
    // Get the absolute value of the object (as an unsigned integer).
    Label skip_sub;
    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
    __ Subu(int_scratch, zero_reg, int_scratch);
    __ bind(&skip_sub);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ clz(dst1, int_scratch);
    __ li(scratch2, 31);
    __ Subu(dst1, scratch2, dst1);

    // Set the exponent.
    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Ins(dst2, scratch2,
        HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non null bit.
    __ li(scratch2, Operand(1));
    __ sllv(scratch2, scratch2, dst1);
    __ li(at, -1);
    __ Xor(scratch2, scratch2, at);
    __ And(int_scratch, int_scratch, scratch2);

    // Get the number of bits to set in the lower part of the mantissa.
    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
    // Set the higher 20 bits of the mantissa.
    __ srlv(at, int_scratch, scratch2);
    __ or_(dst2, dst2, at);
    __ li(at, 32);
    __ subu(scratch2, at, scratch2);
    __ sllv(dst1, int_scratch, scratch2);
    __ Branch(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ li(at, HeapNumber::kMantissaBitsInTopWord);
    __ subu(scratch2, at, dst1);
    __ sllv(scratch2, int_scratch, scratch2);
    __ Or(dst2, dst2, scratch2);
    // Set dst1 to 0.
    __ mov(dst1, zero_reg);
  }
  __ bind(&done);
}
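
// A worked example of the GPR-only path above: for int_scratch = 10,
//   clz(10) = 28, so the top set bit is bit 31 - 28 = 3
//   biased exponent = 3 + 1023 = 1026, giving dst2 = 1026 << 20 = 0x40200000
//   clearing bit 3 leaves 2, and since 3 < 20 the fewer_than_20_useful_bits
//   path shifts it by 20 - 3 = 17: dst2 |= 2 << 17 = 0x00040000, dst1 = 0
// so dst2:dst1 = 0x40240000:00000000, the IEEE 754 encoding of 10.0.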


void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  FPURegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  FPURegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);
  __ Branch(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));

    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
    // On MIPS a lot of things cannot be implemented the same way so right
    // now it makes a lot more sense to just do things manually.

    // Save FCSR.
    __ cfc1(scratch1, FCSR);
    // Disable FPU exceptions.
    __ ctc1(zero_reg, FCSR);
    __ trunc_w_d(single_scratch, double_dst);
    // Retrieve FCSR.
    __ cfc1(scratch2, FCSR);
    // Restore FCSR.
    __ ctc1(scratch1, FCSR);

    // Check for inexact conversion or exception.
    __ And(scratch2, scratch2, kFCSRFlagMask);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));

    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }

  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
    __ Or(scratch1, scratch1, Operand(dst2));
    __ Branch(&done, eq, scratch1, Operand(zero_reg));

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            FPURegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  // Untag the object into the destination register.
  __ SmiUntag(dst, object);
  // Just return if the object is a smi.
  __ JumpIfSmi(object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
    // On MIPS a lot of things cannot be implemented the same way so right
    // now it makes a lot more sense to just do things manually.

    // Save FCSR.
    __ cfc1(scratch1, FCSR);
    // Disable FPU exceptions.
    __ ctc1(zero_reg, FCSR);
    __ trunc_w_d(double_scratch, double_scratch);
    // Retrieve FCSR.
    __ cfc1(scratch2, FCSR);
    // Restore FCSR.
    __ ctc1(scratch1, FCSR);

    // Check for inexact conversion or exception.
    __ And(scratch2, scratch2, kFCSRFlagMask);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
    // Get the result in the destination register.
    __ mfc1(dst, double_scratch);

  } else {
    // Load the double value in the destination registers.
    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
    __ Or(dst, scratch2, Operand(dst));
    __ Branch(&done, eq, dst, Operand(zero_reg));

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Registers state after DoubleIs32BitInteger.
    // dst: mantissa[51:20].
    // scratch2: 1

    // Shift back the higher bits of the mantissa.
    __ srlv(dst, dst, scratch3);
    // Set the implicit first bit.
    __ li(at, 32);
    __ subu(scratch3, at, scratch3);
    __ sllv(scratch2, scratch2, scratch3);
    __ Or(dst, dst, scratch2);
    // Set the sign.
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    Label skip_sub;
    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
    __ Subu(dst, zero_reg, dst);
    __ bind(&skip_sub);
  }

  __ bind(&done);
}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ext(scratch,
         src1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
  Register tmp = dst;
  __ srl(at, src1, 31);
  __ subu(tmp, scratch, at);
  __ Branch(not_int32, gt, tmp, Operand(30));
  // - Bits [21:0] in the mantissa are not null.
  __ And(tmp, src2, 0x3fffff);
  __ Branch(not_int32, ne, tmp, Operand(zero_reg));

  // Otherwise the exponent needs to be big enough to shift left all the
  // non zero bits left. So we need the (30 - exponent) last bits of the
  // 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ext(dst,
         src2,
         HeapNumber::kMantissaBitsInTopWord,
         32 - HeapNumber::kMantissaBitsInTopWord);
  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
  __ or_(dst, dst, at);

  // Create the mask and test the lower bits (of the higher bits).
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ li(src2, 1);
  __ sllv(src1, src2, scratch);
  __ Subu(src1, src1, Operand(1));
  __ And(src1, dst, src1);
  __ Branch(not_int32, ne, src1, Operand(zero_reg));
}
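
// Two boundary cases for the "(exponent - signbit) > 30" test above:
//   2^31  has unbiased exponent 31 and sign bit 0, so 31 - 0 = 31 > 30 and it
//         is rejected: it does not fit in a signed 32-bit integer.
//   -2^31 has unbiased exponent 31 and sign bit 1, so 31 - 1 = 30 <= 30 and it
//         is accepted: it is the smallest representable int32.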


void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // a0: Left value (least significant part of mantissa).
  // a1: Left value (sign, exponent, top of mantissa).
  // a2: Right value (least significant part of mantissa).
  // a3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is saved.
  // We currently always use s0 to pass it.
  ASSERT(heap_number_result.is(s0));

  // Push the current return address before the C call.
  __ push(ra);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // We are not using MIPS FPU instructions, and parameters for the runtime
    // function call are prepared in a0-a3 registers, but the function we are
    // calling is compiled with hard-float flag and expecting hard float ABI
    // (parameters in f12/f14 registers). We need to copy parameters from
    // a0-a3 registers to f12/f14 register pairs.
    __ Move(f12, a0, a1);
    __ Move(f14, a2, a3);
  }
  // Call C routine that may not cause GC or other trouble.
  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
                   4);
  // Store answer in the overwritable heap number.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // Double returned in register f0.
    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    // Double returned in registers v0 and v1.
    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
  }
  // Place heap_number_result in v0 and return to the pushed return address.
  __ mov(v0, heap_number_result);
  __ pop(ra);
  __ Ret();
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign_, the_int_, Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));

  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch_, scratch_, sign_);
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int_);
  __ movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int_, shift_distance);
  __ or_(scratch_, scratch_, at);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kExponentOffset));
  __ sll(scratch_, the_int_, 32 - shift_distance);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(scratch_, zero_reg);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}
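
// A worked example of the non-Smi path above: for the_int_ = 0x40000000 (2^30),
//   sign_ = 0 and non_smi_exponent = (1023 + 30) << 20 = 0x41d00000
//   shift_distance = 12 - 2 = 10, so the_int_ >> 10 = 0x00100000, whose only
//   set bit (the implicit leading 1) coincides with the exponent's lowest bit,
//   which is already 1, so the exponent word stays 0x41d00000
//   the mantissa word is the_int_ << 22 = 0x00000000
// matching the IEEE 754 encoding of 2^30 (0x41d0000000000000).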


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cc != eq || !never_nan_nan) {
    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are both equal and they are not both Smis so both of them are not
    // Smis. If it's not a heap number, then return equal.
    if (cc == less || cc == greater) {
      __ GetObjectType(a0, t4, t4);
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
    } else {
      __ GetObjectType(a0, t4, t4);
      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
      // Comparing JS objects with <=, >= is complicated.
      if (cc != eq) {
        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false! See ECMAScript 11.8.5.
        if (cc == less_equal || cc == greater_equal) {
          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
          __ Branch(&return_equal, ne, a0, Operand(t2));
          if (cc == le) {
            // undefined <= undefined should fail.
            __ li(v0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ li(v0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);  // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cc != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless. For the others here is some code to check
    // for NaN.
    if (cc != lt && cc != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
      // Read top bits of double representation (second word of value).
      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ And(t3, t2, Operand(exp_mask_reg));
      // If all bits not set (ne cond), then not a NaN, objects are equal.
      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
      // Or with all low-bits of mantissa.
      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
      __ Or(v0, t3, Operand(t2));
      // For equal we already have the right value in v0: Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN). For <= and >= we need to load v0 with the failing
      // value if it's a NaN.
      if (cc != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq, v0, Operand(zero_reg));
        if (cc == le) {
          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ li(v0, Operand(LESS));  // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}
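
// The NaN test above distinguishes, for example, a quiet NaN with high word
// 0x7ff80000 (exponent bits all set, mantissa non-zero) from +Infinity with
// high word 0x7ff00000 and low word 0 (exponent bits all set, mantissa zero):
// the first leaves a non-zero value in v0, the second leaves zero.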


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ And(t0, lhs, Operand(kSmiTagMask));
  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ mov(v0, lhs);
    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, rhs, kSmiTagSize);
    __ mtc1(at, f14);
    __ cvt_d_w(f14, f14);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  } else {
    // Load lhs to a double in a2, a3.
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));

    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
    __ mov(t6, rhs);
    ConvertToDoubleStub stub1(a1, a0, t6, t5);
    __ push(ra);
    __ Call(stub1.GetCode());

    __ pop(ra);
  }

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi. Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ li(v0, Operand(1));
    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, lhs, kSmiTagSize);
    __ mtc1(at, f12);
    __ cvt_d_w(f12, f12);
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    // Convert lhs to a double format. t5 is scratch.
    __ mov(t6, lhs);
    ConvertToDoubleStub stub2(a3, a2, t6, t5);
    __ push(ra);
    __ Call(stub2.GetCode());
    __ pop(ra);
    // Load rhs to a double in a1, a0.
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  // Fall through to both_loaded_as_doubles.
}


void EmitNanCheck(MacroAssembler* masm, Condition cc) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Lhs and rhs are already loaded to f12 and f14 register pairs.
    __ Move(t0, t1, f14);
    __ Move(t2, t3, f12);
  } else {
    // Lhs and rhs are already loaded to GP registers.
    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
  }
  Register rhs_exponent = exp_first ? t0 : t1;
  Register lhs_exponent = exp_first ? t2 : t3;
  Register rhs_mantissa = exp_first ? t1 : t0;
  Register lhs_mantissa = exp_first ? t3 : t2;
  Label one_is_nan, neither_is_nan;
  Label lhs_not_nan_exp_mask_is_loaded;

  Register exp_mask_reg = t4;
  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ and_(t5, lhs_exponent, exp_mask_reg);
  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));

  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ bind(&lhs_not_nan_exp_mask_is_loaded);
  __ and_(t5, rhs_exponent, exp_mask_reg);

  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }
  __ Ret();  // Return.

  __ bind(&neither_is_nan);
}


static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
  // f12 and f14 have the two doubles. Neither is a NaN.
  // Call a native function to do a comparison between two non-NaNs.
  // Call C routine that may not cause GC or other trouble.
  // We push the return address and return manually because we need the
  // argument slots to be freed.

  Label return_result_not_equal, return_result_equal;
  if (cc == eq) {
    // Doubles are not equal unless they have the same bit pattern.
    // Exception: 0 and -0.
    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      // Lhs and rhs are already loaded to f12 and f14 register pairs.
      __ Move(t0, t1, f14);
      __ Move(t2, t3, f12);
    } else {
      // Lhs and rhs are already loaded to GP registers.
      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
    }
    Register rhs_exponent = exp_first ? t0 : t1;
    Register lhs_exponent = exp_first ? t2 : t3;
    Register rhs_mantissa = exp_first ? t1 : t0;
    Register lhs_mantissa = exp_first ? t3 : t2;

    __ xor_(v0, rhs_mantissa, lhs_mantissa);
    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));

    __ subu(v0, rhs_exponent, lhs_exponent);
    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
    // 0, -0 case.
    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
    __ or_(t4, rhs_exponent, lhs_exponent);
    __ or_(t4, t4, rhs_mantissa);

    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));

    __ bind(&return_result_equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();
  }

  __ bind(&return_result_not_equal);

  if (!CpuFeatures::IsSupported(FPU)) {
    __ push(ra);
    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
    if (!IsMipsSoftFloatABI) {
      // We are not using MIPS FPU instructions, and parameters for the runtime
      // function call are prepared in a0-a3 registers, but the function we are
      // calling is compiled with hard-float flag and expecting hard float ABI
      // (parameters in f12/f14 registers). We need to copy parameters from
      // a0-a3 registers to f12/f14 register pairs.
      __ Move(f12, a0, a1);
      __ Move(f14, a2, a3);
    }
    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
    __ pop(ra);  // Because this function returns int, result is in v0.
    __ Ret();
  } else {
    CpuFeatures::Scope scope(FPU);
    Label equal, less_than;
    __ c(EQ, D, f12, f14);
    __ bc1t(&equal);
    __ nop();

    __ c(OLT, D, f12, f14);
    __ bc1t(&less_than);
    __ nop();

    // Not equal, not less, not NaN, must be greater.
    __ li(v0, Operand(GREATER));
    __ Ret();

    __ bind(&equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();

    __ bind(&less_than);
    __ li(v0, Operand(LESS));
    __ Ret();
  }
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ li(v0, Operand(1));
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(t2, a2, Operand(a3));
  __ And(t0, t2, Operand(kIsSymbolMask));
  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  __ jmp(both_loaded_as_doubles);
}
1370
1371
1372// Fast negative check for symbol-to-symbol equality.
1373static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1374 Register lhs,
1375 Register rhs,
1376 Label* possible_strings,
1377 Label* not_both_strings) {
1378 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1379 (lhs.is(a1) && rhs.is(a0)));
1380
1381 // a2 is object type of lhs.
1382 // Ensure that no non-strings have the symbol bit set.
1383 Label object_test;
1384 STATIC_ASSERT(kSymbolTag != 0);
1385 __ And(at, a2, Operand(kIsNotStringMask));
1386 __ Branch(&object_test, ne, at, Operand(zero_reg));
1387 __ And(at, a2, Operand(kIsSymbolMask));
1388 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1389 __ GetObjectType(rhs, a3, a3);
1390 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1391 __ And(at, a3, Operand(kIsSymbolMask));
1392 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1393
1394 // Both are symbols. We already checked they weren't the same pointer
1395 // so they are not equal.
1396 __ li(v0, Operand(1)); // Non-zero indicates not equal.
Ben Murdoch85b71792012-04-11 18:30:58 +01001397 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00001398
1399 __ bind(&object_test);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001400 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001401 __ GetObjectType(rhs, a2, a3);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001402 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001403
1404 // If both objects are undetectable, they are equal. Otherwise, they
1405 // are not equal, since they are different objects and an object is not
1406 // equal to undefined.
1407 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1408 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1409 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1410 __ and_(a0, a2, a3);
1411 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
Ben Murdoch85b71792012-04-11 18:30:58 +01001412 __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
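  // v0 is zero (equal) only if both bit fields had the undetectable bit set;
  // otherwise it is non-zero (not equal).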
1413 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01001414}
1415
1416
1417void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1418 Register object,
1419 Register result,
1420 Register scratch1,
1421 Register scratch2,
1422 Register scratch3,
1423 bool object_is_smi,
1424 Label* not_found) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001425 // Register usage: the result register is used as a temporary.
1426 Register number_string_cache = result;
1427 Register mask = scratch3;
1428
1429 // Load the number string cache.
1430 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1431
1432 // Make the hash mask from the length of the number string cache. It
1433 // contains two elements (number and string) for each cache entry.
1434 __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1435 // Divide length by two (length is a smi).
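  // Shifting by kSmiTagSize + 1 removes the smi tag and divides by two in one
  // step.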
1436 __ sra(mask, mask, kSmiTagSize + 1);
1437 __ Addu(mask, mask, -1); // Make mask.
1438
1439 // Calculate the entry in the number string cache. The hash value in the
1440 // number string cache for smis is just the smi value, and the hash for
1441 // doubles is the xor of the upper and lower words. See
1442 // Heap::GetNumberStringCache.
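  // Roughly: index = (is_smi ? value : hi_word ^ lo_word) & mask, and each
  // cache entry occupies two pointer-sized slots (the number and its string).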
1443 Isolate* isolate = masm->isolate();
1444 Label is_smi;
1445 Label load_result_from_cache;
1446 if (!object_is_smi) {
1447 __ JumpIfSmi(object, &is_smi);
1448 if (CpuFeatures::IsSupported(FPU)) {
1449 CpuFeatures::Scope scope(FPU);
1450 __ CheckMap(object,
1451 scratch1,
1452 Heap::kHeapNumberMapRootIndex,
1453 not_found,
1454 DONT_DO_SMI_CHECK);
1455
1456 STATIC_ASSERT(8 == kDoubleSize);
1457 __ Addu(scratch1,
1458 object,
1459 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1460 __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1461 __ lw(scratch1, MemOperand(scratch1, 0));
1462 __ Xor(scratch1, scratch1, Operand(scratch2));
1463 __ And(scratch1, scratch1, Operand(mask));
1464
1465 // Calculate address of entry in string cache: each entry consists
1466 // of two pointer sized fields.
1467 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1468 __ Addu(scratch1, number_string_cache, scratch1);
1469
1470 Register probe = mask;
1471 __ lw(probe,
1472 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1473 __ JumpIfSmi(probe, not_found);
1474 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1475 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01001476 __ c(EQ, D, f12, f14);
1477 __ bc1t(&load_result_from_cache);
1478 __ nop(); // bc1t() requires explicit fill of branch delay slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00001479 __ Branch(not_found);
1480 } else {
 1481 // Note that there is no cache check for the non-FPU case, even though
 1482 // it seems there could be. It may be a tiny optimization for non-FPU
 1483 // cores.
1484 __ Branch(not_found);
1485 }
1486 }
1487
1488 __ bind(&is_smi);
1489 Register scratch = scratch1;
1490 __ sra(scratch, object, 1); // Shift away the tag.
1491 __ And(scratch, mask, Operand(scratch));
1492
1493 // Calculate address of entry in string cache: each entry consists
1494 // of two pointer sized fields.
1495 __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1496 __ Addu(scratch, number_string_cache, scratch);
1497
1498 // Check if the entry is the smi we are looking for.
1499 Register probe = mask;
1500 __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1501 __ Branch(not_found, ne, object, Operand(probe));
1502
1503 // Get the result from the cache.
1504 __ bind(&load_result_from_cache);
1505 __ lw(result,
1506 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1507
1508 __ IncrementCounter(isolate->counters()->number_to_string_native(),
1509 1,
1510 scratch1,
1511 scratch2);
Steve Block44f0eee2011-05-26 01:26:41 +01001512}
1513
1514
1515void NumberToStringStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001516 Label runtime;
1517
1518 __ lw(a1, MemOperand(sp, 0));
1519
1520 // Generate code to lookup number in the number string cache.
1521 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
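  // Found in the cache: drop the argument from the stack and return the
  // cached string, which is already in v0.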
Ben Murdoch85b71792012-04-11 18:30:58 +01001522 __ Addu(sp, sp, Operand(1 * kPointerSize));
1523 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00001524
1525 __ bind(&runtime);
1526 // Handle number to string in the runtime system if not found in the cache.
1527 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01001528}
1529
1530
 1531 // On entry lhs_ (lhs) and rhs_ (rhs) are the values to be compared.
1532// On exit, v0 is 0, positive, or negative (smi) to indicate the result
1533// of the comparison.
1534void CompareStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001535 Label slow; // Call builtin.
1536 Label not_smis, both_loaded_as_doubles;
1537
1538
1539 if (include_smi_compare_) {
1540 Label not_two_smis, smi_done;
1541 __ Or(a2, a1, a0);
1542 __ JumpIfNotSmi(a2, &not_two_smis);
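    // Untag both smis and return their difference; it is negative, zero, or
    // positive, which is exactly what the stub's contract requires.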
1543 __ sra(a1, a1, 1);
1544 __ sra(a0, a0, 1);
Ben Murdoch85b71792012-04-11 18:30:58 +01001545 __ Subu(v0, a1, a0);
1546 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00001547 __ bind(&not_two_smis);
1548 } else if (FLAG_debug_code) {
1549 __ Or(a2, a1, a0);
1550 __ And(a2, a2, kSmiTagMask);
1551 __ Assert(ne, "CompareStub: unexpected smi operands.",
1552 a2, Operand(zero_reg));
1553 }
1554
1555
1556 // NOTICE! This code is only reached after a smi-fast-case check, so
1557 // it is certain that at least one operand isn't a smi.
1558
1559 // Handle the case where the objects are identical. Either returns the answer
1560 // or goes to slow. Only falls through if the objects were not identical.
1561 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1562
1563 // If either is a Smi (we know that not both are), then they can only
1564 // be strictly equal if the other is a HeapNumber.
1565 STATIC_ASSERT(kSmiTag == 0);
1566 ASSERT_EQ(0, Smi::FromInt(0));
1567 __ And(t2, lhs_, Operand(rhs_));
1568 __ JumpIfNotSmi(t2, &not_smis, t0);
1569 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1570 // 1) Return the answer.
1571 // 2) Go to slow.
1572 // 3) Fall through to both_loaded_as_doubles.
1573 // 4) Jump to rhs_not_nan.
1574 // In cases 3 and 4 we have found out we were dealing with a number-number
1575 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1576 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1577 EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1578 &both_loaded_as_doubles, &slow, strict_);
1579
1580 __ bind(&both_loaded_as_doubles);
1581 // f12, f14 are the double representations of the left hand side
1582 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1583 // left hand side and a0, a1 represent right hand side.
1584
1585 Isolate* isolate = masm->isolate();
1586 if (CpuFeatures::IsSupported(FPU)) {
1587 CpuFeatures::Scope scope(FPU);
1588 Label nan;
1589 __ li(t0, Operand(LESS));
1590 __ li(t1, Operand(GREATER));
1591 __ li(t2, Operand(EQUAL));
1592
1593 // Check if either rhs or lhs is NaN.
Ben Murdoch85b71792012-04-11 18:30:58 +01001594 __ c(UN, D, f12, f14);
1595 __ bc1t(&nan);
1596 __ nop();
Ben Murdoch257744e2011-11-30 15:57:28 +00001597
 1598 // Check if the LESS condition is satisfied. If true, conditionally move
 1599 // the result to v0.
1600 __ c(OLT, D, f12, f14);
Ben Murdoch85b71792012-04-11 18:30:58 +01001601 __ movt(v0, t0);
Ben Murdoch257744e2011-11-30 15:57:28 +00001602 // Use the previous check to conditionally store the opposite condition
 1603 // (GREATER) in v0. If rhs equals lhs, this will be corrected by the next
 1604 // check.
Ben Murdoch85b71792012-04-11 18:30:58 +01001605 __ movf(v0, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00001606 // Check if the EQUAL condition is satisfied. If true, conditionally move
 1607 // the result to v0.
1608 __ c(EQ, D, f12, f14);
Ben Murdoch85b71792012-04-11 18:30:58 +01001609 __ movt(v0, t2);
Ben Murdoch257744e2011-11-30 15:57:28 +00001610
1611 __ Ret();
1612
1613 __ bind(&nan);
1614 // NaN comparisons always fail.
1615 // Load whatever we need in v0 to make the comparison fail.
1616 if (cc_ == lt || cc_ == le) {
1617 __ li(v0, Operand(GREATER));
1618 } else {
1619 __ li(v0, Operand(LESS));
1620 }
1621 __ Ret();
1622 } else {
1623 // Checks for NaN in the doubles we have loaded. Can return the answer or
1624 // fall through if neither is a NaN. Also binds rhs_not_nan.
1625 EmitNanCheck(masm, cc_);
1626
1627 // Compares two doubles that are not NaNs. Returns the answer.
1628 // Never falls through.
1629 EmitTwoNonNanDoubleComparison(masm, cc_);
1630 }
1631
1632 __ bind(&not_smis);
1633 // At this point we know we are dealing with two different objects,
1634 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1635 if (strict_) {
1636 // This returns non-equal for some object types, or falls through if it
1637 // was not lucky.
1638 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1639 }
1640
1641 Label check_for_symbols;
1642 Label flat_string_check;
1643 // Check for heap-number-heap-number comparison. Can jump to slow case,
1644 // or load both doubles and jump to the code that handles
1645 // that case. If the inputs are not doubles then jumps to check_for_symbols.
1646 // In this case a2 will contain the type of lhs_.
1647 EmitCheckForTwoHeapNumbers(masm,
1648 lhs_,
1649 rhs_,
1650 &both_loaded_as_doubles,
1651 &check_for_symbols,
1652 &flat_string_check);
1653
1654 __ bind(&check_for_symbols);
1655 if (cc_ == eq && !strict_) {
1656 // Returns an answer for two symbols or two detectable objects.
1657 // Otherwise jumps to string case or not both strings case.
1658 // Assumes that a2 is the type of lhs_ on entry.
1659 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1660 }
1661
1662 // Check for both being sequential ASCII strings, and inline if that is the
1663 // case.
1664 __ bind(&flat_string_check);
1665
1666 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1667
1668 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1669 if (cc_ == eq) {
1670 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1671 lhs_,
1672 rhs_,
1673 a2,
1674 a3,
1675 t0);
1676 } else {
1677 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1678 lhs_,
1679 rhs_,
1680 a2,
1681 a3,
1682 t0,
1683 t1);
1684 }
1685 // Never falls through to here.
1686
1687 __ bind(&slow);
1688 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1689 // a1 (rhs) second.
1690 __ Push(lhs_, rhs_);
1691 // Figure out which native to call and setup the arguments.
1692 Builtins::JavaScript native;
1693 if (cc_ == eq) {
1694 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1695 } else {
1696 native = Builtins::COMPARE;
1697 int ncr; // NaN compare result.
1698 if (cc_ == lt || cc_ == le) {
1699 ncr = GREATER;
1700 } else {
1701 ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
1702 ncr = LESS;
1703 }
1704 __ li(a0, Operand(Smi::FromInt(ncr)));
1705 __ push(a0);
1706 }
1707
1708 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1709 // tagged as a small integer.
1710 __ InvokeBuiltin(native, JUMP_FUNCTION);
Steve Block44f0eee2011-05-26 01:26:41 +01001711}
1712
1713
Ben Murdoch85b71792012-04-11 18:30:58 +01001714// The stub returns zero for false, and a non-zero value for true.
Steve Block44f0eee2011-05-26 01:26:41 +01001715void ToBooleanStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001716 // This stub uses FPU instructions.
1717 CpuFeatures::Scope scope(FPU);
1718
Ben Murdoch85b71792012-04-11 18:30:58 +01001719 Label false_result;
1720 Label not_heap_number;
1721 Register scratch0 = t5.is(tos_) ? t3 : t5;
Ben Murdoch257744e2011-11-30 15:57:28 +00001722
Ben Murdoch85b71792012-04-11 18:30:58 +01001723 // undefined -> false
1724 __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
1725 __ Branch(&false_result, eq, tos_, Operand(scratch0));
Ben Murdoch257744e2011-11-30 15:57:28 +00001726
Ben Murdoch85b71792012-04-11 18:30:58 +01001727 // Boolean -> its value
1728 __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
1729 __ Branch(&false_result, eq, tos_, Operand(scratch0));
1730 __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
1731 // "tos_" is a register and contains a non-zero value. Hence we implicitly
1732 // return true if the equal condition is satisfied.
1733 __ Ret(eq, tos_, Operand(scratch0));
Ben Murdoch257744e2011-11-30 15:57:28 +00001734
Ben Murdoch85b71792012-04-11 18:30:58 +01001735 // Smis: 0 -> false, all other -> true
1736 __ And(scratch0, tos_, tos_);
1737 __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
1738 __ And(scratch0, tos_, Operand(kSmiTagMask));
1739 // "tos_" is a register and contains a non-zero value. Hence we implicitly
1740 // return true if the not equal condition is satisfied.
1741 __ Ret(eq, scratch0, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00001742
Ben Murdoch85b71792012-04-11 18:30:58 +01001743 // 'null' -> false
1744 __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
1745 __ Branch(&false_result, eq, tos_, Operand(scratch0));
Ben Murdoch257744e2011-11-30 15:57:28 +00001746
Ben Murdoch85b71792012-04-11 18:30:58 +01001747 // HeapNumber => false if +0, -0, or NaN.
1748 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1749 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1750 __ Branch(&not_heap_number, ne, scratch0, Operand(at));
Ben Murdoch257744e2011-11-30 15:57:28 +00001751
Ben Murdoch85b71792012-04-11 18:30:58 +01001752 __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1753 __ fcmp(f12, 0.0, UEQ);
Ben Murdoch257744e2011-11-30 15:57:28 +00001754
Ben Murdoch85b71792012-04-11 18:30:58 +01001755 // "tos_" is a register, and contains a non zero value by default.
1756 // Hence we only need to overwrite "tos_" with zero to return false for
1757 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1758 __ movt(tos_, zero_reg);
1759 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00001760
Ben Murdoch85b71792012-04-11 18:30:58 +01001761 __ bind(&not_heap_number);
Ben Murdoch257744e2011-11-30 15:57:28 +00001762
Ben Murdoch85b71792012-04-11 18:30:58 +01001763 // It can be an undetectable object.
1764 // Undetectable => false.
1765 __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
1766 __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
1767 __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
1768 __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
Ben Murdoch257744e2011-11-30 15:57:28 +00001769
Ben Murdoch85b71792012-04-11 18:30:58 +01001770 // JavaScript object => true.
1771 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1772 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00001773
Ben Murdoch85b71792012-04-11 18:30:58 +01001774 // "tos_" is a register and contains a non-zero value.
1775 // Hence we implicitly return true if the greater than
1776 // condition is satisfied.
1777 __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001778
Ben Murdoch85b71792012-04-11 18:30:58 +01001779 // Check for string.
1780 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1781 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
1782 // "tos_" is a register and contains a non-zero value.
1783 // Hence we implicitly return true if the greater than
1784 // condition is satisfied.
1785 __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001786
Ben Murdoch85b71792012-04-11 18:30:58 +01001787 // String value => false iff empty, i.e., length is zero.
1788 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1789 // If length is zero, "tos_" contains zero ==> false.
1790 // If length is not zero, "tos_" contains a non-zero value ==> true.
1791 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00001792
Ben Murdoch85b71792012-04-11 18:30:58 +01001793 // Return 0 in "tos_" for false.
1794 __ bind(&false_result);
1795 __ mov(tos_, zero_reg);
Ben Murdoch257744e2011-11-30 15:57:28 +00001796 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01001797}
1798
1799
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001800void UnaryOpStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001801 const char* op_name = Token::Name(op_);
1802 const char* overwrite_name = NULL; // Make g++ happy.
1803 switch (mode_) {
1804 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
1805 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
1806 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001807 stream->Add("UnaryOpStub_%s_%s_%s",
1808 op_name,
1809 overwrite_name,
1810 UnaryOpIC::GetName(operand_type_));
Ben Murdoch257744e2011-11-30 15:57:28 +00001811}
1812
1813
1814// TODO(svenpanne): Use virtual functions instead of switch.
1815void UnaryOpStub::Generate(MacroAssembler* masm) {
1816 switch (operand_type_) {
1817 case UnaryOpIC::UNINITIALIZED:
1818 GenerateTypeTransition(masm);
1819 break;
1820 case UnaryOpIC::SMI:
1821 GenerateSmiStub(masm);
1822 break;
1823 case UnaryOpIC::HEAP_NUMBER:
1824 GenerateHeapNumberStub(masm);
1825 break;
1826 case UnaryOpIC::GENERIC:
1827 GenerateGenericStub(masm);
1828 break;
1829 }
1830}
1831
1832
1833void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1834 // Argument is in a0 and v0 at this point, so we can overwrite a0.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001835 __ li(a2, Operand(Smi::FromInt(op_)));
1836 __ li(a1, Operand(Smi::FromInt(mode_)));
Ben Murdoch257744e2011-11-30 15:57:28 +00001837 __ li(a0, Operand(Smi::FromInt(operand_type_)));
Ben Murdoch257744e2011-11-30 15:57:28 +00001838 __ Push(v0, a2, a1, a0);
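  // Four arguments for the IC patch routine: the operand value, the token,
  // the overwrite mode, and the operand type.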
1839
1840 __ TailCallExternalReference(
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001841 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00001842}
1843
1844
1845// TODO(svenpanne): Use virtual functions instead of switch.
1846void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1847 switch (op_) {
1848 case Token::SUB:
1849 GenerateSmiStubSub(masm);
1850 break;
1851 case Token::BIT_NOT:
1852 GenerateSmiStubBitNot(masm);
1853 break;
1854 default:
1855 UNREACHABLE();
1856 }
1857}
1858
1859
1860void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
1861 Label non_smi, slow;
1862 GenerateSmiCodeSub(masm, &non_smi, &slow);
1863 __ bind(&non_smi);
1864 __ bind(&slow);
1865 GenerateTypeTransition(masm);
1866}
1867
1868
1869void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
1870 Label non_smi;
1871 GenerateSmiCodeBitNot(masm, &non_smi);
1872 __ bind(&non_smi);
1873 GenerateTypeTransition(masm);
1874}
1875
1876
1877void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
1878 Label* non_smi,
1879 Label* slow) {
1880 __ JumpIfNotSmi(a0, non_smi);
1881
1882 // The result of negating zero or the smallest negative smi is not a smi.
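  // Clearing the sign bit leaves zero only for 0 and 0x80000000 (the most
  // negative smi), so both of those go to the slow case.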
1883 __ And(t0, a0, ~0x80000000);
1884 __ Branch(slow, eq, t0, Operand(zero_reg));
1885
1886 // Return '0 - value'.
Ben Murdoch85b71792012-04-11 18:30:58 +01001887 __ Subu(v0, zero_reg, a0);
1888 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00001889}
1890
1891
1892void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
1893 Label* non_smi) {
1894 __ JumpIfNotSmi(a0, non_smi);
1895
1896 // Flip bits and revert inverted smi-tag.
1897 __ Neg(v0, a0);
1898 __ And(v0, v0, ~kSmiTagMask);
1899 __ Ret();
1900}
1901
1902
1903// TODO(svenpanne): Use virtual functions instead of switch.
1904void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1905 switch (op_) {
1906 case Token::SUB:
1907 GenerateHeapNumberStubSub(masm);
1908 break;
1909 case Token::BIT_NOT:
1910 GenerateHeapNumberStubBitNot(masm);
1911 break;
1912 default:
1913 UNREACHABLE();
1914 }
1915}
1916
1917
1918void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
1919 Label non_smi, slow, call_builtin;
1920 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
1921 __ bind(&non_smi);
1922 GenerateHeapNumberCodeSub(masm, &slow);
1923 __ bind(&slow);
1924 GenerateTypeTransition(masm);
1925 __ bind(&call_builtin);
1926 GenerateGenericCodeFallback(masm);
1927}
1928
1929
1930void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
1931 Label non_smi, slow;
1932 GenerateSmiCodeBitNot(masm, &non_smi);
1933 __ bind(&non_smi);
1934 GenerateHeapNumberCodeBitNot(masm, &slow);
1935 __ bind(&slow);
1936 GenerateTypeTransition(masm);
1937}
1938
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001939
Ben Murdoch257744e2011-11-30 15:57:28 +00001940void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
1941 Label* slow) {
1942 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
1943 // a0 is a heap number. Get a new heap number in a1.
1944 if (mode_ == UNARY_OVERWRITE) {
1945 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1946 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
1947 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1948 } else {
1949 Label slow_allocate_heapnumber, heapnumber_allocated;
1950 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
1951 __ jmp(&heapnumber_allocated);
1952
1953 __ bind(&slow_allocate_heapnumber);
Ben Murdoch85b71792012-04-11 18:30:58 +01001954 __ EnterInternalFrame();
1955 __ push(a0);
1956 __ CallRuntime(Runtime::kNumberAlloc, 0);
1957 __ mov(a1, v0);
1958 __ pop(a0);
1959 __ LeaveInternalFrame();
Ben Murdoch257744e2011-11-30 15:57:28 +00001960
1961 __ bind(&heapnumber_allocated);
1962 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1963 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1964 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
1965 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
1966 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
1967 __ mov(v0, a1);
1968 }
1969 __ Ret();
1970}
1971
1972
1973void UnaryOpStub::GenerateHeapNumberCodeBitNot(
1974 MacroAssembler* masm,
1975 Label* slow) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001976 Label impossible;
1977
Ben Murdoch257744e2011-11-30 15:57:28 +00001978 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
1979 // Convert the heap number in a0 to an untagged integer in a1.
1980 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
1981
1982 // Do the bitwise operation and check if the result fits in a smi.
1983 Label try_float;
1984 __ Neg(a1, a1);
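  // Adding 0x40000000 makes the sum negative exactly when the value is
  // outside the smi range [-2^30, 2^30), i.e. when it cannot be tagged.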
1985 __ Addu(a2, a1, Operand(0x40000000));
1986 __ Branch(&try_float, lt, a2, Operand(zero_reg));
1987
1988 // Tag the result as a smi and we're done.
1989 __ SmiTag(v0, a1);
1990 __ Ret();
1991
1992 // Try to store the result in a heap number.
1993 __ bind(&try_float);
1994 if (mode_ == UNARY_NO_OVERWRITE) {
1995 Label slow_allocate_heapnumber, heapnumber_allocated;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001996 // Allocate a new heap number without zapping v0, which we need if it fails.
1997 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
Ben Murdoch257744e2011-11-30 15:57:28 +00001998 __ jmp(&heapnumber_allocated);
1999
2000 __ bind(&slow_allocate_heapnumber);
Ben Murdoch85b71792012-04-11 18:30:58 +01002001 __ EnterInternalFrame();
2002 __ push(v0); // Push the heap number, not the untagged int32.
2003 __ CallRuntime(Runtime::kNumberAlloc, 0);
2004 __ mov(a2, v0); // Move the new heap number into a2.
2005 // Get the heap number into v0, now that the new heap number is in a2.
2006 __ pop(v0);
2007 __ LeaveInternalFrame();
Ben Murdoch257744e2011-11-30 15:57:28 +00002008
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002009 // Convert the heap number in v0 to an untagged integer in a1.
2010 // This can't go slow-case because it's the same number we already
 2011 // converted once before.
2012 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2013 // Negate the result.
2014 __ Xor(a1, a1, -1);
2015
Ben Murdoch257744e2011-11-30 15:57:28 +00002016 __ bind(&heapnumber_allocated);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002017 __ mov(v0, a2); // Move newly allocated heap number to v0.
Ben Murdoch257744e2011-11-30 15:57:28 +00002018 }
2019
2020 if (CpuFeatures::IsSupported(FPU)) {
2021 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2022 CpuFeatures::Scope scope(FPU);
2023 __ mtc1(a1, f0);
2024 __ cvt_d_w(f0, f0);
2025 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2026 __ Ret();
2027 } else {
2028 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2029 // have to set up a frame.
2030 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2031 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2032 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002033
2034 __ bind(&impossible);
2035 if (FLAG_debug_code) {
2036 __ stop("Incorrect assumption in bit-not stub");
2037 }
Ben Murdoch257744e2011-11-30 15:57:28 +00002038}
2039
2040
2041// TODO(svenpanne): Use virtual functions instead of switch.
2042void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2043 switch (op_) {
2044 case Token::SUB:
2045 GenerateGenericStubSub(masm);
2046 break;
2047 case Token::BIT_NOT:
2048 GenerateGenericStubBitNot(masm);
2049 break;
2050 default:
2051 UNREACHABLE();
2052 }
2053}
2054
2055
2056void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2057 Label non_smi, slow;
2058 GenerateSmiCodeSub(masm, &non_smi, &slow);
2059 __ bind(&non_smi);
2060 GenerateHeapNumberCodeSub(masm, &slow);
2061 __ bind(&slow);
2062 GenerateGenericCodeFallback(masm);
2063}
2064
2065
2066void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2067 Label non_smi, slow;
2068 GenerateSmiCodeBitNot(masm, &non_smi);
2069 __ bind(&non_smi);
2070 GenerateHeapNumberCodeBitNot(masm, &slow);
2071 __ bind(&slow);
2072 GenerateGenericCodeFallback(masm);
2073}
2074
2075
2076void UnaryOpStub::GenerateGenericCodeFallback(
2077 MacroAssembler* masm) {
2078 // Handle the slow case by jumping to the JavaScript builtin.
2079 __ push(a0);
2080 switch (op_) {
2081 case Token::SUB:
2082 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2083 break;
2084 case Token::BIT_NOT:
2085 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2086 break;
2087 default:
2088 UNREACHABLE();
2089 }
2090}
2091
2092
Ben Murdoch257744e2011-11-30 15:57:28 +00002093void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2094 Label get_result;
2095
2096 __ Push(a1, a0);
2097
2098 __ li(a2, Operand(Smi::FromInt(MinorKey())));
2099 __ li(a1, Operand(Smi::FromInt(op_)));
2100 __ li(a0, Operand(Smi::FromInt(operands_type_)));
2101 __ Push(a2, a1, a0);
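  // Five arguments are now on the stack for the IC patch routine: left,
  // right, the stub's minor key, the token, and the operand type info.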
2102
2103 __ TailCallExternalReference(
2104 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2105 masm->isolate()),
2106 5,
2107 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002108}
2109
2110
Ben Murdoch257744e2011-11-30 15:57:28 +00002111void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
Steve Block44f0eee2011-05-26 01:26:41 +01002112 MacroAssembler* masm) {
2113 UNIMPLEMENTED();
2114}
2115
2116
Ben Murdoch257744e2011-11-30 15:57:28 +00002117void BinaryOpStub::Generate(MacroAssembler* masm) {
2118 switch (operands_type_) {
2119 case BinaryOpIC::UNINITIALIZED:
2120 GenerateTypeTransition(masm);
2121 break;
2122 case BinaryOpIC::SMI:
2123 GenerateSmiStub(masm);
2124 break;
2125 case BinaryOpIC::INT32:
2126 GenerateInt32Stub(masm);
2127 break;
2128 case BinaryOpIC::HEAP_NUMBER:
2129 GenerateHeapNumberStub(masm);
2130 break;
2131 case BinaryOpIC::ODDBALL:
2132 GenerateOddballStub(masm);
2133 break;
2134 case BinaryOpIC::BOTH_STRING:
2135 GenerateBothStringStub(masm);
2136 break;
2137 case BinaryOpIC::STRING:
2138 GenerateStringStub(masm);
2139 break;
2140 case BinaryOpIC::GENERIC:
2141 GenerateGeneric(masm);
2142 break;
2143 default:
2144 UNREACHABLE();
2145 }
Steve Block44f0eee2011-05-26 01:26:41 +01002146}
2147
2148
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002149void BinaryOpStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002150 const char* op_name = Token::Name(op_);
2151 const char* overwrite_name;
2152 switch (mode_) {
2153 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2154 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2155 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2156 default: overwrite_name = "UnknownOverwrite"; break;
2157 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002158 stream->Add("BinaryOpStub_%s_%s_%s",
2159 op_name,
2160 overwrite_name,
2161 BinaryOpIC::GetName(operands_type_));
Steve Block44f0eee2011-05-26 01:26:41 +01002162}
2163
2164
2165
Ben Murdoch257744e2011-11-30 15:57:28 +00002166void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2167 Register left = a1;
2168 Register right = a0;
2169
2170 Register scratch1 = t0;
2171 Register scratch2 = t1;
2172
2173 ASSERT(right.is(a0));
2174 STATIC_ASSERT(kSmiTag == 0);
2175
2176 Label not_smi_result;
2177 switch (op_) {
2178 case Token::ADD:
2179 __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2180 __ RetOnNoOverflow(scratch1);
2181 // No need to revert anything - right and left are intact.
2182 break;
2183 case Token::SUB:
2184 __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2185 __ RetOnNoOverflow(scratch1);
2186 // No need to revert anything - right and left are intact.
2187 break;
2188 case Token::MUL: {
2189 // Remove tag from one of the operands. This way the multiplication result
2190 // will be a smi if it fits the smi range.
2191 __ SmiUntag(scratch1, right);
2192 // Do multiplication.
2193 // lo = lower 32 bits of scratch1 * left.
2194 // hi = higher 32 bits of scratch1 * left.
2195 __ Mult(left, scratch1);
2196 // Check for overflowing the smi range - no overflow if higher 33 bits of
2197 // the result are identical.
2198 __ mflo(scratch1);
2199 __ mfhi(scratch2);
2200 __ sra(scratch1, scratch1, 31);
2201 __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
2202 // Go slow on zero result to handle -0.
2203 __ mflo(v0);
2204 __ Ret(ne, v0, Operand(zero_reg));
2205 // We need -0 if we were multiplying a negative number with 0 to get 0.
2206 // We know one of them was zero.
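      // Since one operand is zero, left + right equals the other operand, and
      // its sign tells us whether to return +0 (smi zero) or fall through to
      // the non-smi path (which can produce -0).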
2207 __ Addu(scratch2, right, left);
2208 Label skip;
2209 // ARM uses the 'pl' condition, which is 'ge'.
2210 // Negating it results in 'lt'.
2211 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2212 ASSERT(Smi::FromInt(0) == 0);
Ben Murdoch85b71792012-04-11 18:30:58 +01002213 __ mov(v0, zero_reg);
2214 __ Ret(); // Return smi 0 if the non-zero one was positive.
Ben Murdoch257744e2011-11-30 15:57:28 +00002215 __ bind(&skip);
2216 // We fall through here if we multiplied a negative number with 0, because
2217 // that would mean we should produce -0.
2218 }
2219 break;
2220 case Token::DIV: {
2221 Label done;
2222 __ SmiUntag(scratch2, right);
2223 __ SmiUntag(scratch1, left);
2224 __ Div(scratch1, scratch2);
2225 // A minor optimization: div may be calculated asynchronously, so we check
2226 // for division by zero before getting the result.
2227 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
 2228 // If the result is 0, we need to make sure the divisor (right) is
2229 // positive, otherwise it is a -0 case.
2230 // Quotient is in 'lo', remainder is in 'hi'.
2231 // Check for no remainder first.
2232 __ mfhi(scratch1);
2233 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2234 __ mflo(scratch1);
2235 __ Branch(&done, ne, scratch1, Operand(zero_reg));
2236 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2237 __ bind(&done);
2238 // Check that the signed result fits in a Smi.
2239 __ Addu(scratch2, scratch1, Operand(0x40000000));
2240 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2241 __ SmiTag(v0, scratch1);
2242 __ Ret();
2243 }
2244 break;
2245 case Token::MOD: {
2246 Label done;
2247 __ SmiUntag(scratch2, right);
2248 __ SmiUntag(scratch1, left);
2249 __ Div(scratch1, scratch2);
2250 // A minor optimization: div may be calculated asynchronously, so we check
2251 // for division by 0 before calling mfhi.
2252 // Check for zero on the right hand side.
2253 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2254 // If the result is 0, we need to make sure the dividend (left) is
2255 // positive (or 0), otherwise it is a -0 case.
2256 // Remainder is in 'hi'.
2257 __ mfhi(scratch2);
2258 __ Branch(&done, ne, scratch2, Operand(zero_reg));
2259 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2260 __ bind(&done);
2261 // Check that the signed result fits in a Smi.
2262 __ Addu(scratch1, scratch2, Operand(0x40000000));
2263 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2264 __ SmiTag(v0, scratch2);
2265 __ Ret();
2266 }
2267 break;
2268 case Token::BIT_OR:
Ben Murdoch85b71792012-04-11 18:30:58 +01002269 __ Or(v0, left, Operand(right));
2270 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00002271 break;
2272 case Token::BIT_AND:
Ben Murdoch85b71792012-04-11 18:30:58 +01002273 __ And(v0, left, Operand(right));
2274 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00002275 break;
2276 case Token::BIT_XOR:
Ben Murdoch85b71792012-04-11 18:30:58 +01002277 __ Xor(v0, left, Operand(right));
2278 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00002279 break;
2280 case Token::SAR:
2281 // Remove tags from right operand.
2282 __ GetLeastBitsFromSmi(scratch1, right, 5);
2283 __ srav(scratch1, left, scratch1);
2284 // Smi tag result.
Ben Murdoch85b71792012-04-11 18:30:58 +01002285 __ And(v0, scratch1, Operand(~kSmiTagMask));
Ben Murdoch257744e2011-11-30 15:57:28 +00002286 __ Ret();
2287 break;
2288 case Token::SHR:
2289 // Remove tags from operands. We can't do this on a 31 bit number
2290 // because then the 0s get shifted into bit 30 instead of bit 31.
2291 __ SmiUntag(scratch1, left);
2292 __ GetLeastBitsFromSmi(scratch2, right, 5);
2293 __ srlv(v0, scratch1, scratch2);
2294 // Unsigned shift is not allowed to produce a negative number, so
2295 // check the sign bit and the sign bit after Smi tagging.
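      // Bits 31 and 30 must both be clear for the result to fit in a
      // non-negative smi.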
2296 __ And(scratch1, v0, Operand(0xc0000000));
2297 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2298 // Smi tag result.
2299 __ SmiTag(v0);
2300 __ Ret();
2301 break;
2302 case Token::SHL:
2303 // Remove tags from operands.
2304 __ SmiUntag(scratch1, left);
2305 __ GetLeastBitsFromSmi(scratch2, right, 5);
2306 __ sllv(scratch1, scratch1, scratch2);
2307 // Check that the signed result fits in a Smi.
2308 __ Addu(scratch2, scratch1, Operand(0x40000000));
2309 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2310 __ SmiTag(v0, scratch1);
2311 __ Ret();
2312 break;
2313 default:
2314 UNREACHABLE();
2315 }
2316 __ bind(&not_smi_result);
Steve Block44f0eee2011-05-26 01:26:41 +01002317}
2318
2319
Ben Murdoch257744e2011-11-30 15:57:28 +00002320void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2321 bool smi_operands,
2322 Label* not_numbers,
2323 Label* gc_required) {
2324 Register left = a1;
2325 Register right = a0;
2326 Register scratch1 = t3;
2327 Register scratch2 = t5;
2328 Register scratch3 = t0;
2329
2330 ASSERT(smi_operands || (not_numbers != NULL));
2331 if (smi_operands && FLAG_debug_code) {
2332 __ AbortIfNotSmi(left);
2333 __ AbortIfNotSmi(right);
2334 }
2335
2336 Register heap_number_map = t2;
2337 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2338
2339 switch (op_) {
2340 case Token::ADD:
2341 case Token::SUB:
2342 case Token::MUL:
2343 case Token::DIV:
2344 case Token::MOD: {
2345 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2346 // depending on whether FPU is available or not.
2347 FloatingPointHelper::Destination destination =
2348 CpuFeatures::IsSupported(FPU) &&
2349 op_ != Token::MOD ?
2350 FloatingPointHelper::kFPURegisters :
2351 FloatingPointHelper::kCoreRegisters;
2352
2353 // Allocate new heap number for result.
2354 Register result = s0;
2355 GenerateHeapResultAllocation(
2356 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2357
2358 // Load the operands.
2359 if (smi_operands) {
2360 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2361 } else {
2362 FloatingPointHelper::LoadOperands(masm,
2363 destination,
2364 heap_number_map,
2365 scratch1,
2366 scratch2,
2367 not_numbers);
2368 }
2369
2370 // Calculate the result.
2371 if (destination == FloatingPointHelper::kFPURegisters) {
2372 // Using FPU registers:
2373 // f12: Left value.
2374 // f14: Right value.
2375 CpuFeatures::Scope scope(FPU);
2376 switch (op_) {
2377 case Token::ADD:
2378 __ add_d(f10, f12, f14);
2379 break;
2380 case Token::SUB:
2381 __ sub_d(f10, f12, f14);
2382 break;
2383 case Token::MUL:
2384 __ mul_d(f10, f12, f14);
2385 break;
2386 case Token::DIV:
2387 __ div_d(f10, f12, f14);
2388 break;
2389 default:
2390 UNREACHABLE();
2391 }
2392
2393 // ARM uses a workaround here because of the unaligned HeapNumber
2394 // kValueOffset. On MIPS this workaround is built into sdc1 so
2395 // there's no point in generating even more instructions.
2396 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2397 __ mov(v0, result);
Ben Murdoch85b71792012-04-11 18:30:58 +01002398 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00002399 } else {
2400 // Call the C function to handle the double operation.
2401 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2402 op_,
2403 result,
2404 scratch1);
2405 if (FLAG_debug_code) {
2406 __ stop("Unreachable code.");
2407 }
2408 }
2409 break;
2410 }
2411 case Token::BIT_OR:
2412 case Token::BIT_XOR:
2413 case Token::BIT_AND:
2414 case Token::SAR:
2415 case Token::SHR:
2416 case Token::SHL: {
2417 if (smi_operands) {
2418 __ SmiUntag(a3, left);
2419 __ SmiUntag(a2, right);
2420 } else {
2421 // Convert operands to 32-bit integers. Right in a2 and left in a3.
2422 FloatingPointHelper::ConvertNumberToInt32(masm,
2423 left,
2424 a3,
2425 heap_number_map,
2426 scratch1,
2427 scratch2,
2428 scratch3,
2429 f0,
2430 not_numbers);
2431 FloatingPointHelper::ConvertNumberToInt32(masm,
2432 right,
2433 a2,
2434 heap_number_map,
2435 scratch1,
2436 scratch2,
2437 scratch3,
2438 f0,
2439 not_numbers);
2440 }
2441 Label result_not_a_smi;
2442 switch (op_) {
2443 case Token::BIT_OR:
2444 __ Or(a2, a3, Operand(a2));
2445 break;
2446 case Token::BIT_XOR:
2447 __ Xor(a2, a3, Operand(a2));
2448 break;
2449 case Token::BIT_AND:
2450 __ And(a2, a3, Operand(a2));
2451 break;
2452 case Token::SAR:
2453 // Use only the 5 least significant bits of the shift count.
2454 __ GetLeastBitsFromInt32(a2, a2, 5);
2455 __ srav(a2, a3, a2);
2456 break;
2457 case Token::SHR:
2458 // Use only the 5 least significant bits of the shift count.
2459 __ GetLeastBitsFromInt32(a2, a2, 5);
2460 __ srlv(a2, a3, a2);
2461 // SHR is special because it is required to produce a positive answer.
2462 // The code below for writing into heap numbers isn't capable of
 2463 // writing the register as an unsigned int, so we go to the slow case
 2464 // when that happens.
2465 if (CpuFeatures::IsSupported(FPU)) {
2466 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2467 } else {
2468 __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2469 }
2470 break;
2471 case Token::SHL:
2472 // Use only the 5 least significant bits of the shift count.
2473 __ GetLeastBitsFromInt32(a2, a2, 5);
2474 __ sllv(a2, a3, a2);
2475 break;
2476 default:
2477 UNREACHABLE();
2478 }
2479 // Check that the *signed* result fits in a smi.
2480 __ Addu(a3, a2, Operand(0x40000000));
2481 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2482 __ SmiTag(v0, a2);
2483 __ Ret();
2484
2485 // Allocate new heap number for result.
2486 __ bind(&result_not_a_smi);
2487 Register result = t1;
2488 if (smi_operands) {
2489 __ AllocateHeapNumber(
2490 result, scratch1, scratch2, heap_number_map, gc_required);
2491 } else {
2492 GenerateHeapResultAllocation(
2493 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2494 }
2495
2496 // a2: Answer as signed int32.
2497 // t1: Heap number to write answer into.
2498
2499 // Nothing can go wrong now, so move the heap number to v0, which is the
2500 // result.
2501 __ mov(v0, t1);
2502
2503 if (CpuFeatures::IsSupported(FPU)) {
2504 // Convert the int32 in a2 to the heap number in a0. As
2505 // mentioned above SHR needs to always produce a positive result.
2506 CpuFeatures::Scope scope(FPU);
2507 __ mtc1(a2, f0);
2508 if (op_ == Token::SHR) {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002509 __ Cvt_d_uw(f0, f0, f22);
Ben Murdoch257744e2011-11-30 15:57:28 +00002510 } else {
2511 __ cvt_d_w(f0, f0);
2512 }
2513 // ARM uses a workaround here because of the unaligned HeapNumber
2514 // kValueOffset. On MIPS this workaround is built into sdc1 so
2515 // there's no point in generating even more instructions.
2516 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2517 __ Ret();
2518 } else {
2519 // Tail call that writes the int32 in a2 to the heap number in v0, using
2520 // a3 and a0 as scratch. v0 is preserved and returned.
2521 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2522 __ TailCallStub(&stub);
2523 }
2524 break;
2525 }
2526 default:
2527 UNREACHABLE();
2528 }
Steve Block44f0eee2011-05-26 01:26:41 +01002529}
2530
2531
 2532 // Generate the smi code. If the operation on smis is successful, a return is
 2533 // generated. If the result is not a smi and heap number allocation is not
 2534 // requested, the code falls through. If heap number allocation is requested
 2535 // but a heap number cannot be allocated, the code jumps to the label
 2536 // gc_required.
Ben Murdoch257744e2011-11-30 15:57:28 +00002536void BinaryOpStub::GenerateSmiCode(
2537 MacroAssembler* masm,
2538 Label* use_runtime,
Steve Block44f0eee2011-05-26 01:26:41 +01002539 Label* gc_required,
2540 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002541 Label not_smis;
2542
2543 Register left = a1;
2544 Register right = a0;
2545 Register scratch1 = t3;
2546 Register scratch2 = t5;
2547
2548 // Perform combined smi check on both operands.
2549 __ Or(scratch1, left, Operand(right));
2550 STATIC_ASSERT(kSmiTag == 0);
2551 __ JumpIfNotSmi(scratch1, &not_smis);
2552
 2553 // If the smi-smi operation results in a smi, a return is generated.
2554 GenerateSmiSmiOperation(masm);
2555
 2556 // If heap number results are possible, generate the result in an allocated
2557 // heap number.
2558 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2559 GenerateFPOperation(masm, true, use_runtime, gc_required);
2560 }
2561 __ bind(&not_smis);
Steve Block44f0eee2011-05-26 01:26:41 +01002562}
2563
2564
Ben Murdoch257744e2011-11-30 15:57:28 +00002565void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2566 Label not_smis, call_runtime;
2567
2568 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2569 result_type_ == BinaryOpIC::SMI) {
2570 // Only allow smi results.
2571 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2572 } else {
2573 // Allow heap number result and don't make a transition if a heap number
2574 // cannot be allocated.
2575 GenerateSmiCode(masm,
2576 &call_runtime,
2577 &call_runtime,
2578 ALLOW_HEAPNUMBER_RESULTS);
2579 }
2580
2581 // Code falls through if the result is not returned as either a smi or heap
2582 // number.
2583 GenerateTypeTransition(masm);
2584
2585 __ bind(&call_runtime);
2586 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002587}
2588
2589
Ben Murdoch257744e2011-11-30 15:57:28 +00002590void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2591 ASSERT(operands_type_ == BinaryOpIC::STRING);
2592 // Try to add arguments as strings, otherwise, transition to the generic
2593 // BinaryOpIC type.
2594 GenerateAddStrings(masm);
2595 GenerateTypeTransition(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002596}
2597
2598
Ben Murdoch257744e2011-11-30 15:57:28 +00002599void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2600 Label call_runtime;
2601 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2602 ASSERT(op_ == Token::ADD);
2603 // If both arguments are strings, call the string add stub.
2604 // Otherwise, do a transition.
2605
2606 // Registers containing left and right operands respectively.
2607 Register left = a1;
2608 Register right = a0;
2609
2610 // Test if left operand is a string.
2611 __ JumpIfSmi(left, &call_runtime);
2612 __ GetObjectType(left, a2, a2);
2613 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2614
2615 // Test if right operand is a string.
2616 __ JumpIfSmi(right, &call_runtime);
2617 __ GetObjectType(right, a2, a2);
2618 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2619
2620 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2621 GenerateRegisterArgsPush(masm);
2622 __ TailCallStub(&string_add_stub);
2623
2624 __ bind(&call_runtime);
2625 GenerateTypeTransition(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002626}
2627
2628
Ben Murdoch257744e2011-11-30 15:57:28 +00002629void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2630 ASSERT(operands_type_ == BinaryOpIC::INT32);
2631
2632 Register left = a1;
2633 Register right = a0;
2634 Register scratch1 = t3;
2635 Register scratch2 = t5;
2636 FPURegister double_scratch = f0;
2637 FPURegister single_scratch = f6;
2638
2639 Register heap_number_result = no_reg;
2640 Register heap_number_map = t2;
2641 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2642
2643 Label call_runtime;
2644 // Labels for type transition, used for wrong input or output types.
 2645 // Both labels are currently actually bound to the same position. We use two
 2646 // different labels to differentiate the cause leading to the type transition.
2647 Label transition;
2648
2649 // Smi-smi fast case.
2650 Label skip;
2651 __ Or(scratch1, left, right);
2652 __ JumpIfNotSmi(scratch1, &skip);
2653 GenerateSmiSmiOperation(masm);
2654 // Fall through if the result is not a smi.
2655 __ bind(&skip);
2656
2657 switch (op_) {
2658 case Token::ADD:
2659 case Token::SUB:
2660 case Token::MUL:
2661 case Token::DIV:
2662 case Token::MOD: {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002663 // Load both operands and check that they are 32-bit integers.
2664 // Jump to type transition if they are not. The registers a0 and a1 (right
2665 // and left) are preserved for the runtime call.
2666 FloatingPointHelper::Destination destination =
2667 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2668 ? FloatingPointHelper::kFPURegisters
2669 : FloatingPointHelper::kCoreRegisters;
Ben Murdoch257744e2011-11-30 15:57:28 +00002670
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002671 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2672 right,
2673 destination,
2674 f14,
2675 a2,
2676 a3,
2677 heap_number_map,
2678 scratch1,
2679 scratch2,
2680 f2,
2681 &transition);
2682 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2683 left,
2684 destination,
2685 f12,
2686 t0,
2687 t1,
2688 heap_number_map,
2689 scratch1,
2690 scratch2,
2691 f2,
2692 &transition);
Ben Murdoch257744e2011-11-30 15:57:28 +00002693
2694 if (destination == FloatingPointHelper::kFPURegisters) {
2695 CpuFeatures::Scope scope(FPU);
2696 Label return_heap_number;
2697 switch (op_) {
2698 case Token::ADD:
2699 __ add_d(f10, f12, f14);
2700 break;
2701 case Token::SUB:
2702 __ sub_d(f10, f12, f14);
2703 break;
2704 case Token::MUL:
2705 __ mul_d(f10, f12, f14);
2706 break;
2707 case Token::DIV:
2708 __ div_d(f10, f12, f14);
2709 break;
2710 default:
2711 UNREACHABLE();
2712 }
2713
2714 if (op_ != Token::DIV) {
2715 // These operations produce an integer result.
2716 // Try to return a smi if we can.
2717 // Otherwise return a heap number if allowed, or jump to type
2718 // transition.
2719
Ben Murdoch85b71792012-04-11 18:30:58 +01002720 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
 2721 // On MIPS much of that cannot be implemented the same way, so for now
 2722 // it makes more sense to do the truncation manually.
2723
2724 // Save FCSR.
2725 __ cfc1(scratch1, FCSR);
2726 // Disable FPU exceptions.
2727 __ ctc1(zero_reg, FCSR);
2728 __ trunc_w_d(single_scratch, f10);
2729 // Retrieve FCSR.
2730 __ cfc1(scratch2, FCSR);
2731 // Restore FCSR.
2732 __ ctc1(scratch1, FCSR);
2733
2734 // Check for inexact conversion or exception.
2735 __ And(scratch2, scratch2, kFCSRFlagMask);
Ben Murdoch257744e2011-11-30 15:57:28 +00002736
2737 if (result_type_ <= BinaryOpIC::INT32) {
Ben Murdoch85b71792012-04-11 18:30:58 +01002738 // If scratch2 != 0, result does not fit in a 32-bit integer.
2739 __ Branch(&transition, ne, scratch2, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00002740 }
2741
2742 // Check if the result fits in a smi.
2743 __ mfc1(scratch1, single_scratch);
2744 __ Addu(scratch2, scratch1, Operand(0x40000000));
2745 // If not try to return a heap number.
2746 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2747 // Check for minus zero. Return heap number for minus zero.
2748 Label not_zero;
2749 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
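          // The integer result is zero; inspect the sign of the double. On
          // MIPS32, f11 holds the high word of the double in f10, so its sign
          // bit distinguishes -0 from +0.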
2750 __ mfc1(scratch2, f11);
2751 __ And(scratch2, scratch2, HeapNumber::kSignMask);
2752 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2753 __ bind(&not_zero);
2754
2755 // Tag the result and return.
2756 __ SmiTag(v0, scratch1);
2757 __ Ret();
2758 } else {
2759 // DIV just falls through to allocating a heap number.
2760 }
2761
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002762 __ bind(&return_heap_number);
2763 // Return a heap number, or fall through to type transition or runtime
2764 // call if we can't.
2765 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2766 : BinaryOpIC::INT32)) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002767 // We are using FPU registers so s0 is available.
2768 heap_number_result = s0;
2769 GenerateHeapResultAllocation(masm,
2770 heap_number_result,
2771 heap_number_map,
2772 scratch1,
2773 scratch2,
2774 &call_runtime);
2775 __ mov(v0, heap_number_result);
2776 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2777 __ Ret();
2778 }
2779
2780 // A DIV operation expecting an integer result falls through
2781 // to type transition.
2782
2783 } else {
2784 // We preserved a0 and a1 to be able to call runtime.
2785 // Save the left value on the stack.
2786 __ Push(t1, t0);
2787
2788 Label pop_and_call_runtime;
2789
2790 // Allocate a heap number to store the result.
2791 heap_number_result = s0;
2792 GenerateHeapResultAllocation(masm,
2793 heap_number_result,
2794 heap_number_map,
2795 scratch1,
2796 scratch2,
2797 &pop_and_call_runtime);
2798
2799 // Load the left value from the value saved on the stack.
2800 __ Pop(a1, a0);
2801
2802 // Call the C function to handle the double operation.
2803 FloatingPointHelper::CallCCodeForDoubleOperation(
2804 masm, op_, heap_number_result, scratch1);
2805 if (FLAG_debug_code) {
2806 __ stop("Unreachable code.");
2807 }
2808
2809 __ bind(&pop_and_call_runtime);
2810 __ Drop(2);
2811 __ Branch(&call_runtime);
2812 }
2813
2814 break;
2815 }
2816
2817 case Token::BIT_OR:
2818 case Token::BIT_XOR:
2819 case Token::BIT_AND:
2820 case Token::SAR:
2821 case Token::SHR:
2822 case Token::SHL: {
2823 Label return_heap_number;
2824 Register scratch3 = t1;
2825 // Convert operands to 32-bit integers. Right in a2 and left in a3. The
2826 // registers a0 and a1 (right and left) are preserved for the runtime
2827 // call.
2828 FloatingPointHelper::LoadNumberAsInt32(masm,
2829 left,
2830 a3,
2831 heap_number_map,
2832 scratch1,
2833 scratch2,
2834 scratch3,
2835 f0,
2836 &transition);
2837 FloatingPointHelper::LoadNumberAsInt32(masm,
2838 right,
2839 a2,
2840 heap_number_map,
2841 scratch1,
2842 scratch2,
2843 scratch3,
2844 f0,
2845 &transition);
2846
2847 // The ECMA-262 standard specifies that, for shift operations, only the
2848 // 5 least significant bits of the shift value should be used.
2849 switch (op_) {
2850 case Token::BIT_OR:
2851 __ Or(a2, a3, Operand(a2));
2852 break;
2853 case Token::BIT_XOR:
2854 __ Xor(a2, a3, Operand(a2));
2855 break;
2856 case Token::BIT_AND:
2857 __ And(a2, a3, Operand(a2));
2858 break;
2859 case Token::SAR:
2860 __ And(a2, a2, Operand(0x1f));
2861 __ srav(a2, a3, a2);
2862 break;
2863 case Token::SHR:
2864 __ And(a2, a2, Operand(0x1f));
2865 __ srlv(a2, a3, a2);
2866 // SHR is special because it is required to produce a positive answer.
2867 // We only get a negative result if the shift value (a2) is 0.
 2868 // This result cannot be represented as a signed 32-bit integer, try
2869 // to return a heap number if we can.
 2870 // The non-FPU code does not support this special case, so jump to
 2871 // the runtime in that case.
2872 if (CpuFeatures::IsSupported(FPU)) {
2873 __ Branch((result_type_ <= BinaryOpIC::INT32)
2874 ? &transition
2875 : &return_heap_number,
2876 lt,
2877 a2,
2878 Operand(zero_reg));
2879 } else {
2880 __ Branch((result_type_ <= BinaryOpIC::INT32)
2881 ? &transition
2882 : &call_runtime,
2883 lt,
2884 a2,
2885 Operand(zero_reg));
2886 }
2887 break;
2888 case Token::SHL:
2889 __ And(a2, a2, Operand(0x1f));
2890 __ sllv(a2, a3, a2);
2891 break;
2892 default:
2893 UNREACHABLE();
2894 }
2895
2896 // Check if the result fits in a smi.
2897 __ Addu(scratch1, a2, Operand(0x40000000));
2898 // If not try to return a heap number. (We know the result is an int32.)
2899 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
2900 // Tag the result and return.
2901 __ SmiTag(v0, a2);
2902 __ Ret();
2903
2904 __ bind(&return_heap_number);
2905 heap_number_result = t1;
2906 GenerateHeapResultAllocation(masm,
2907 heap_number_result,
2908 heap_number_map,
2909 scratch1,
2910 scratch2,
2911 &call_runtime);
2912
2913 if (CpuFeatures::IsSupported(FPU)) {
2914 CpuFeatures::Scope scope(FPU);
2915
2916 if (op_ != Token::SHR) {
2917 // Convert the result to a floating point value.
2918 __ mtc1(a2, double_scratch);
2919 __ cvt_d_w(double_scratch, double_scratch);
2920 } else {
2921 // The result must be interpreted as an unsigned 32-bit integer.
2922 __ mtc1(a2, double_scratch);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002923 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00002924 }
2925
2926 // Store the result.
2927 __ mov(v0, heap_number_result);
2928 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
2929 __ Ret();
2930 } else {
2931 // Tail call that writes the int32 in a2 to the heap number in v0, using
Ben Murdoch85b71792012-04-11 18:30:58 +01002932 // a3 and a1 as scratch. v0 is preserved and returned.
Ben Murdoch257744e2011-11-30 15:57:28 +00002933 __ mov(a0, t1);
Ben Murdoch85b71792012-04-11 18:30:58 +01002934 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
Ben Murdoch257744e2011-11-30 15:57:28 +00002935 __ TailCallStub(&stub);
2936 }
2937
2938 break;
2939 }
2940
2941 default:
2942 UNREACHABLE();
2943 }
2944
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002945 // We never expect DIV to yield an integer result, so we always generate
2946 // type transition code for DIV operations expecting an integer result: the
2947 // code will fall through to this type transition.
2948 if (transition.is_linked() ||
2949 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002950 __ bind(&transition);
2951 GenerateTypeTransition(masm);
2952 }
2953
2954 __ bind(&call_runtime);
2955 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002956}
2957
2958
Ben Murdoch257744e2011-11-30 15:57:28 +00002959void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
2960 Label call_runtime;
2961
2962 if (op_ == Token::ADD) {
2963 // Handle string addition here, because it is the only operation
2964 // that does not do a ToNumber conversion on the operands.
2965 GenerateAddStrings(masm);
2966 }
2967
2968 // Convert oddball arguments to numbers.
2969 Label check, done;
2970 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
2971 __ Branch(&check, ne, a1, Operand(t0));
2972 if (Token::IsBitOp(op_)) {
2973 __ li(a1, Operand(Smi::FromInt(0)));
2974 } else {
2975 __ LoadRoot(a1, Heap::kNanValueRootIndex);
2976 }
2977 __ jmp(&done);
2978 __ bind(&check);
2979 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
2980 __ Branch(&done, ne, a0, Operand(t0));
2981 if (Token::IsBitOp(op_)) {
2982 __ li(a0, Operand(Smi::FromInt(0)));
2983 } else {
2984 __ LoadRoot(a0, Heap::kNanValueRootIndex);
2985 }
2986 __ bind(&done);
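// Rough sketch of the conversion above (a1/a0 are the left/right operands):
// an undefined operand becomes 0 for bitwise operators and NaN otherwise,
// since ToNumber(undefined) is NaN and ToInt32(NaN) is 0:
//   if (left == undefined_value)  left  = Token::IsBitOp(op) ? Smi(0) : nan_value;
//   if (right == undefined_value) right = Token::IsBitOp(op) ? Smi(0) : nan_value;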
2987
2988 GenerateHeapNumberStub(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002989}
2990
2991
Ben Murdoch257744e2011-11-30 15:57:28 +00002992void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2993 Label call_runtime;
2994 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
2995
2996 __ bind(&call_runtime);
2997 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002998}
2999
3000
Ben Murdoch257744e2011-11-30 15:57:28 +00003001void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3002 Label call_runtime, call_string_add_or_runtime;
3003
3004 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3005
3006 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3007
3008 __ bind(&call_string_add_or_runtime);
3009 if (op_ == Token::ADD) {
3010 GenerateAddStrings(masm);
3011 }
3012
3013 __ bind(&call_runtime);
3014 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003015}
3016
3017
Ben Murdoch257744e2011-11-30 15:57:28 +00003018void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3019 ASSERT(op_ == Token::ADD);
3020 Label left_not_string, call_runtime;
3021
3022 Register left = a1;
3023 Register right = a0;
3024
3025 // Check if left argument is a string.
3026 __ JumpIfSmi(left, &left_not_string);
3027 __ GetObjectType(left, a2, a2);
3028 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3029
3030 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3031 GenerateRegisterArgsPush(masm);
3032 __ TailCallStub(&string_add_left_stub);
3033
3034 // Left operand is not a string, test right.
3035 __ bind(&left_not_string);
3036 __ JumpIfSmi(right, &call_runtime);
3037 __ GetObjectType(right, a2, a2);
3038 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3039
3040 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3041 GenerateRegisterArgsPush(masm);
3042 __ TailCallStub(&string_add_right_stub);
3043
3044 // At least one argument is not a string.
3045 __ bind(&call_runtime);
3046}
3047
3048
3049void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3050 GenerateRegisterArgsPush(masm);
3051 switch (op_) {
3052 case Token::ADD:
3053 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3054 break;
3055 case Token::SUB:
3056 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3057 break;
3058 case Token::MUL:
3059 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3060 break;
3061 case Token::DIV:
3062 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3063 break;
3064 case Token::MOD:
3065 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3066 break;
3067 case Token::BIT_OR:
3068 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3069 break;
3070 case Token::BIT_AND:
3071 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3072 break;
3073 case Token::BIT_XOR:
3074 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3075 break;
3076 case Token::SAR:
3077 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3078 break;
3079 case Token::SHR:
3080 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3081 break;
3082 case Token::SHL:
3083 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3084 break;
3085 default:
3086 UNREACHABLE();
3087 }
3088}
3089
3090
3091void BinaryOpStub::GenerateHeapResultAllocation(
Steve Block44f0eee2011-05-26 01:26:41 +01003092 MacroAssembler* masm,
3093 Register result,
3094 Register heap_number_map,
3095 Register scratch1,
3096 Register scratch2,
3097 Label* gc_required) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003098
3099 // The code below will clobber the result register if allocation fails. To
3100 // keep both arguments intact for the runtime call, result cannot be either of them.
3101 ASSERT(!result.is(a0) && !result.is(a1));
3102
3103 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3104 Label skip_allocation, allocated;
3105 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3106 // If the overwritable operand is already an object, we skip the
3107 // allocation of a heap number.
3108 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3109 // Allocate a heap number for the result.
3110 __ AllocateHeapNumber(
3111 result, scratch1, scratch2, heap_number_map, gc_required);
3112 __ Branch(&allocated);
3113 __ bind(&skip_allocation);
3114 // Use object holding the overwritable operand for result.
3115 __ mov(result, overwritable_operand);
3116 __ bind(&allocated);
3117 } else {
3118 ASSERT(mode_ == NO_OVERWRITE);
3119 __ AllocateHeapNumber(
3120 result, scratch1, scratch2, heap_number_map, gc_required);
3121 }
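// Roughly equivalent pseudo-C for the allocation policy above (sketch only):
//   if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
//     Object* op = (mode == OVERWRITE_LEFT) ? left : right;  // a1 or a0.
//     result = op->IsSmi() ? AllocateHeapNumber() : op;      // Reuse heap objects.
//   } else {  // NO_OVERWRITE
//     result = AllocateHeapNumber();
//   }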
Steve Block44f0eee2011-05-26 01:26:41 +01003122}
3123
3124
Ben Murdoch257744e2011-11-30 15:57:28 +00003125void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3126 __ Push(a1, a0);
Steve Block44f0eee2011-05-26 01:26:41 +01003127}
3128
3129
3130
3131void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003132 // Untagged case: double input in f4, double result goes
3133 // into f4.
3134 // Tagged case: tagged input on top of stack and in a0,
3135 // tagged result (heap number) goes into v0.
3136
3137 Label input_not_smi;
3138 Label loaded;
3139 Label calculate;
3140 Label invalid_cache;
3141 const Register scratch0 = t5;
3142 const Register scratch1 = t3;
3143 const Register cache_entry = a0;
3144 const bool tagged = (argument_type_ == TAGGED);
3145
3146 if (CpuFeatures::IsSupported(FPU)) {
3147 CpuFeatures::Scope scope(FPU);
3148
3149 if (tagged) {
3150 // Argument is a number and is on stack and in a0.
3151 // Load argument and check if it is a smi.
3152 __ JumpIfNotSmi(a0, &input_not_smi);
3153
3154 // Input is a smi. Convert to double and load the low and high words
3155 // of the double into a2, a3.
3156 __ sra(t0, a0, kSmiTagSize);
3157 __ mtc1(t0, f4);
3158 __ cvt_d_w(f4, f4);
3159 __ Move(a2, a3, f4);
3160 __ Branch(&loaded);
3161
3162 __ bind(&input_not_smi);
3163 // Check if input is a HeapNumber.
3164 __ CheckMap(a0,
3165 a1,
3166 Heap::kHeapNumberMapRootIndex,
3167 &calculate,
3168 DONT_DO_SMI_CHECK);
3169 // Input is a HeapNumber. Load the
3170 // low and high words into a2, a3.
3171 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3172 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3173 } else {
3174 // Input is untagged double in f4. Output goes to f4.
3175 __ Move(a2, a3, f4);
3176 }
3177 __ bind(&loaded);
3178 // a2 = low 32 bits of double value.
3179 // a3 = high 32 bits of double value.
3180 // Compute hash (the shifts are arithmetic):
3181 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3182 __ Xor(a1, a2, a3);
3183 __ sra(t0, a1, 16);
3184 __ Xor(a1, a1, t0);
3185 __ sra(t0, a1, 8);
3186 __ Xor(a1, a1, t0);
3187 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3188 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3189
3190 // a2 = low 32 bits of double value.
3191 // a3 = high 32 bits of double value.
3192 // a1 = TranscendentalCache::hash(double value).
3193 __ li(cache_entry, Operand(
3194 ExternalReference::transcendental_cache_array_address(
3195 masm->isolate())));
3196 // a0 points to cache array.
3197 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3198 Isolate::Current()->transcendental_cache()->caches_[0])));
3199 // a0 points to the cache for the type type_.
3200 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3201 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3202
3203#ifdef DEBUG
3204 // Check that the layout of cache elements matches expectations.
3205 { TranscendentalCache::SubCache::Element test_elem[2];
3206 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3207 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3208 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3209 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3210 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3211 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3212 CHECK_EQ(0, elem_in0 - elem_start);
3213 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3214 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3215 }
3216#endif
3217
3218 // Find the address of the a1-th entry in the cache, i.e., &a0[a1 * 12].
3219 __ sll(t0, a1, 1);
3220 __ Addu(a1, a1, t0);
3221 __ sll(t0, a1, 2);
3222 __ Addu(cache_entry, cache_entry, t0);
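// The shift/add sequence above computes hash * 12 as ((hash * 3) << 2); in C
// terms it is plain array indexing, since sizeof(Element) is 12 bytes (two
// uint32_t inputs plus an output pointer, as the DEBUG block above verifies):
//   Element* entry = cache_base + hash;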
3223
3224 // Check if cache matches: Double value is stored in uint32_t[2] array.
3225 __ lw(t0, MemOperand(cache_entry, 0));
3226 __ lw(t1, MemOperand(cache_entry, 4));
3227 __ lw(t2, MemOperand(cache_entry, 8));
Ben Murdoch85b71792012-04-11 18:30:58 +01003228 __ Addu(cache_entry, cache_entry, 12);
Ben Murdoch257744e2011-11-30 15:57:28 +00003229 __ Branch(&calculate, ne, a2, Operand(t0));
3230 __ Branch(&calculate, ne, a3, Operand(t1));
3231 // Cache hit. Load result, cleanup and return.
3232 if (tagged) {
3233 // Pop input value from stack and load result into v0.
3234 __ Drop(1);
3235 __ mov(v0, t2);
3236 } else {
3237 // Load result into f4.
3238 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3239 }
3240 __ Ret();
3241 } // if (CpuFeatures::IsSupported(FPU))
3242
3243 __ bind(&calculate);
3244 if (tagged) {
3245 __ bind(&invalid_cache);
3246 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3247 masm->isolate()),
3248 1,
3249 1);
3250 } else {
3251 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3252 CpuFeatures::Scope scope(FPU);
3253
3254 Label no_update;
3255 Label skip_cache;
3256 const Register heap_number_map = t2;
3257
3258 // Call C function to calculate the result and update the cache.
3259 // Register a0 holds precalculated cache entry address; preserve
3260 // it on the stack and pop it into register cache_entry after the
3261 // call.
Ben Murdoch85b71792012-04-11 18:30:58 +01003262 __ push(cache_entry);
Ben Murdoch257744e2011-11-30 15:57:28 +00003263 GenerateCallCFunction(masm, scratch0);
3264 __ GetCFunctionDoubleResult(f4);
3265
3266 // Try to update the cache. If we cannot allocate a
3267 // heap number, we return the result without updating.
Ben Murdoch85b71792012-04-11 18:30:58 +01003268 __ pop(cache_entry);
Ben Murdoch257744e2011-11-30 15:57:28 +00003269 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3270 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3271 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3272
3273 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3274 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3275 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3276
3277 __ mov(v0, cache_entry);
Ben Murdoch85b71792012-04-11 18:30:58 +01003278 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00003279
3280 __ bind(&invalid_cache);
3281 // The cache is invalid. Call runtime which will recreate the
3282 // cache.
3283 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3284 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3285 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01003286 __ EnterInternalFrame();
3287 __ push(a0);
3288 __ CallRuntime(RuntimeFunction(), 1);
3289 __ LeaveInternalFrame();
Ben Murdoch257744e2011-11-30 15:57:28 +00003290 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3291 __ Ret();
3292
3293 __ bind(&skip_cache);
3294 // Call the C function to calculate the result and return it directly,
3295 // without updating the cache.
3296 GenerateCallCFunction(masm, scratch0);
3297 __ GetCFunctionDoubleResult(f4);
3298 __ bind(&no_update);
3299
3300 // We return the value in f4 without adding it to the cache, but
3301 // we cause a scavenging GC so that future allocations will succeed.
Ben Murdoch85b71792012-04-11 18:30:58 +01003302 __ EnterInternalFrame();
Ben Murdoch257744e2011-11-30 15:57:28 +00003303
Ben Murdoch85b71792012-04-11 18:30:58 +01003304 // Allocate an aligned object larger than a HeapNumber.
3305 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3306 __ li(scratch0, Operand(4 * kPointerSize));
3307 __ push(scratch0);
3308 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3309 __ LeaveInternalFrame();
Ben Murdoch257744e2011-11-30 15:57:28 +00003310 __ Ret();
3311 }
3312}
3313
3314
3315void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3316 Register scratch) {
3317 __ push(ra);
3318 __ PrepareCallCFunction(2, scratch);
3319 if (IsMipsSoftFloatABI) {
Ben Murdoch85b71792012-04-11 18:30:58 +01003320 __ Move(v0, v1, f4);
Ben Murdoch257744e2011-11-30 15:57:28 +00003321 } else {
3322 __ mov_d(f12, f4);
3323 }
3324 switch (type_) {
3325 case TranscendentalCache::SIN:
3326 __ CallCFunction(
Ben Murdoch85b71792012-04-11 18:30:58 +01003327 ExternalReference::math_sin_double_function(masm->isolate()), 2);
Ben Murdoch257744e2011-11-30 15:57:28 +00003328 break;
3329 case TranscendentalCache::COS:
3330 __ CallCFunction(
Ben Murdoch85b71792012-04-11 18:30:58 +01003331 ExternalReference::math_cos_double_function(masm->isolate()), 2);
Ben Murdoch257744e2011-11-30 15:57:28 +00003332 break;
3333 case TranscendentalCache::LOG:
3334 __ CallCFunction(
Ben Murdoch85b71792012-04-11 18:30:58 +01003335 ExternalReference::math_log_double_function(masm->isolate()), 2);
Ben Murdoch257744e2011-11-30 15:57:28 +00003336 break;
3337 default:
3338 UNIMPLEMENTED();
3339 break;
3340 }
3341 __ pop(ra);
Steve Block44f0eee2011-05-26 01:26:41 +01003342}
3343
3344
3345Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
Ben Murdoch257744e2011-11-30 15:57:28 +00003346 switch (type_) {
3347 // Add more cases when necessary.
3348 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3349 case TranscendentalCache::COS: return Runtime::kMath_cos;
3350 case TranscendentalCache::LOG: return Runtime::kMath_log;
3351 default:
3352 UNIMPLEMENTED();
3353 return Runtime::kAbort;
3354 }
Steve Block44f0eee2011-05-26 01:26:41 +01003355}
3356
3357
3358void StackCheckStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003359 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01003360}
3361
3362
Ben Murdoch257744e2011-11-30 15:57:28 +00003363void MathPowStub::Generate(MacroAssembler* masm) {
Ben Murdoch85b71792012-04-11 18:30:58 +01003364 Label call_runtime;
Ben Murdoch257744e2011-11-30 15:57:28 +00003365
Ben Murdoch85b71792012-04-11 18:30:58 +01003366 if (CpuFeatures::IsSupported(FPU)) {
3367 CpuFeatures::Scope scope(FPU);
3368
3369 Label base_not_smi;
3370 Label exponent_not_smi;
3371 Label convert_exponent;
3372
3373 const Register base = a0;
3374 const Register exponent = a2;
3375 const Register heapnumbermap = t1;
3376 const Register heapnumber = s0; // Callee-saved register.
3377 const Register scratch = t2;
3378 const Register scratch2 = t3;
3379
3380 // Allocate FP values in the ABI-parameter-passing regs.
3381 const DoubleRegister double_base = f12;
3382 const DoubleRegister double_exponent = f14;
3383 const DoubleRegister double_result = f0;
3384 const DoubleRegister double_scratch = f2;
3385
3386 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
Ben Murdoch257744e2011-11-30 15:57:28 +00003387 __ lw(base, MemOperand(sp, 1 * kPointerSize));
3388 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3389
Ben Murdoch85b71792012-04-11 18:30:58 +01003390 // Convert base to double value and store it in f0.
3391 __ JumpIfNotSmi(base, &base_not_smi);
3392 // Base is a Smi. Untag and convert it.
3393 __ SmiUntag(base);
3394 __ mtc1(base, double_scratch);
3395 __ cvt_d_w(double_base, double_scratch);
3396 __ Branch(&convert_exponent);
Ben Murdoch257744e2011-11-30 15:57:28 +00003397
Ben Murdoch85b71792012-04-11 18:30:58 +01003398 __ bind(&base_not_smi);
Ben Murdoch257744e2011-11-30 15:57:28 +00003399 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3400 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
Ben Murdoch85b71792012-04-11 18:30:58 +01003401 // Base is a heapnumber. Load it into double register.
Ben Murdochc7cc0282012-03-05 14:35:55 +00003402 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
Ben Murdochc7cc0282012-03-05 14:35:55 +00003403
Ben Murdoch85b71792012-04-11 18:30:58 +01003404 __ bind(&convert_exponent);
3405 __ JumpIfNotSmi(exponent, &exponent_not_smi);
3406 __ SmiUntag(exponent);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003407
Ben Murdoch85b71792012-04-11 18:30:58 +01003408 // The base is in a double register and the exponent is
3409 // an untagged smi. Allocate a heap number and call a
3410 // C function for integer exponents. The register containing
3411 // the heap number is callee-saved.
3412 __ AllocateHeapNumber(heapnumber,
3413 scratch,
3414 scratch2,
3415 heapnumbermap,
3416 &call_runtime);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003417 __ push(ra);
Ben Murdoch85b71792012-04-11 18:30:58 +01003418 __ PrepareCallCFunction(3, scratch);
3419 __ SetCallCDoubleArguments(double_base, exponent);
3420 __ CallCFunction(
3421 ExternalReference::power_double_int_function(masm->isolate()), 3);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003422 __ pop(ra);
3423 __ GetCFunctionDoubleResult(double_result);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003424 __ sdc1(double_result,
3425 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01003426 __ mov(v0, heapnumber);
3427 __ DropAndRet(2 * kPointerSize);
3428
3429 __ bind(&exponent_not_smi);
3430 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3431 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3432 // Exponent is a heapnumber. Load it into double register.
3433 __ ldc1(double_exponent,
3434 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3435
3436 // The base and the exponent are in double registers.
3437 // Allocate a heap number and call a C function for
3438 // double exponents. The register containing
3439 // the heap number is callee-saved.
3440 __ AllocateHeapNumber(heapnumber,
3441 scratch,
3442 scratch2,
3443 heapnumbermap,
3444 &call_runtime);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003445 __ push(ra);
Ben Murdoch85b71792012-04-11 18:30:58 +01003446 __ PrepareCallCFunction(4, scratch);
3447 // ABI (o32) for func(double a, double b): a in f12, b in f14.
3448 ASSERT(double_base.is(f12));
3449 ASSERT(double_exponent.is(f14));
3450 __ SetCallCDoubleArguments(double_base, double_exponent);
3451 __ CallCFunction(
3452 ExternalReference::power_double_double_function(masm->isolate()), 4);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003453 __ pop(ra);
3454 __ GetCFunctionDoubleResult(double_result);
Ben Murdoch85b71792012-04-11 18:30:58 +01003455 __ sdc1(double_result,
3456 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3457 __ mov(v0, heapnumber);
3458 __ DropAndRet(2 * kPointerSize);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003459 }
Ben Murdoch85b71792012-04-11 18:30:58 +01003460
3461 __ bind(&call_runtime);
3462 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01003463}
3464
3465
3466bool CEntryStub::NeedsImmovableCode() {
3467 return true;
3468}
3469
3470
Ben Murdoch85b71792012-04-11 18:30:58 +01003471void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
3472 __ Throw(v0);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003473}
3474
3475
Ben Murdoch85b71792012-04-11 18:30:58 +01003476void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3477 UncatchableExceptionType type) {
3478 __ ThrowUncatchable(type, v0);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003479}
3480
3481
Steve Block44f0eee2011-05-26 01:26:41 +01003482void CEntryStub::GenerateCore(MacroAssembler* masm,
3483 Label* throw_normal_exception,
3484 Label* throw_termination_exception,
3485 Label* throw_out_of_memory_exception,
3486 bool do_gc,
3487 bool always_allocate) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003488 // v0: result parameter for PerformGC, if any
3489 // s0: number of arguments including receiver (C callee-saved)
3490 // s1: pointer to the first argument (C callee-saved)
3491 // s2: pointer to builtin function (C callee-saved)
3492
3493 if (do_gc) {
3494 // Move result passed in v0 into a0 to call PerformGC.
3495 __ mov(a0, v0);
Ben Murdoch85b71792012-04-11 18:30:58 +01003496 __ PrepareCallCFunction(1, a1);
3497 __ CallCFunction(
3498 ExternalReference::perform_gc_function(masm->isolate()), 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003499 }
3500
3501 ExternalReference scope_depth =
Ben Murdoch85b71792012-04-11 18:30:58 +01003502 ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
Ben Murdoch257744e2011-11-30 15:57:28 +00003503 if (always_allocate) {
3504 __ li(a0, Operand(scope_depth));
3505 __ lw(a1, MemOperand(a0));
3506 __ Addu(a1, a1, Operand(1));
3507 __ sw(a1, MemOperand(a0));
3508 }
3509
Ben Murdoch85b71792012-04-11 18:30:58 +01003510 // Prepare arguments for C routine: a0 = argc, a1 = argv
Ben Murdoch257744e2011-11-30 15:57:28 +00003511 __ mov(a0, s0);
Ben Murdoch85b71792012-04-11 18:30:58 +01003512 __ mov(a1, s1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003513
3514 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3515 // also need to reserve the 4 argument slots on the stack.
3516
3517 __ AssertStackIsAligned();
3518
3519 __ li(a2, Operand(ExternalReference::isolate_address()));
3520
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003521 // To let the GC traverse the return address of the exit frames, we need to
3522 // know where the return address is. The CEntryStub is unmovable, so
3523 // we can store the address on the stack to be able to find it again and
3524 // we never have to restore it, because it will not change.
Ben Murdoch257744e2011-11-30 15:57:28 +00003525 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3526 // This branch-and-link sequence is needed to find the current PC on mips,
3527 // saved to the ra register.
3528 // Use masm-> here instead of the double-underscore macro since extra
3529 // coverage code can interfere with the proper calculation of ra.
3530 Label find_ra;
3531 masm->bal(&find_ra); // bal exposes branch delay slot.
Ben Murdoch85b71792012-04-11 18:30:58 +01003532 masm->nop(); // Branch delay slot nop.
Ben Murdoch257744e2011-11-30 15:57:28 +00003533 masm->bind(&find_ra);
3534
3535 // Adjust the value in ra to point to the correct return location, 2nd
3536 // instruction past the real call into C code (the jalr(t9)), and push it.
3537 // This is the return address of the exit frame.
Ben Murdoch85b71792012-04-11 18:30:58 +01003538 const int kNumInstructionsToJump = 6;
Ben Murdoch257744e2011-11-30 15:57:28 +00003539 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3540 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
Ben Murdoch85b71792012-04-11 18:30:58 +01003541 masm->Subu(sp, sp, kCArgsSlotsSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00003542 // Stack is still aligned.
3543
3544 // Call the C routine.
3545 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3546 masm->jalr(t9);
Ben Murdoch85b71792012-04-11 18:30:58 +01003547 masm->nop(); // Branch delay slot nop.
Ben Murdoch257744e2011-11-30 15:57:28 +00003548 // Make sure the stored 'ra' points to this position.
3549 ASSERT_EQ(kNumInstructionsToJump,
3550 masm->InstructionsGeneratedSince(&find_ra));
3551 }
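// The kNumInstructionsToJump constant above assumes that exactly six
// instructions are emitted between the find_ra label and the saved return
// point: Addu, sw, Subu, mov, jalr and its delay-slot nop, each expanding to
// a single instruction here. The ASSERT_EQ enforces this; if the sequence
// changes, the constant has to be updated to match.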
3552
Ben Murdoch85b71792012-04-11 18:30:58 +01003553 // Restore stack (remove arg slots).
3554 __ Addu(sp, sp, kCArgsSlotsSize);
3555
Ben Murdoch257744e2011-11-30 15:57:28 +00003556 if (always_allocate) {
3557 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3558 __ li(a2, Operand(scope_depth));
3559 __ lw(a3, MemOperand(a2));
3560 __ Subu(a3, a3, Operand(1));
3561 __ sw(a3, MemOperand(a2));
3562 }
3563
3564 // Check for failure result.
3565 Label failure_returned;
3566 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3567 __ addiu(a2, v0, 1);
3568 __ andi(t0, a2, kFailureTagMask);
Ben Murdoch85b71792012-04-11 18:30:58 +01003569 __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
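// Sketch of the failure check above: failure objects carry kFailureTag in
// their low bits, and the STATIC_ASSERT guarantees that adding one clears
// exactly those bits, so roughly:
//   bool IsFailure(intptr_t value) {
//     return ((value + 1) & kFailureTagMask) == 0;
//   }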
Ben Murdoch257744e2011-11-30 15:57:28 +00003570
3571 // Exit C frame and return.
3572 // v0:v1: result
3573 // sp: stack pointer
3574 // fp: frame pointer
Ben Murdoch85b71792012-04-11 18:30:58 +01003575 __ LeaveExitFrame(save_doubles_, s0);
3576 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00003577
3578 // Check if we should retry or throw exception.
3579 Label retry;
3580 __ bind(&failure_returned);
3581 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3582 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3583 __ Branch(&retry, eq, t0, Operand(zero_reg));
3584
3585 // Special handling of out of memory exceptions.
3586 Failure* out_of_memory = Failure::OutOfMemoryException();
Ben Murdoch85b71792012-04-11 18:30:58 +01003587 __ Branch(throw_out_of_memory_exception, eq,
3588 v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
Ben Murdoch257744e2011-11-30 15:57:28 +00003589
3590 // Retrieve the pending exception and clear the variable.
Ben Murdoch85b71792012-04-11 18:30:58 +01003591 __ li(t0,
3592 Operand(ExternalReference::the_hole_value_location(masm->isolate())));
3593 __ lw(a3, MemOperand(t0));
Ben Murdoch589d6972011-11-30 16:04:58 +00003594 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch85b71792012-04-11 18:30:58 +01003595 masm->isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00003596 __ lw(v0, MemOperand(t0));
3597 __ sw(a3, MemOperand(t0));
3598
3599 // Special handling of termination exceptions which are uncatchable
3600 // by javascript code.
Ben Murdoch85b71792012-04-11 18:30:58 +01003601 __ Branch(throw_termination_exception, eq,
3602 v0, Operand(masm->isolate()->factory()->termination_exception()));
Ben Murdoch257744e2011-11-30 15:57:28 +00003603
3604 // Handle normal exception.
3605 __ jmp(throw_normal_exception);
3606
3607 __ bind(&retry);
3608 // Last failure (v0) will be moved to (a0) for parameter when retrying.
Steve Block44f0eee2011-05-26 01:26:41 +01003609}
3610
3611
3612void CEntryStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003613 // Called from JavaScript; parameters are on stack as if calling JS function
Ben Murdoch85b71792012-04-11 18:30:58 +01003614 // a0: number of arguments including receiver
3615 // a1: pointer to builtin function
Ben Murdoch257744e2011-11-30 15:57:28 +00003616 // fp: frame pointer (restored after C call)
3617 // sp: stack pointer (restored as callee's sp after C call)
3618 // cp: current context (C callee-saved)
3619
3620 // NOTE: Invocations of builtins may return failure objects
3621 // instead of a proper result. The builtin entry handles
3622 // this by performing a garbage collection and retrying the
3623 // builtin once.
3624
3625 // Compute the argv pointer in a callee-saved register.
Ben Murdoch85b71792012-04-11 18:30:58 +01003626 __ sll(s1, a0, kPointerSizeLog2);
Ben Murdoch257744e2011-11-30 15:57:28 +00003627 __ Addu(s1, sp, s1);
Ben Murdoch85b71792012-04-11 18:30:58 +01003628 __ Subu(s1, s1, Operand(kPointerSize));
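// In other words (sketch): s1 = sp + argc * kPointerSize - kPointerSize,
// which the code below uses as the pointer to the first argument.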
Ben Murdoch257744e2011-11-30 15:57:28 +00003629
3630 // Enter the exit frame that transitions from JavaScript to C++.
3631 __ EnterExitFrame(save_doubles_);
3632
Ben Murdoch85b71792012-04-11 18:30:58 +01003633 // Set up argc and the builtin function in callee-saved registers.
3634 __ mov(s0, a0);
3635 __ mov(s2, a1);
3636
Ben Murdoch257744e2011-11-30 15:57:28 +00003637 // s0: number of arguments (C callee-saved)
3638 // s1: pointer to first argument (C callee-saved)
3639 // s2: pointer to builtin function (C callee-saved)
3640
3641 Label throw_normal_exception;
3642 Label throw_termination_exception;
3643 Label throw_out_of_memory_exception;
3644
3645 // Call into the runtime system.
3646 GenerateCore(masm,
3647 &throw_normal_exception,
3648 &throw_termination_exception,
3649 &throw_out_of_memory_exception,
3650 false,
3651 false);
3652
3653 // Do space-specific GC and retry runtime call.
3654 GenerateCore(masm,
3655 &throw_normal_exception,
3656 &throw_termination_exception,
3657 &throw_out_of_memory_exception,
3658 true,
3659 false);
3660
3661 // Do full GC and retry runtime call one final time.
3662 Failure* failure = Failure::InternalError();
3663 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
3664 GenerateCore(masm,
3665 &throw_normal_exception,
3666 &throw_termination_exception,
3667 &throw_out_of_memory_exception,
3668 true,
3669 true);
3670
3671 __ bind(&throw_out_of_memory_exception);
Ben Murdoch85b71792012-04-11 18:30:58 +01003672 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
Ben Murdoch257744e2011-11-30 15:57:28 +00003673
3674 __ bind(&throw_termination_exception);
Ben Murdoch85b71792012-04-11 18:30:58 +01003675 GenerateThrowUncatchable(masm, TERMINATION);
Ben Murdoch257744e2011-11-30 15:57:28 +00003676
3677 __ bind(&throw_normal_exception);
Ben Murdoch85b71792012-04-11 18:30:58 +01003678 GenerateThrowTOS(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003679}
3680
3681
3682void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Ben Murdoch85b71792012-04-11 18:30:58 +01003683 Label invoke, exit;
Ben Murdoch257744e2011-11-30 15:57:28 +00003684
3685 // Registers:
3686 // a0: entry address
3687 // a1: function
Ben Murdoch85b71792012-04-11 18:30:58 +01003688 // a2: receiver
Ben Murdoch257744e2011-11-30 15:57:28 +00003689 // a3: argc
3690 //
3691 // Stack:
3692 // 4 args slots
3693 // args
3694
3695 // Save callee saved registers on the stack.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00003696 __ MultiPush(kCalleeSaved | ra.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00003697
Ben Murdoch589d6972011-11-30 16:04:58 +00003698 if (CpuFeatures::IsSupported(FPU)) {
3699 CpuFeatures::Scope scope(FPU);
3700 // Save callee-saved FPU registers.
3701 __ MultiPushFPU(kCalleeSavedFPU);
3702 }
3703
Ben Murdoch257744e2011-11-30 15:57:28 +00003704 // Load argv in s0 register.
Ben Murdoch589d6972011-11-30 16:04:58 +00003705 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
3706 if (CpuFeatures::IsSupported(FPU)) {
3707 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
3708 }
3709
3710 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00003711
3712 // We build an EntryFrame.
3713 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3714 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3715 __ li(t2, Operand(Smi::FromInt(marker)));
3716 __ li(t1, Operand(Smi::FromInt(marker)));
Ben Murdoch589d6972011-11-30 16:04:58 +00003717 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
Ben Murdoch85b71792012-04-11 18:30:58 +01003718 masm->isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00003719 __ lw(t0, MemOperand(t0));
3720 __ Push(t3, t2, t1, t0);
Ben Murdoch85b71792012-04-11 18:30:58 +01003721 // Set up the frame pointer for the frame to be pushed.
Ben Murdoch257744e2011-11-30 15:57:28 +00003722 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
3723
3724 // Registers:
3725 // a0: entry_address
3726 // a1: function
Ben Murdoch85b71792012-04-11 18:30:58 +01003727 // a2: receiver_pointer
Ben Murdoch257744e2011-11-30 15:57:28 +00003728 // a3: argc
3729 // s0: argv
3730 //
3731 // Stack:
3732 // caller fp |
3733 // function slot | entry frame
3734 // context slot |
3735 // bad fp (0xff...f) |
3736 // callee saved registers + ra
3737 // 4 args slots
3738 // args
3739
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003740 // If this is the outermost JS call, set js_entry_sp value.
3741 Label non_outermost_js;
Ben Murdoch85b71792012-04-11 18:30:58 +01003742 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
3743 masm->isolate());
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003744 __ li(t1, Operand(ExternalReference(js_entry_sp)));
3745 __ lw(t2, MemOperand(t1));
3746 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
3747 __ sw(fp, MemOperand(t1));
3748 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3749 Label cont;
3750 __ b(&cont);
3751 __ nop(); // Branch delay slot nop.
3752 __ bind(&non_outermost_js);
3753 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
3754 __ bind(&cont);
3755 __ push(t0);
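// Rough pseudocode for the js_entry_sp bookkeeping above (sketch only):
//   if (*js_entry_sp == 0) {            // Outermost JS entry frame.
//     *js_entry_sp = fp;
//     push(Smi(OUTERMOST_JSENTRY_FRAME));
//   } else {
//     push(Smi(INNER_JSENTRY_FRAME));
//   }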
Ben Murdoch257744e2011-11-30 15:57:28 +00003756
Ben Murdoch85b71792012-04-11 18:30:58 +01003757 // Call a faked try-block that does the invoke.
3758 __ bal(&invoke); // bal exposes branch delay slot.
3759 __ nop(); // Branch delay slot nop.
3760
3761 // Caught exception: Store result (exception) in the pending
3762 // exception field in the JSEnv and return a failure sentinel.
3763 // Coming in here the fp will be invalid because the PushTryHandler below
3764 // sets it to 0 to signal the existence of the JSEntry frame.
Ben Murdoch589d6972011-11-30 16:04:58 +00003765 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch85b71792012-04-11 18:30:58 +01003766 masm->isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00003767 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
3768 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3769 __ b(&exit); // b exposes branch delay slot.
3770 __ nop(); // Branch delay slot nop.
3771
Ben Murdoch85b71792012-04-11 18:30:58 +01003772 // Invoke: Link this frame into the handler chain.
Ben Murdoch257744e2011-11-30 15:57:28 +00003773 __ bind(&invoke);
Ben Murdoch85b71792012-04-11 18:30:58 +01003774 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
Ben Murdoch257744e2011-11-30 15:57:28 +00003775 // If an exception not caught by another handler occurs, this handler
3776 // returns control to the code after the bal(&invoke) above, which
3777 // restores all kCalleeSaved registers (including cp and fp) to their
3778 // saved values before returning a failure to C.
3779
3780 // Clear any pending exceptions.
Ben Murdoch85b71792012-04-11 18:30:58 +01003781 __ li(t0,
3782 Operand(ExternalReference::the_hole_value_location(masm->isolate())));
3783 __ lw(t1, MemOperand(t0));
Ben Murdoch589d6972011-11-30 16:04:58 +00003784 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch85b71792012-04-11 18:30:58 +01003785 masm->isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00003786 __ sw(t1, MemOperand(t0));
3787
3788 // Invoke the function by calling through JS entry trampoline builtin.
3789 // Notice that we cannot store a reference to the trampoline code directly in
3790 // this stub, because runtime stubs are not traversed when doing GC.
3791
3792 // Registers:
3793 // a0: entry_address
3794 // a1: function
Ben Murdoch85b71792012-04-11 18:30:58 +01003795 // a2: receiver_pointer
Ben Murdoch257744e2011-11-30 15:57:28 +00003796 // a3: argc
3797 // s0: argv
3798 //
3799 // Stack:
3800 // handler frame
3801 // entry frame
3802 // callee saved registers + ra
3803 // 4 args slots
3804 // args
3805
3806 if (is_construct) {
3807 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
Ben Murdoch85b71792012-04-11 18:30:58 +01003808 masm->isolate());
Ben Murdoch257744e2011-11-30 15:57:28 +00003809 __ li(t0, Operand(construct_entry));
3810 } else {
3811 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
3812 __ li(t0, Operand(entry));
3813 }
3814 __ lw(t9, MemOperand(t0)); // Deref address.
3815
3816 // Call JSEntryTrampoline.
3817 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
3818 __ Call(t9);
3819
3820 // Unlink this frame from the handler chain.
3821 __ PopTryHandler();
3822
3823 __ bind(&exit); // v0 holds result
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003824 // Check if the current stack frame is marked as the outermost JS frame.
3825 Label non_outermost_js_2;
3826 __ pop(t1);
Ben Murdoch85b71792012-04-11 18:30:58 +01003827 __ Branch(&non_outermost_js_2, ne, t1,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003828 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3829 __ li(t1, Operand(ExternalReference(js_entry_sp)));
3830 __ sw(zero_reg, MemOperand(t1));
3831 __ bind(&non_outermost_js_2);
Ben Murdoch257744e2011-11-30 15:57:28 +00003832
3833 // Restore the top frame descriptors from the stack.
3834 __ pop(t1);
Ben Murdoch589d6972011-11-30 16:04:58 +00003835 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
Ben Murdoch85b71792012-04-11 18:30:58 +01003836 masm->isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00003837 __ sw(t1, MemOperand(t0));
3838
3839 // Reset the stack to the callee saved registers.
3840 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
3841
Ben Murdoch589d6972011-11-30 16:04:58 +00003842 if (CpuFeatures::IsSupported(FPU)) {
3843 CpuFeatures::Scope scope(FPU);
3844 // Restore callee-saved fpu registers.
3845 __ MultiPopFPU(kCalleeSavedFPU);
3846 }
3847
Ben Murdoch257744e2011-11-30 15:57:28 +00003848 // Restore callee saved registers from the stack.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00003849 __ MultiPop(kCalleeSaved | ra.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00003850 // Return.
3851 __ Jump(ra);
Steve Block44f0eee2011-05-26 01:26:41 +01003852}
3853
3854
Ben Murdoch257744e2011-11-30 15:57:28 +00003855// Uses registers a0 to t0.
3856// Expected input (depending on whether args are in registers or on the stack):
3857// * object: a0 or at sp + 1 * kPointerSize.
3858// * function: a1 or at sp.
3859//
Ben Murdoch85b71792012-04-11 18:30:58 +01003860// Inlined call site patching is a crankshaft-specific feature that is not
3861// implemented on MIPS.
Steve Block44f0eee2011-05-26 01:26:41 +01003862void InstanceofStub::Generate(MacroAssembler* masm) {
Ben Murdoch85b71792012-04-11 18:30:58 +01003863 // This is a crankshaft-specific feature that has not been implemented yet.
3864 ASSERT(!HasCallSiteInlineCheck());
Ben Murdoch257744e2011-11-30 15:57:28 +00003865 // Call site inlining and patching implies arguments in registers.
3866 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
3867 // ReturnTrueFalse is only implemented for inlined call sites.
3868 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
3869
3870 // Fixed register usage throughout the stub:
3871 const Register object = a0; // Object (lhs).
3872 Register map = a3; // Map of the object.
3873 const Register function = a1; // Function (rhs).
3874 const Register prototype = t0; // Prototype of the function.
3875 const Register inline_site = t5;
3876 const Register scratch = a2;
3877
3878 Label slow, loop, is_instance, is_not_instance, not_js_object;
3879
3880 if (!HasArgsInRegisters()) {
3881 __ lw(object, MemOperand(sp, 1 * kPointerSize));
3882 __ lw(function, MemOperand(sp, 0));
3883 }
3884
3885 // Check that the left-hand side is a JS object and load its map.
3886 __ JumpIfSmi(object, &not_js_object);
3887 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
3888
3889 // If there is a call site cache don't look in the global cache, but do the
3890 // real lookup and update the call site cache.
3891 if (!HasCallSiteInlineCheck()) {
3892 Label miss;
Ben Murdoch85b71792012-04-11 18:30:58 +01003893 __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
3894 __ Branch(&miss, ne, function, Operand(t1));
3895 __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
3896 __ Branch(&miss, ne, map, Operand(t1));
Ben Murdoch257744e2011-11-30 15:57:28 +00003897 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3898 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3899
3900 __ bind(&miss);
3901 }
3902
3903 // Get the prototype of the function.
Ben Murdoch85b71792012-04-11 18:30:58 +01003904 __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
Ben Murdoch257744e2011-11-30 15:57:28 +00003905
3906 // Check that the function prototype is a JS object.
3907 __ JumpIfSmi(prototype, &slow);
3908 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
3909
3910 // Update the global instanceof or call site inlined cache with the current
3911 // map and function. The cached answer will be set when it is known below.
3912 if (!HasCallSiteInlineCheck()) {
3913 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
3914 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
3915 } else {
Ben Murdoch85b71792012-04-11 18:30:58 +01003916 UNIMPLEMENTED_MIPS();
Ben Murdoch257744e2011-11-30 15:57:28 +00003917 }
3918
3919 // Register mapping: a3 is object map and t0 is function prototype.
3920 // Get prototype of object into a2.
3921 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
3922
3923 // We don't need map any more. Use it as a scratch register.
3924 Register scratch2 = map;
3925 map = no_reg;
3926
3927 // Loop through the prototype chain looking for the function prototype.
3928 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
3929 __ bind(&loop);
3930 __ Branch(&is_instance, eq, scratch, Operand(prototype));
3931 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
3932 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
3933 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
3934 __ Branch(&loop);
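// The loop above is the standard prototype-chain walk; roughly:
//   while (true) {
//     if (proto == function_prototype) return true_result;   // is_instance
//     if (proto == null_value) return false_result;          // is_not_instance
//     proto = proto->map()->prototype();
//   }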
3935
3936 __ bind(&is_instance);
3937 ASSERT(Smi::FromInt(0) == 0);
3938 if (!HasCallSiteInlineCheck()) {
3939 __ mov(v0, zero_reg);
3940 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3941 } else {
Ben Murdoch85b71792012-04-11 18:30:58 +01003942 UNIMPLEMENTED_MIPS();
Ben Murdoch257744e2011-11-30 15:57:28 +00003943 }
3944 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3945
3946 __ bind(&is_not_instance);
3947 if (!HasCallSiteInlineCheck()) {
3948 __ li(v0, Operand(Smi::FromInt(1)));
3949 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3950 } else {
Ben Murdoch85b71792012-04-11 18:30:58 +01003951 UNIMPLEMENTED_MIPS();
Ben Murdoch257744e2011-11-30 15:57:28 +00003952 }
3953 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3954
3955 Label object_not_null, object_not_null_or_smi;
3956 __ bind(&not_js_object);
3957 // Before null, smi and string value checks, check that the rhs is a function
3958 // as for a non-function rhs an exception needs to be thrown.
3959 __ JumpIfSmi(function, &slow);
3960 __ GetObjectType(function, scratch2, scratch);
3961 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
3962
3963 // Null is not instance of anything.
Ben Murdoch85b71792012-04-11 18:30:58 +01003964 __ Branch(&object_not_null, ne, scratch,
3965 Operand(masm->isolate()->factory()->null_value()));
Ben Murdoch257744e2011-11-30 15:57:28 +00003966 __ li(v0, Operand(Smi::FromInt(1)));
3967 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3968
3969 __ bind(&object_not_null);
3970 // Smi values are not instances of anything.
3971 __ JumpIfNotSmi(object, &object_not_null_or_smi);
3972 __ li(v0, Operand(Smi::FromInt(1)));
3973 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3974
3975 __ bind(&object_not_null_or_smi);
3976 // String values are not instances of anything.
3977 __ IsObjectJSStringType(object, scratch, &slow);
3978 __ li(v0, Operand(Smi::FromInt(1)));
3979 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3980
3981 // Slow-case. Tail call builtin.
3982 __ bind(&slow);
3983 if (!ReturnTrueFalseObject()) {
3984 if (HasArgsInRegisters()) {
3985 __ Push(a0, a1);
3986 }
3987 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
3988 } else {
Ben Murdoch85b71792012-04-11 18:30:58 +01003989 __ EnterInternalFrame();
3990 __ Push(a0, a1);
3991 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
3992 __ LeaveInternalFrame();
Ben Murdoch257744e2011-11-30 15:57:28 +00003993 __ mov(a0, v0);
3994 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
3995 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
3996 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
3997 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3998 }
Steve Block44f0eee2011-05-26 01:26:41 +01003999}
4000
4001
Ben Murdoch257744e2011-11-30 15:57:28 +00004002Register InstanceofStub::left() { return a0; }
4003
4004
4005Register InstanceofStub::right() { return a1; }
4006
4007
Steve Block44f0eee2011-05-26 01:26:41 +01004008void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004009 // The displacement is the offset of the last parameter (if any)
4010 // relative to the frame pointer.
4011 static const int kDisplacement =
4012 StandardFrameConstants::kCallerSPOffset - kPointerSize;
4013
4014 // Check that the key is a smi.
4015 Label slow;
4016 __ JumpIfNotSmi(a1, &slow);
4017
4018 // Check if the calling frame is an arguments adaptor frame.
4019 Label adaptor;
4020 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4021 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4022 __ Branch(&adaptor,
4023 eq,
4024 a3,
4025 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4026
4027 // Check index (a1) against formal parameters count limit passed in
4028 // through register a0. Use unsigned comparison to get negative
4029 // check for free.
4030 __ Branch(&slow, hs, a1, Operand(a0));
4031
4032 // Read the argument from the stack and return it.
4033 __ subu(a3, a0, a1);
4034 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4035 __ Addu(a3, fp, Operand(t3));
4036 __ lw(v0, MemOperand(a3, kDisplacement));
4037 __ Ret();
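// Address computation above, spelled out (a0 = argc and a1 = index are both
// smi-tagged, so shifting by kPointerSizeLog2 - kSmiTagSize untags and scales
// to bytes in one step):
//   element = *(fp + (argc - index) * kPointerSize + kDisplacement);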
4038
4039 // Arguments adaptor case: Check index (a1) against actual arguments
4040 // limit found in the arguments adaptor frame. Use unsigned
4041 // comparison to get negative check for free.
4042 __ bind(&adaptor);
4043 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4044 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4045
4046 // Read the argument from the adaptor frame and return it.
4047 __ subu(a3, a0, a1);
4048 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4049 __ Addu(a3, a2, Operand(t3));
4050 __ lw(v0, MemOperand(a3, kDisplacement));
4051 __ Ret();
4052
4053 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4054 // by calling the runtime system.
4055 __ bind(&slow);
4056 __ push(a1);
4057 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01004058}
4059
4060
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004061void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004062 // sp[0] : number of parameters
4063 // sp[4] : receiver displacement
4064 // sp[8] : function
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004065 // Check if the calling frame is an arguments adaptor frame.
4066 Label runtime;
4067 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4068 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004069 __ Branch(&runtime, ne,
4070 a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004071
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004072 // Patch the arguments.length and the parameters pointer in the current frame.
4073 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4074 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4075 __ sll(t3, a2, 1);
4076 __ Addu(a3, a3, Operand(t3));
4077 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4078 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
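// In effect (sketch), with a3 = adaptor frame pointer and a2 = the adaptor's
// smi-tagged argument count:
//   sp[0] = a2;                                              // arguments.length
//   sp[1] = a3 + arg_count * kPointerSize + kCallerSPOffset; // parameters pointer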
4079
4080 __ bind(&runtime);
4081 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4082}
4083
4084
4085void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4086 // Stack layout:
4087 // sp[0] : number of parameters (tagged)
4088 // sp[4] : address of receiver argument
4089 // sp[8] : function
4090 // Registers used over whole function:
4091 // t2 : allocated object (tagged)
4092 // t5 : mapped parameter count (tagged)
4093
4094 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4095 // a1 = parameter count (tagged)
4096
4097 // Check if the calling frame is an arguments adaptor frame.
4098 Label runtime;
4099 Label adaptor_frame, try_allocate;
4100 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4101 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004102 __ Branch(&adaptor_frame, eq, a2,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004103 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4104
4105 // No adaptor, parameter count = argument count.
4106 __ mov(a2, a1);
4107 __ b(&try_allocate);
4108 __ nop(); // Branch delay slot nop.
4109
4110 // We have an adaptor frame. Patch the parameters pointer.
4111 __ bind(&adaptor_frame);
4112 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4113 __ sll(t6, a2, 1);
4114 __ Addu(a3, a3, Operand(t6));
4115 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4116 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4117
4118 // a1 = parameter count (tagged)
4119 // a2 = argument count (tagged)
4120 // Compute the mapped parameter count = min(a1, a2) in a1.
4121 Label skip_min;
4122 __ Branch(&skip_min, lt, a1, Operand(a2));
4123 __ mov(a1, a2);
4124 __ bind(&skip_min);
4125
4126 __ bind(&try_allocate);
4127
4128 // Compute the sizes of backing store, parameter map, and arguments object.
4129 // 1. Parameter map, has 2 extra words containing context and backing store.
4130 const int kParameterMapHeaderSize =
4131 FixedArray::kHeaderSize + 2 * kPointerSize;
4132 // If there are no mapped parameters, we do not need the parameter_map.
4133 Label param_map_size;
4134 ASSERT_EQ(0, Smi::FromInt(0));
4135 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4136 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
4137 __ sll(t5, a1, 1);
4138 __ addiu(t5, t5, kParameterMapHeaderSize);
4139 __ bind(&param_map_size);
4140
4141 // 2. Backing store.
4142 __ sll(t6, a2, 1);
4143 __ Addu(t5, t5, Operand(t6));
4144 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4145
4146 // 3. Arguments object.
4147 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4148
4149 // Do the allocation of all three objects in one go.
4150 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
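// Size accumulated in t5 above, as a rough formula (counts are untagged here;
// the smi-tagged registers are shifted left by one to scale them to bytes):
//   size = (mapped_count == 0 ? 0
//                             : kParameterMapHeaderSize + mapped_count * kPointerSize)
//        + FixedArray::kHeaderSize + arg_count * kPointerSize
//        + Heap::kArgumentsObjectSize;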
4151
4152 // v0 = address of new object(s) (tagged)
4153 // a2 = argument count (tagged)
4154 // Get the arguments boilerplate from the current (global) context into t0.
4155 const int kNormalOffset =
4156 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4157 const int kAliasedOffset =
4158 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4159
4160 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4161 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4162 Label skip2_ne, skip2_eq;
4163 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4164 __ lw(t0, MemOperand(t0, kNormalOffset));
4165 __ bind(&skip2_ne);
4166
4167 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4168 __ lw(t0, MemOperand(t0, kAliasedOffset));
4169 __ bind(&skip2_eq);
4170
4171 // v0 = address of new object (tagged)
4172 // a1 = mapped parameter count (tagged)
4173 // a2 = argument count (tagged)
4174 // t0 = address of boilerplate object (tagged)
4175 // Copy the JS object part.
4176 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4177 __ lw(a3, FieldMemOperand(t0, i));
4178 __ sw(a3, FieldMemOperand(v0, i));
4179 }
4180
Ben Murdoch85b71792012-04-11 18:30:58 +01004181 // Set up the callee in-object property.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004182 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4183 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4184 const int kCalleeOffset = JSObject::kHeaderSize +
4185 Heap::kArgumentsCalleeIndex * kPointerSize;
4186 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4187
4188 // Use the length (smi tagged) and set that as an in-object property too.
4189 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4190 const int kLengthOffset = JSObject::kHeaderSize +
4191 Heap::kArgumentsLengthIndex * kPointerSize;
4192 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4193
Ben Murdoch85b71792012-04-11 18:30:58 +01004194 // Set up the elements pointer in the allocated arguments object.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004195 // If we allocated a parameter map, t0 will point there, otherwise
4196 // it will point to the backing store.
4197 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4198 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4199
4200 // v0 = address of new object (tagged)
4201 // a1 = mapped parameter count (tagged)
4202 // a2 = argument count (tagged)
4203 // t0 = address of parameter map or backing store (tagged)
4204 // Initialize parameter map. If there are no mapped arguments, we're done.
4205 Label skip_parameter_map;
4206 Label skip3;
4207 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4208 // Move backing store address to a3, because it is
4209 // expected there when filling in the unmapped arguments.
4210 __ mov(a3, t0);
4211 __ bind(&skip3);
4212
4213 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4214
4215 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4216 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4217 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4218 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4219 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4220 __ sll(t6, a1, 1);
4221 __ Addu(t2, t0, Operand(t6));
4222 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4223 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4224
4225 // Copy the parameter slots and the holes in the arguments.
4226 // We need to fill in mapped_parameter_count slots. They index the context,
4227 // where parameters are stored in reverse order, at
4228 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4229 // The mapped parameters thus need to get indices
4230 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4231 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4232 // We loop from right to left.
4233 Label parameters_loop, parameters_test;
4234 __ mov(t2, a1);
4235 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4236 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4237 __ Subu(t5, t5, Operand(a1));
4238 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4239 __ sll(t6, t2, 1);
4240 __ Addu(a3, t0, Operand(t6));
4241 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4242
4243 // t2 = loop variable (tagged)
4244 // a1 = mapping index (tagged)
4245 // a3 = address of backing store (tagged)
4246 // t0 = address of parameter map (tagged)
4247 // t1 = temporary scratch (a.o., for address calculation)
4248 // t3 = the hole value
4249 __ jmp(&parameters_test);
4250
4251 __ bind(&parameters_loop);
4252 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4253 __ sll(t1, t2, 1);
4254 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4255 __ Addu(t6, t0, t1);
4256 __ sw(t5, MemOperand(t6));
4257 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4258 __ Addu(t6, a3, t1);
4259 __ sw(t3, MemOperand(t6));
4260 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4261 __ bind(&parameters_test);
4262 __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4263
4264 __ bind(&skip_parameter_map);
4265 // a2 = argument count (tagged)
4266 // a3 = address of backing store (tagged)
4267 // t1 = scratch
4268 // Copy arguments header and remaining slots (if there are any).
4269 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4270 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4271 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4272
4273 Label arguments_loop, arguments_test;
4274 __ mov(t5, a1);
4275 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4276 __ sll(t6, t5, 1);
4277 __ Subu(t0, t0, Operand(t6));
4278 __ jmp(&arguments_test);
4279
4280 __ bind(&arguments_loop);
4281 __ Subu(t0, t0, Operand(kPointerSize));
4282 __ lw(t2, MemOperand(t0, 0));
4283 __ sll(t6, t5, 1);
4284 __ Addu(t1, a3, Operand(t6));
4285 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4286 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4287
4288 __ bind(&arguments_test);
4289 __ Branch(&arguments_loop, lt, t5, Operand(a2));
4290
4291 // Return and remove the on-stack parameters.
Ben Murdoch85b71792012-04-11 18:30:58 +01004292 __ Addu(sp, sp, Operand(3 * kPointerSize));
4293 __ Ret();
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004294
4295 // Do the runtime call to allocate the arguments object.
Ben Murdoch85b71792012-04-11 18:30:58 +01004296 // a2 = argument count (tagged)
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004297 __ bind(&runtime);
4298 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4299 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4300}
4301
4302
4303void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4304 // sp[0] : number of parameters
4305 // sp[4] : receiver displacement
4306 // sp[8] : function
Ben Murdoch257744e2011-11-30 15:57:28 +00004307 // Check if the calling frame is an arguments adaptor frame.
4308 Label adaptor_frame, try_allocate, runtime;
4309 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4310 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4311 __ Branch(&adaptor_frame,
4312 eq,
4313 a3,
4314 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4315
4316 // Get the length from the frame.
4317 __ lw(a1, MemOperand(sp, 0));
4318 __ Branch(&try_allocate);
4319
4320 // Patch the arguments.length and the parameters pointer.
4321 __ bind(&adaptor_frame);
4322 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4323 __ sw(a1, MemOperand(sp, 0));
4324 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4325 __ Addu(a3, a2, Operand(at));
4326
4327 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4328 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4329
4330 // Try the new space allocation. Start out with computing the size
4331 // of the arguments object and the elements array in words.
4332 Label add_arguments_object;
4333 __ bind(&try_allocate);
4334 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4335 __ srl(a1, a1, kSmiTagSize);
4336
4337 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4338 __ bind(&add_arguments_object);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004339 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00004340
4341 // Do the allocation of both objects in one go.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004342 __ AllocateInNewSpace(a1,
4343 v0,
4344 a2,
4345 a3,
4346 &runtime,
4347 static_cast<AllocationFlags>(TAG_OBJECT |
4348 SIZE_IN_WORDS));
Ben Murdoch257744e2011-11-30 15:57:28 +00004349
4350 // Get the arguments boilerplate from the current (global) context.
4351 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4352 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004353 __ lw(t0, MemOperand(t0, Context::SlotOffset(
4354 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004355
4356 // Copy the JS object part.
4357 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4358
Ben Murdoch257744e2011-11-30 15:57:28 +00004359 // Get the length (smi tagged) and set that as an in-object property too.
4360 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4361 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4362 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004363 Heap::kArgumentsLengthIndex * kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00004364
4365 Label done;
4366 __ Branch(&done, eq, a1, Operand(zero_reg));
4367
4368 // Get the parameters pointer from the stack.
4369 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4370
Ben Murdoch85b71792012-04-11 18:30:58 +01004371 // Set up the elements pointer in the allocated arguments object and
Ben Murdoch257744e2011-11-30 15:57:28 +00004372 // initialize the header in the elements fixed array.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004373 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
Ben Murdoch257744e2011-11-30 15:57:28 +00004374 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4375 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4376 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4377 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004378 // Untag the length for the loop.
4379 __ srl(a1, a1, kSmiTagSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00004380
4381 // Copy the fixed array slots.
4382 Label loop;
Ben Murdoch85b71792012-04-11 18:30:58 +01004383 // Set up t0 to point to the first array slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00004384 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4385 __ bind(&loop);
4386 // Pre-decrement a2 with kPointerSize on each iteration.
4387 // Pre-decrement in order to skip receiver.
4388 __ Addu(a2, a2, Operand(-kPointerSize));
4389 __ lw(a3, MemOperand(a2));
4390 // Post-increment t0 with kPointerSize on each iteration.
4391 __ sw(a3, MemOperand(t0));
4392 __ Addu(t0, t0, Operand(kPointerSize));
4393 __ Subu(a1, a1, Operand(1));
4394 __ Branch(&loop, ne, a1, Operand(zero_reg));
4395
4396 // Return and remove the on-stack parameters.
4397 __ bind(&done);
Ben Murdoch85b71792012-04-11 18:30:58 +01004398 __ Addu(sp, sp, Operand(3 * kPointerSize));
4399 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00004400
4401 // Do the runtime call to allocate the arguments object.
4402 __ bind(&runtime);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004403 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01004404}
4405
4406
4407void RegExpExecStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004408 // Just jump directly to runtime if native RegExp is not selected at compile
4409 // time or if the regexp entry in generated code is turned off at runtime
4410 // via the flag checked below.
4411#ifdef V8_INTERPRETED_REGEXP
4412 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4413#else // V8_INTERPRETED_REGEXP
Ben Murdoch85b71792012-04-11 18:30:58 +01004414 if (!FLAG_regexp_entry_native) {
4415 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4416 return;
4417 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004418
4419 // Stack frame on entry.
4420 // sp[0]: last_match_info (expected JSArray)
4421 // sp[4]: previous index
4422 // sp[8]: subject string
4423 // sp[12]: JSRegExp object
4424
4425 static const int kLastMatchInfoOffset = 0 * kPointerSize;
4426 static const int kPreviousIndexOffset = 1 * kPointerSize;
4427 static const int kSubjectOffset = 2 * kPointerSize;
4428 static const int kJSRegExpOffset = 3 * kPointerSize;
4429
4430 Label runtime, invoke_regexp;
4431
4432 // Allocation of registers for this function. These are in callee save
4433 // registers and will be preserved by the call to the native RegExp code, as
4434 // this code is called using the normal C calling convention. When calling
4435 // directly from generated code the native RegExp code will not do a GC and
4436 // therefore the contents of these registers are safe to use after the call.
4437 // MIPS - using s0..s2, since we are not using CEntry Stub.
4438 Register subject = s0;
4439 Register regexp_data = s1;
4440 Register last_match_info_elements = s2;
4441
4442 // Ensure that a RegExp stack is allocated.
4443 ExternalReference address_of_regexp_stack_memory_address =
4444 ExternalReference::address_of_regexp_stack_memory_address(
Ben Murdoch85b71792012-04-11 18:30:58 +01004445 masm->isolate());
Ben Murdoch257744e2011-11-30 15:57:28 +00004446 ExternalReference address_of_regexp_stack_memory_size =
Ben Murdoch85b71792012-04-11 18:30:58 +01004447 ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
Ben Murdoch257744e2011-11-30 15:57:28 +00004448 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4449 __ lw(a0, MemOperand(a0, 0));
4450 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4451
4452 // Check that the first argument is a JSRegExp object.
4453 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4454 STATIC_ASSERT(kSmiTag == 0);
4455 __ JumpIfSmi(a0, &runtime);
4456 __ GetObjectType(a0, a1, a1);
4457 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4458
4459 // Check that the RegExp has been compiled (data contains a fixed array).
4460 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4461 if (FLAG_debug_code) {
4462 __ And(t0, regexp_data, Operand(kSmiTagMask));
4463 __ Check(nz,
4464 "Unexpected type for RegExp data, FixedArray expected",
4465 t0,
4466 Operand(zero_reg));
4467 __ GetObjectType(regexp_data, a0, a0);
4468 __ Check(eq,
4469 "Unexpected type for RegExp data, FixedArray expected",
4470 a0,
4471 Operand(FIXED_ARRAY_TYPE));
4472 }
4473
4474 // regexp_data: RegExp data (FixedArray)
4475 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4476 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4477 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4478
4479 // regexp_data: RegExp data (FixedArray)
4480 // Check that the number of captures fit in the static offsets vector buffer.
4481 __ lw(a2,
4482 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4483 // Calculate number of capture registers (number_of_captures + 1) * 2. This
4484 // uses the assumption that smis are 2 * their untagged value.
4485 STATIC_ASSERT(kSmiTag == 0);
4486 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4487 __ Addu(a2, a2, Operand(2)); // a2 was a smi.
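  // For example, a pattern with 2 capture groups needs (2 + 1) * 2 = 6
  // offsets: one start/end pair for the whole match plus one pair per capture.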
4488 // Check that the static offsets vector buffer is large enough.
4489 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4490
4491 // a2: Number of capture registers
4492 // regexp_data: RegExp data (FixedArray)
4493 // Check that the second argument is a string.
4494 __ lw(subject, MemOperand(sp, kSubjectOffset));
4495 __ JumpIfSmi(subject, &runtime);
4496 __ GetObjectType(subject, a0, a0);
4497 __ And(a0, a0, Operand(kIsNotStringMask));
4498 STATIC_ASSERT(kStringTag == 0);
4499 __ Branch(&runtime, ne, a0, Operand(zero_reg));
4500
4501 // Get the length of the string to r3.
4502 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4503
4504 // a2: Number of capture registers
4505 // a3: Length of subject string as a smi
4506 // subject: Subject string
4507 // regexp_data: RegExp data (FixedArray)
4508 // Check that the third argument is a positive smi less than the subject
4509 // string length. A negative value will be greater (unsigned comparison).
4510 __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004511 __ And(at, a0, Operand(kSmiTagMask));
4512 __ Branch(&runtime, ne, at, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00004513 __ Branch(&runtime, ls, a3, Operand(a0));
4514
4515 // a2: Number of capture registers
4516 // subject: Subject string
4517 // regexp_data: RegExp data (FixedArray)
4518 // Check that the fourth object is a JSArray object.
4519 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4520 __ JumpIfSmi(a0, &runtime);
4521 __ GetObjectType(a0, a1, a1);
4522 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4523 // Check that the JSArray is in fast case.
4524 __ lw(last_match_info_elements,
4525 FieldMemOperand(a0, JSArray::kElementsOffset));
4526 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4527 __ Branch(&runtime, ne, a0, Operand(
Ben Murdoch85b71792012-04-11 18:30:58 +01004528 masm->isolate()->factory()->fixed_array_map()));
Ben Murdoch257744e2011-11-30 15:57:28 +00004529 // Check that the last match info has space for the capture registers and the
4530 // additional information.
4531 __ lw(a0,
4532 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4533 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4534 __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4535 __ Branch(&runtime, gt, a2, Operand(at));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004536
4537 // Reset offset for possibly sliced string.
4538 __ mov(t0, zero_reg);
Ben Murdoch257744e2011-11-30 15:57:28 +00004539 // subject: Subject string
4540 // regexp_data: RegExp data (FixedArray)
4541 // Check the representation and encoding of the subject string.
4542 Label seq_string;
4543 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4544 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004545 // First check for flat string.
4546 __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
Ben Murdoch257744e2011-11-30 15:57:28 +00004547 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004548 __ Branch(&seq_string, eq, a1, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00004549
4550 // subject: Subject string
4551 // a0: instance type of Subject string
4552 // regexp_data: RegExp data (FixedArray)
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004553 // Check for flat cons string or sliced string.
Ben Murdoch257744e2011-11-30 15:57:28 +00004554 // A flat cons string is a cons string where the second part is the empty
4555 // string. In that case the subject string is just the first part of the cons
4556 // string. Also in this case the first part of the cons string is known to be
4557 // a sequential string or an external string.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004558 // In the case of a sliced string its offset has to be taken into account.
Ben Murdoch85b71792012-04-11 18:30:58 +01004559 Label cons_string, check_encoding;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004560 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4561 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
4562 __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
Ben Murdoch85b71792012-04-11 18:30:58 +01004563 __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004564
4565 // String is sliced.
4566 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4567 __ sra(t0, t0, kSmiTagSize);
4568 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4569 // t0: offset of sliced string, untagged.
4570 __ jmp(&check_encoding);
4571 // String is a cons string, check whether it is flat.
4572 __ bind(&cons_string);
Ben Murdoch257744e2011-11-30 15:57:28 +00004573 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4574 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4575 __ Branch(&runtime, ne, a0, Operand(a1));
4576 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004577 // Is first part of cons or parent of slice a flat string?
4578 __ bind(&check_encoding);
Ben Murdoch257744e2011-11-30 15:57:28 +00004579 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4580 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00004581 STATIC_ASSERT(kSeqStringTag == 0);
4582 __ And(at, a0, Operand(kStringRepresentationMask));
Ben Murdoch85b71792012-04-11 18:30:58 +01004583 __ Branch(&runtime, ne, at, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00004584
4585 __ bind(&seq_string);
4586 // subject: Subject string
4587 // regexp_data: RegExp data (FixedArray)
4588 // a0: Instance type of subject string
4589 STATIC_ASSERT(kStringEncodingMask == 4);
4590 STATIC_ASSERT(kAsciiStringTag == 4);
4591 STATIC_ASSERT(kTwoByteStringTag == 0);
4592 // Find the code object based on the assumptions above.
Ben Murdoch85b71792012-04-11 18:30:58 +01004593 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
Ben Murdoch257744e2011-11-30 15:57:28 +00004594 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004595 __ sra(a3, a0, 2); // a3 is 1 for ascii, 0 for UC16 (used below).
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004596 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004597 __ movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
Ben Murdoch257744e2011-11-30 15:57:28 +00004598
4599 // Check that the irregexp code has been generated for the actual string
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004600 // encoding. If it has, the field contains a code object; otherwise it contains
4601 // a smi (code flushing support).
4602 __ JumpIfSmi(t9, &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00004603
4604 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4605 // t9: code
4606 // subject: Subject string
4607 // regexp_data: RegExp data (FixedArray)
4608 // Load used arguments before starting to push arguments for call to native
4609 // RegExp code to avoid handling changing stack height.
4610 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
4611 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
4612
4613 // a1: previous index
4614 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4615 // t9: code
4616 // subject: Subject string
4617 // regexp_data: RegExp data (FixedArray)
4618 // All checks done. Now push arguments for native regexp code.
Ben Murdoch85b71792012-04-11 18:30:58 +01004619 __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
Ben Murdoch257744e2011-11-30 15:57:28 +00004620 1, a0, a2);
4621
4622 // Isolates: note we add an additional parameter here (isolate pointer).
4623 static const int kRegExpExecuteArguments = 8;
4624 static const int kParameterRegisters = 4;
4625 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4626
4627 // Stack pointer now points to cell where return address is to be written.
4628 // Arguments are before that on the stack or in registers, meaning we
4629 // treat the return address as argument 5. Thus every argument after that
4630 // needs to be shifted back by 1. Since DirectCEntryStub will handle
4631 // allocating space for the c argument slots, we don't need to calculate
4632 // that into the argument positions on the stack. This is how the stack will
4633 // look (sp meaning the value of sp at this moment):
4634 // [sp + 4] - Argument 8
4635 // [sp + 3] - Argument 7
4636 // [sp + 2] - Argument 6
4637 // [sp + 1] - Argument 5
4638 // [sp + 0] - saved ra
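  // Arguments 1..4 (subject string, previous index, start and end of string
  // data) are passed in a0..a3 per the MIPS calling convention; only
  // arguments 5..8 live on the stack as shown above.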
4639
4640 // Argument 8: Pass current isolate address.
4641 // CFunctionArgumentOperand handles MIPS stack argument slots.
4642 __ li(a0, Operand(ExternalReference::isolate_address()));
4643 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
4644
4645 // Argument 7: Indicate that this is a direct call from JavaScript.
4646 __ li(a0, Operand(1));
4647 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
4648
4649 // Argument 6: Start (high end) of backtracking stack memory area.
4650 __ li(a0, Operand(address_of_regexp_stack_memory_address));
4651 __ lw(a0, MemOperand(a0, 0));
4652 __ li(a2, Operand(address_of_regexp_stack_memory_size));
4653 __ lw(a2, MemOperand(a2, 0));
4654 __ addu(a0, a0, a2);
4655 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
4656
4657 // Argument 5: static offsets vector buffer.
4658 __ li(a0, Operand(
Ben Murdoch85b71792012-04-11 18:30:58 +01004659 ExternalReference::address_of_static_offsets_vector(masm->isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00004660 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
4661
4662 // For arguments 4 and 3 get string length, calculate start of string data
4663 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
Ben Murdoch85b71792012-04-11 18:30:58 +01004664 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4665 __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
Ben Murdoch257744e2011-11-30 15:57:28 +00004666 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004667 // Load the length from the original subject string from the previous stack
4668 // frame. Therefore we have to use fp, which points exactly to two pointer
4669 // sizes below the previous sp. (Because creating a new stack frame pushes
4670 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
Ben Murdoch589d6972011-11-30 16:04:58 +00004671 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004672 // If slice offset is not 0, load the length from the original sliced string.
4673 // Argument 4, a3: End of string data
4674 // Argument 3, a2: Start of string data
4675 // Prepare start and end index of the input.
4676 __ sllv(t1, t0, a3);
4677 __ addu(t0, t2, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00004678 __ sllv(t1, a1, a3);
4679 __ addu(a2, t0, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00004680
Ben Murdoch589d6972011-11-30 16:04:58 +00004681 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004682 __ sra(t2, t2, kSmiTagSize);
4683 __ sllv(t1, t2, a3);
4684 __ addu(a3, t0, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00004685 // Argument 2 (a1): Previous index.
4686 // Already there
4687
4688 // Argument 1 (a0): Subject string.
Ben Murdoch589d6972011-11-30 16:04:58 +00004689 __ mov(a0, subject);
Ben Murdoch257744e2011-11-30 15:57:28 +00004690
4691 // Locate the code entry and call it.
4692 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
4693 DirectCEntryStub stub;
4694 stub.GenerateCall(masm, t9);
4695
4696 __ LeaveExitFrame(false, no_reg);
4697
4698 // v0: result
4699 // subject: subject string (callee saved)
4700 // regexp_data: RegExp data (callee saved)
4701 // last_match_info_elements: Last match info elements (callee saved)
4702
4703 // Check the result.
4704
4705 Label success;
Ben Murdoch85b71792012-04-11 18:30:58 +01004706 __ Branch(&success, eq,
4707 v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
Ben Murdoch257744e2011-11-30 15:57:28 +00004708 Label failure;
Ben Murdoch85b71792012-04-11 18:30:58 +01004709 __ Branch(&failure, eq,
4710 v0, Operand(NativeRegExpMacroAssembler::FAILURE));
Ben Murdoch257744e2011-11-30 15:57:28 +00004711 // If not exception it can only be retry. Handle that in the runtime system.
Ben Murdoch85b71792012-04-11 18:30:58 +01004712 __ Branch(&runtime, ne,
4713 v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
Ben Murdoch257744e2011-11-30 15:57:28 +00004714 // Result must now be exception. If there is no pending exception already, a
4715 // stack overflow (on the backtrack stack) was detected in RegExp code but
4716 // the exception has not been created yet. Handle that in the runtime system.
4717 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
Ben Murdoch85b71792012-04-11 18:30:58 +01004718 __ li(a1, Operand(
4719 ExternalReference::the_hole_value_location(masm->isolate())));
4720 __ lw(a1, MemOperand(a1, 0));
Ben Murdoch589d6972011-11-30 16:04:58 +00004721 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch85b71792012-04-11 18:30:58 +01004722 masm->isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00004723 __ lw(v0, MemOperand(a2, 0));
Ben Murdoch589d6972011-11-30 16:04:58 +00004724 __ Branch(&runtime, eq, v0, Operand(a1));
Ben Murdoch257744e2011-11-30 15:57:28 +00004725
4726 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
4727
4728 // Check if the exception is a termination. If so, throw as uncatchable.
4729 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
4730 Label termination_exception;
Ben Murdoch589d6972011-11-30 16:04:58 +00004731 __ Branch(&termination_exception, eq, v0, Operand(a0));
Ben Murdoch257744e2011-11-30 15:57:28 +00004732
Ben Murdoch85b71792012-04-11 18:30:58 +01004733 __ Throw(v0); // Expects thrown value in v0.
Ben Murdoch257744e2011-11-30 15:57:28 +00004734
4735 __ bind(&termination_exception);
Ben Murdoch85b71792012-04-11 18:30:58 +01004736 __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
Ben Murdoch257744e2011-11-30 15:57:28 +00004737
4738 __ bind(&failure);
4739 // For failure and exception return null.
Ben Murdoch85b71792012-04-11 18:30:58 +01004740 __ li(v0, Operand(masm->isolate()->factory()->null_value()));
4741 __ Addu(sp, sp, Operand(4 * kPointerSize));
4742 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00004743
4744 // Process the result from the native regexp code.
4745 __ bind(&success);
4746 __ lw(a1,
4747 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4748 // Calculate number of capture registers (number_of_captures + 1) * 2.
4749 STATIC_ASSERT(kSmiTag == 0);
4750 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4751 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
4752
4753 // a1: number of capture registers
4754 // subject: subject string
4755 // Store the capture count.
4756 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
4757 __ sw(a2, FieldMemOperand(last_match_info_elements,
4758 RegExpImpl::kLastCaptureCountOffset));
4759 // Store last subject and last input.
Ben Murdoch85b71792012-04-11 18:30:58 +01004760 __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
Ben Murdoch257744e2011-11-30 15:57:28 +00004761 __ sw(subject,
4762 FieldMemOperand(last_match_info_elements,
4763 RegExpImpl::kLastSubjectOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004764 __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
Ben Murdoch257744e2011-11-30 15:57:28 +00004765 __ sw(subject,
4766 FieldMemOperand(last_match_info_elements,
4767 RegExpImpl::kLastInputOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004768 __ mov(a3, last_match_info_elements);
4769 __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
Ben Murdoch257744e2011-11-30 15:57:28 +00004770
4771 // Get the static offsets vector filled by the native regexp code.
4772 ExternalReference address_of_static_offsets_vector =
Ben Murdoch85b71792012-04-11 18:30:58 +01004773 ExternalReference::address_of_static_offsets_vector(masm->isolate());
Ben Murdoch257744e2011-11-30 15:57:28 +00004774 __ li(a2, Operand(address_of_static_offsets_vector));
4775
4776 // a1: number of capture registers
4777 // a2: offsets vector
4778 Label next_capture, done;
4779 // Capture register counter starts from number of capture registers and
4780 // counts down until wrapping after zero.
4781 __ Addu(a0,
4782 last_match_info_elements,
4783 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
4784 __ bind(&next_capture);
4785 __ Subu(a1, a1, Operand(1));
4786 __ Branch(&done, lt, a1, Operand(zero_reg));
4787 // Read the value from the static offsets vector buffer.
4788 __ lw(a3, MemOperand(a2, 0));
4789 __ addiu(a2, a2, kPointerSize);
4790 // Store the smi value in the last match info.
4791 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
4792 __ sw(a3, MemOperand(a0, 0));
4793 __ Branch(&next_capture, USE_DELAY_SLOT);
Ben Murdoch85b71792012-04-11 18:30:58 +01004794 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00004795
4796 __ bind(&done);
4797
4798 // Return last match info.
4799 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004800 __ Addu(sp, sp, Operand(4 * kPointerSize));
4801 __ Ret();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004802
Ben Murdoch257744e2011-11-30 15:57:28 +00004803 // Do the runtime call to execute the regexp.
4804 __ bind(&runtime);
4805 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4806#endif // V8_INTERPRETED_REGEXP
Steve Block44f0eee2011-05-26 01:26:41 +01004807}
4808
4809
4810void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004811 const int kMaxInlineLength = 100;
4812 Label slowcase;
4813 Label done;
4814 __ lw(a1, MemOperand(sp, kPointerSize * 2));
4815 STATIC_ASSERT(kSmiTag == 0);
4816 STATIC_ASSERT(kSmiTagSize == 1);
4817 __ JumpIfNotSmi(a1, &slowcase);
4818 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
4819 // Smi-tagging is equivalent to multiplying by 2.
4820 // Allocate RegExpResult followed by FixedArray with size in a2.
4821 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4822 // Elements: [Map][Length][..elements..]
4823 // Size of JSArray with two in-object properties and the header of a
4824 // FixedArray.
4825 int objects_size =
4826 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
4827 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
4828 __ Addu(a2, t1, Operand(objects_size));
4829 __ AllocateInNewSpace(
4830 a2, // In: Size, in words.
4831 v0, // Out: Start of allocation (tagged).
4832 a3, // Scratch register.
4833 t0, // Scratch register.
4834 &slowcase,
4835 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
4836 // v0: Start of allocated area, object-tagged.
4837 // a1: Number of elements in array, as smi.
4838 // t1: Number of elements, untagged.
4839
4840 // Set JSArray map to global.regexp_result_map().
4841 // Set empty properties FixedArray.
4842 // Set elements to point to FixedArray allocated right after the JSArray.
4843 // Interleave operations for better latency.
4844 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
4845 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
4846 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
4847 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
4848 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
4849 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
4850 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
4851 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
4852
4853 // Set input, index and length fields from arguments.
4854 __ lw(a1, MemOperand(sp, kPointerSize * 0));
4855 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01004856 __ lw(a1, MemOperand(sp, kPointerSize * 1));
4857 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
4858 __ lw(a1, MemOperand(sp, kPointerSize * 2));
4859 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00004860
4861 // Fill out the elements FixedArray.
4862 // v0: JSArray, tagged.
4863 // a3: FixedArray, tagged.
4864 // t1: Number of elements in array, untagged.
4865
4866 // Set map.
4867 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
4868 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
4869 // Set FixedArray length.
4870 __ sll(t2, t1, kSmiTagSize);
4871 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4872 // Fill contents of fixed-array with the-hole.
4873 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
4874 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4875 // Fill fixed array elements with hole.
4876 // v0: JSArray, tagged.
4877 // a2: the hole.
4878 // a3: Start of elements in FixedArray.
4879 // t1: Number of elements to fill.
4880 Label loop;
4881 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
4882 __ addu(t1, t1, a3); // Point past last element to store.
4883 __ bind(&loop);
4884 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
4885 __ sw(a2, MemOperand(a3));
4886 __ Branch(&loop, USE_DELAY_SLOT);
4887 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
4888
4889 __ bind(&done);
Ben Murdoch85b71792012-04-11 18:30:58 +01004890 __ Addu(sp, sp, Operand(3 * kPointerSize));
4891 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00004892
4893 __ bind(&slowcase);
4894 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01004895}
4896
4897
4898void CallFunctionStub::Generate(MacroAssembler* masm) {
Ben Murdoch85b71792012-04-11 18:30:58 +01004899 Label slow;
Ben Murdoch257744e2011-11-30 15:57:28 +00004900
4901 // The receiver might implicitly be the global object. This is
4902 // indicated by passing the hole as the receiver to the call
4903 // function stub.
4904 if (ReceiverMightBeImplicit()) {
4905 Label call;
4906 // Get the receiver from the stack.
4907 // function, receiver [, arguments]
4908 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
4909 // Call as function is indicated with the hole.
4910 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4911 __ Branch(&call, ne, t0, Operand(at));
4912 // Patch the receiver on the stack with the global receiver object.
Ben Murdoch85b71792012-04-11 18:30:58 +01004913 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4914 __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
4915 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00004916 __ bind(&call);
4917 }
4918
Ben Murdoch85b71792012-04-11 18:30:58 +01004919 // Get the function to call from the stack.
4920 // function, receiver [, arguments]
4921 __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
4922
Ben Murdoch257744e2011-11-30 15:57:28 +00004923 // Check that the function is really a JavaScript function.
4924 // a1: pushed function (to be verified)
Ben Murdoch85b71792012-04-11 18:30:58 +01004925 __ JumpIfSmi(a1, &slow);
Ben Murdoch257744e2011-11-30 15:57:28 +00004926 // Get the map of the function object.
4927 __ GetObjectType(a1, a2, a2);
4928 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
4929
4930 // Fast-case: Invoke the function now.
4931 // a1: pushed function
4932 ParameterCount actual(argc_);
4933
4934 if (ReceiverMightBeImplicit()) {
4935 Label call_as_function;
4936 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4937 __ Branch(&call_as_function, eq, t0, Operand(at));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004938 __ InvokeFunction(a1,
4939 actual,
4940 JUMP_FUNCTION,
4941 NullCallWrapper(),
4942 CALL_AS_METHOD);
Ben Murdoch257744e2011-11-30 15:57:28 +00004943 __ bind(&call_as_function);
4944 }
4945 __ InvokeFunction(a1,
4946 actual,
4947 JUMP_FUNCTION,
4948 NullCallWrapper(),
4949 CALL_AS_FUNCTION);
4950
4951 // Slow-case: Non-function called.
4952 __ bind(&slow);
4953 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4954 // of the original receiver from the call site).
4955 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
Ben Murdoch85b71792012-04-11 18:30:58 +01004956 __ li(a0, Operand(argc_)); // Set up the number of arguments.
Ben Murdoch257744e2011-11-30 15:57:28 +00004957 __ mov(a2, zero_reg);
4958 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004959 __ SetCallKind(t1, CALL_AS_METHOD);
Ben Murdoch257744e2011-11-30 15:57:28 +00004960 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
4961 RelocInfo::CODE_TARGET);
Steve Block44f0eee2011-05-26 01:26:41 +01004962}
4963
4964
4965// Unfortunately you have to run without snapshots to see most of these
4966// names in the profile since most compare stubs end up in the snapshot.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004967void CompareStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004968 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
4969 (lhs_.is(a1) && rhs_.is(a0)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004970 const char* cc_name;
4971 switch (cc_) {
4972 case lt: cc_name = "LT"; break;
4973 case gt: cc_name = "GT"; break;
4974 case le: cc_name = "LE"; break;
4975 case ge: cc_name = "GE"; break;
4976 case eq: cc_name = "EQ"; break;
4977 case ne: cc_name = "NE"; break;
4978 default: cc_name = "UnknownCondition"; break;
4979 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004980 bool is_equality = cc_ == eq || cc_ == ne;
4981 stream->Add("CompareStub_%s", cc_name);
4982 stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
4983 stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
4984 if (strict_ && is_equality) stream->Add("_STRICT");
4985 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
4986 if (!include_number_compare_) stream->Add("_NO_NUMBER");
4987 if (!include_smi_compare_) stream->Add("_NO_SMI");
Steve Block44f0eee2011-05-26 01:26:41 +01004988}
4989
4990
4991int CompareStub::MinorKey() {
Ben Murdoch257744e2011-11-30 15:57:28 +00004992 // Encode the two parameters in a unique 16 bit value.
4993 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
4994 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
4995 (lhs_.is(a1) && rhs_.is(a0)));
4996 return ConditionField::encode(static_cast<unsigned>(cc_))
4997 | RegisterField::encode(lhs_.is(a0))
4998 | StrictField::encode(strict_)
4999 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5000 | IncludeSmiCompareField::encode(include_smi_compare_);
Steve Block44f0eee2011-05-26 01:26:41 +01005001}
5002
5003
Ben Murdoch257744e2011-11-30 15:57:28 +00005004// StringCharCodeAtGenerator.
Steve Block44f0eee2011-05-26 01:26:41 +01005005void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005006 Label flat_string;
5007 Label ascii_string;
5008 Label got_char_code;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005009 Label sliced_string;
Ben Murdoch257744e2011-11-30 15:57:28 +00005010
Ben Murdoch85b71792012-04-11 18:30:58 +01005011 ASSERT(!t0.is(scratch_));
Ben Murdoch257744e2011-11-30 15:57:28 +00005012 ASSERT(!t0.is(index_));
5013 ASSERT(!t0.is(result_));
5014 ASSERT(!t0.is(object_));
5015
5016 // If the receiver is a smi trigger the non-string case.
5017 __ JumpIfSmi(object_, receiver_not_string_);
5018
5019 // Fetch the instance type of the receiver into result register.
5020 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5021 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5022 // If the receiver is not a string trigger the non-string case.
5023 __ And(t0, result_, Operand(kIsNotStringMask));
5024 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5025
5026 // If the index is non-smi trigger the non-smi case.
5027 __ JumpIfNotSmi(index_, &index_not_smi_);
5028
Ben Murdoch85b71792012-04-11 18:30:58 +01005029 // Put smi-tagged index into scratch register.
5030 __ mov(scratch_, index_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005031 __ bind(&got_smi_index_);
5032
5033 // Check for index out of range.
5034 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01005035 __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
Ben Murdoch257744e2011-11-30 15:57:28 +00005036
Ben Murdoch85b71792012-04-11 18:30:58 +01005037 // We need special handling for non-flat strings.
5038 STATIC_ASSERT(kSeqStringTag == 0);
5039 __ And(t0, result_, Operand(kStringRepresentationMask));
5040 __ Branch(&flat_string, eq, t0, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00005041
Ben Murdoch85b71792012-04-11 18:30:58 +01005042 // Handle non-flat strings.
5043 __ And(result_, result_, Operand(kStringRepresentationMask));
5044 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5045 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
5046 __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
5047 __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
Ben Murdoch257744e2011-11-30 15:57:28 +00005048
Ben Murdoch85b71792012-04-11 18:30:58 +01005049 // ConsString.
5050 // Check whether the right hand side is the empty string (i.e. if
5051 // this is really a flat string in a cons string). If that is not
5052 // the case we would rather go to the runtime system now to flatten
5053 // the string.
5054 Label assure_seq_string;
5055 __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
5056 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
5057 __ Branch(&call_runtime_, ne, result_, Operand(t0));
5058
5059 // Get the first of the two strings and load its instance type.
5060 __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
5061 __ jmp(&assure_seq_string);
5062
5063 // SlicedString, unpack and add offset.
5064 __ bind(&sliced_string);
5065 __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
5066 __ addu(scratch_, scratch_, result_);
5067 __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
5068
5069 // Assure that we are dealing with a sequential string. Go to runtime if not.
5070 __ bind(&assure_seq_string);
5071 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5072 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5073 // Check that parent is not an external string. Go to runtime otherwise.
5074 STATIC_ASSERT(kSeqStringTag == 0);
5075
5076 __ And(t0, result_, Operand(kStringRepresentationMask));
5077 __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
5078
5079 // Check for 1-byte or 2-byte string.
5080 __ bind(&flat_string);
5081 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
5082 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5083 __ And(t0, result_, Operand(kStringEncodingMask));
5084 __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
5085
5086 // 2-byte string.
5087 // Load the 2-byte character code into the result register. We can
5088 // add without shifting since the smi tag size is the log2 of the
5089 // number of bytes in a two-byte character.
5090 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
5091 __ Addu(scratch_, object_, Operand(scratch_));
5092 __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
5093 __ Branch(&got_char_code);
5094
5095 // ASCII string.
5096 // Load the byte into the result register.
5097 __ bind(&ascii_string);
5098
5099 __ srl(t0, scratch_, kSmiTagSize);
5100 __ Addu(scratch_, object_, t0);
5101
5102 __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
5103
5104 __ bind(&got_char_code);
Ben Murdoch257744e2011-11-30 15:57:28 +00005105 __ sll(result_, result_, kSmiTagSize);
5106 __ bind(&exit_);
Steve Block44f0eee2011-05-26 01:26:41 +01005107}
5108
5109
5110void StringCharCodeAtGenerator::GenerateSlow(
Ben Murdoch85b71792012-04-11 18:30:58 +01005111 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005112 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5113
5114 // Index is not a smi.
5115 __ bind(&index_not_smi_);
5116 // If index is a heap number, try converting it to an integer.
5117 __ CheckMap(index_,
Ben Murdoch85b71792012-04-11 18:30:58 +01005118 scratch_,
Ben Murdoch257744e2011-11-30 15:57:28 +00005119 Heap::kHeapNumberMapRootIndex,
5120 index_not_number_,
5121 DONT_DO_SMI_CHECK);
5122 call_helper.BeforeCall(masm);
5123 // Consumed by runtime conversion function:
Ben Murdoch85b71792012-04-11 18:30:58 +01005124 __ Push(object_, index_, index_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005125 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5126 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5127 } else {
5128 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5129 // NumberToSmi discards numbers that are not exact integers.
5130 __ CallRuntime(Runtime::kNumberToSmi, 1);
5131 }
5132
5133 // Save the conversion result before the pop instructions below
5134 // have a chance to overwrite it.
5135
Ben Murdoch85b71792012-04-11 18:30:58 +01005136 __ Move(scratch_, v0);
5137
5138 __ pop(index_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005139 __ pop(object_);
5140 // Reload the instance type.
5141 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5142 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5143 call_helper.AfterCall(masm);
5144 // If index is still not a smi, it must be out of range.
Ben Murdoch85b71792012-04-11 18:30:58 +01005145 __ JumpIfNotSmi(scratch_, index_out_of_range_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005146 // Otherwise, return to the fast path.
5147 __ Branch(&got_smi_index_);
5148
5149 // Call runtime. We get here when the receiver is a string and the
5150 // index is a number, but the code of getting the actual character
5151 // is too complex (e.g., when the string needs to be flattened).
5152 __ bind(&call_runtime_);
5153 call_helper.BeforeCall(masm);
5154 __ Push(object_, index_);
5155 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5156
5157 __ Move(result_, v0);
5158
5159 call_helper.AfterCall(masm);
5160 __ jmp(&exit_);
5161
5162 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
Steve Block44f0eee2011-05-26 01:26:41 +01005163}
5164
5165
5166// -------------------------------------------------------------------------
5167// StringCharFromCodeGenerator
5168
5169void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005170 // Fast case of Heap::LookupSingleCharacterStringFromCode.
5171
5172 ASSERT(!t0.is(result_));
5173 ASSERT(!t0.is(code_));
5174
5175 STATIC_ASSERT(kSmiTag == 0);
5176 STATIC_ASSERT(kSmiShiftSize == 0);
5177 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5178 __ And(t0,
5179 code_,
5180 Operand(kSmiTagMask |
5181 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5182 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
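  // The combined mask catches both non-smi values and smis outside the
  // ASCII range [0, String::kMaxAsciiCharCode]; either case goes to the
  // slow path.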
5183
5184 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5185 // At this point code register contains smi tagged ASCII char code.
5186 STATIC_ASSERT(kSmiTag == 0);
5187 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5188 __ Addu(result_, result_, t0);
5189 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5190 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5191 __ Branch(&slow_case_, eq, result_, Operand(t0));
5192 __ bind(&exit_);
Steve Block44f0eee2011-05-26 01:26:41 +01005193}
5194
5195
5196void StringCharFromCodeGenerator::GenerateSlow(
Ben Murdoch85b71792012-04-11 18:30:58 +01005197 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005198 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5199
5200 __ bind(&slow_case_);
5201 call_helper.BeforeCall(masm);
5202 __ push(code_);
5203 __ CallRuntime(Runtime::kCharFromCode, 1);
5204 __ Move(result_, v0);
5205
5206 call_helper.AfterCall(masm);
5207 __ Branch(&exit_);
5208
5209 __ Abort("Unexpected fallthrough from CharFromCode slow case");
Steve Block44f0eee2011-05-26 01:26:41 +01005210}
5211
5212
5213// -------------------------------------------------------------------------
5214// StringCharAtGenerator
5215
5216void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005217 char_code_at_generator_.GenerateFast(masm);
5218 char_from_code_generator_.GenerateFast(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01005219}
5220
5221
5222void StringCharAtGenerator::GenerateSlow(
Ben Murdoch85b71792012-04-11 18:30:58 +01005223 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005224 char_code_at_generator_.GenerateSlow(masm, call_helper);
5225 char_from_code_generator_.GenerateSlow(masm, call_helper);
Steve Block44f0eee2011-05-26 01:26:41 +01005226}
5227
5228
Ben Murdoch85b71792012-04-11 18:30:58 +01005229class StringHelper : public AllStatic {
5230 public:
5231 // Generate code for copying characters using a simple loop. This should only
5232 // be used in places where the number of characters is small and the
5233 // additional setup and checking in GenerateCopyCharactersLong adds too much
5234 // overhead. Copying of overlapping regions is not supported.
5235 // Dest register ends at the position after the last character written.
5236 static void GenerateCopyCharacters(MacroAssembler* masm,
5237 Register dest,
5238 Register src,
5239 Register count,
5240 Register scratch,
5241 bool ascii);
5242
5243 // Generate code for copying a large number of characters. This function
5244 // is allowed to spend extra time setting up conditions to make copying
5245 // faster. Copying of overlapping regions is not supported.
5246 // Dest register ends at the position after the last character written.
5247 static void GenerateCopyCharactersLong(MacroAssembler* masm,
5248 Register dest,
5249 Register src,
5250 Register count,
5251 Register scratch1,
5252 Register scratch2,
5253 Register scratch3,
5254 Register scratch4,
5255 Register scratch5,
5256 int flags);
5257
5258
5259 // Probe the symbol table for a two character string. If the string is
5260 // not found by probing, a jump to the label not_found is performed. This jump
5261 // does not guarantee that the string is not in the symbol table. If the
5262 // string is found, the code falls through with the string in register v0.
5263 // Contents of both c1 and c2 registers are modified. At the exit c1 is
5264 // guaranteed to contain halfword with low and high bytes equal to
5265 // initial contents of c1 and c2 respectively.
5266 static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5267 Register c1,
5268 Register c2,
5269 Register scratch1,
5270 Register scratch2,
5271 Register scratch3,
5272 Register scratch4,
5273 Register scratch5,
5274 Label* not_found);
5275
5276 // Generate string hash.
5277 static void GenerateHashInit(MacroAssembler* masm,
5278 Register hash,
5279 Register character);
5280
5281 static void GenerateHashAddCharacter(MacroAssembler* masm,
5282 Register hash,
5283 Register character);
5284
5285 static void GenerateHashGetHash(MacroAssembler* masm,
5286 Register hash);
5287
5288 private:
5289 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
5290};
5291
5292
Steve Block44f0eee2011-05-26 01:26:41 +01005293void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5294 Register dest,
5295 Register src,
5296 Register count,
5297 Register scratch,
5298 bool ascii) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005299 Label loop;
5300 Label done;
5301 // This loop just copies one character at a time, as it is only used for
5302 // very short strings.
5303 if (!ascii) {
5304 __ addu(count, count, count);
5305 }
5306 __ Branch(&done, eq, count, Operand(zero_reg));
5307 __ addu(count, dest, count); // Count now points to the last dest byte.
5308
5309 __ bind(&loop);
5310 __ lbu(scratch, MemOperand(src));
5311 __ addiu(src, src, 1);
5312 __ sb(scratch, MemOperand(dest));
5313 __ addiu(dest, dest, 1);
5314 __ Branch(&loop, lt, dest, Operand(count));
5315
5316 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01005317}
5318
5319
5320enum CopyCharactersFlags {
5321 COPY_ASCII = 1,
5322 DEST_ALWAYS_ALIGNED = 2
5323};
5324
5325
5326void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5327 Register dest,
5328 Register src,
5329 Register count,
5330 Register scratch1,
5331 Register scratch2,
5332 Register scratch3,
5333 Register scratch4,
5334 Register scratch5,
5335 int flags) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005336 bool ascii = (flags & COPY_ASCII) != 0;
5337 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5338
5339 if (dest_always_aligned && FLAG_debug_code) {
5340 // Check that destination is actually word aligned if the flag says
5341 // that it is.
5342 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5343 __ Check(eq,
5344 "Destination of copy not aligned.",
5345 scratch4,
5346 Operand(zero_reg));
5347 }
5348
5349 const int kReadAlignment = 4;
5350 const int kReadAlignmentMask = kReadAlignment - 1;
5351 // Ensure that reading an entire aligned word containing the last character
5352 // of a string will not read outside the allocated area (because we pad up
5353 // to kObjectAlignment).
5354 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5355 // Assumes word reads and writes are little endian.
5356 // Nothing to do for zero characters.
5357 Label done;
5358
5359 if (!ascii) {
5360 __ addu(count, count, count);
5361 }
5362 __ Branch(&done, eq, count, Operand(zero_reg));
5363
5364 Label byte_loop;
5365 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5366 __ Subu(scratch1, count, Operand(8));
5367 __ Addu(count, dest, Operand(count));
5368 Register limit = count; // Copy until dest equals this.
5369 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5370
5371 if (!dest_always_aligned) {
5372 // Align dest by byte copying. Copies between zero and three bytes.
5373 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5374 Label dest_aligned;
5375 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5376 Label aligned_loop;
5377 __ bind(&aligned_loop);
5378 __ lbu(scratch1, MemOperand(src));
5379 __ addiu(src, src, 1);
5380 __ sb(scratch1, MemOperand(dest));
5381 __ addiu(dest, dest, 1);
5382 __ addiu(scratch4, scratch4, 1);
5383 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5384 __ bind(&dest_aligned);
5385 }
5386
5387 Label simple_loop;
5388
5389 __ And(scratch4, src, Operand(kReadAlignmentMask));
5390 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5391
5392 // Loop for src/dst that are not aligned the same way.
5393 // This loop uses lwl and lwr instructions. These instructions
5394 // depend on the endianness, and the implementation assumes little-endian.
5395 {
5396 Label loop;
5397 __ bind(&loop);
5398 __ lwr(scratch1, MemOperand(src));
5399 __ Addu(src, src, Operand(kReadAlignment));
5400 __ lwl(scratch1, MemOperand(src, -1));
5401 __ sw(scratch1, MemOperand(dest));
5402 __ Addu(dest, dest, Operand(kReadAlignment));
5403 __ Subu(scratch2, limit, dest);
5404 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5405 }
5406
5407 __ Branch(&byte_loop);
5408
5409 // Simple loop.
5410 // Copy words from src to dest, until less than four bytes left.
5411 // Both src and dest are word aligned.
5412 __ bind(&simple_loop);
5413 {
5414 Label loop;
5415 __ bind(&loop);
5416 __ lw(scratch1, MemOperand(src));
5417 __ Addu(src, src, Operand(kReadAlignment));
5418 __ sw(scratch1, MemOperand(dest));
5419 __ Addu(dest, dest, Operand(kReadAlignment));
5420 __ Subu(scratch2, limit, dest);
5421 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5422 }
5423
5424 // Copy bytes from src to dest until dest hits limit.
5425 __ bind(&byte_loop);
5426 // Test if dest has already reached the limit.
5427 __ Branch(&done, ge, dest, Operand(limit));
5428 __ lbu(scratch1, MemOperand(src));
5429 __ addiu(src, src, 1);
5430 __ sb(scratch1, MemOperand(dest));
5431 __ addiu(dest, dest, 1);
5432 __ Branch(&byte_loop);
5433
5434 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01005435}
5436
5437
5438void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5439 Register c1,
5440 Register c2,
5441 Register scratch1,
5442 Register scratch2,
5443 Register scratch3,
5444 Register scratch4,
5445 Register scratch5,
5446 Label* not_found) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005447 // Register scratch3 is the general scratch register in this function.
5448 Register scratch = scratch3;
5449
5450 // Make sure that both characters are not digits as such strings has a
5451 // different hash algorithm. Don't try to look for these in the symbol table.
5452 Label not_array_index;
5453 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5454 __ Branch(&not_array_index,
5455 Ugreater,
5456 scratch,
5457 Operand(static_cast<int>('9' - '0')));
5458 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5459
5460 // If the check failed, combine both characters into a single halfword.
5461 // This is required by the contract of the method: code at the
5462 // not_found branch expects this combination in c1 register.
5463 Label tmp;
5464 __ sll(scratch1, c2, kBitsPerByte);
5465 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5466 __ Or(c1, c1, scratch1);
5467 __ bind(&tmp);
Ben Murdoch85b71792012-04-11 18:30:58 +01005468 __ Branch(not_found,
5469 Uless_equal,
5470 scratch,
5471 Operand(static_cast<int>('9' - '0')));
Ben Murdoch257744e2011-11-30 15:57:28 +00005472
5473 __ bind(&not_array_index);
5474 // Calculate the two character string hash.
5475 Register hash = scratch1;
5476 StringHelper::GenerateHashInit(masm, hash, c1);
5477 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5478 StringHelper::GenerateHashGetHash(masm, hash);
5479
5480 // Collect the two characters in a register.
5481 Register chars = c1;
5482 __ sll(scratch, c2, kBitsPerByte);
5483 __ Or(chars, chars, scratch);
5484
5485 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5486 // hash: hash of two character string.
5487
5488 // Load symbol table.
5489 // Load address of first element of the symbol table.
5490 Register symbol_table = c2;
5491 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5492
5493 Register undefined = scratch4;
5494 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5495
5496 // Calculate capacity mask from the symbol table capacity.
5497 Register mask = scratch2;
5498 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5499 __ sra(mask, mask, 1);
5500 __ Addu(mask, mask, -1);
5501
5502 // Calculate untagged address of the first element of the symbol table.
5503 Register first_symbol_table_element = symbol_table;
5504 __ Addu(first_symbol_table_element, symbol_table,
5505 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5506
5507 // Registers.
5508 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5509 // hash: hash of two character string
5510 // mask: capacity mask
5511 // first_symbol_table_element: address of the first element of
5512 // the symbol table
5513 // undefined: the undefined object
5514 // scratch: -
5515
5516 // Perform a number of probes in the symbol table.
5517 static const int kProbes = 4;
5518 Label found_in_symbol_table;
5519 Label next_probe[kProbes];
5520 Register candidate = scratch5; // Scratch register contains candidate.
5521 for (int i = 0; i < kProbes; i++) {
5522 // Calculate entry in symbol table.
5523 if (i > 0) {
5524 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5525 } else {
5526 __ mov(candidate, hash);
5527 }
5528
5529 __ And(candidate, candidate, Operand(mask));
5530
5531 // Load the entry from the symbol table.
5532 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5533 __ sll(scratch, candidate, kPointerSizeLog2);
5534 __ Addu(scratch, scratch, first_symbol_table_element);
5535 __ lw(candidate, MemOperand(scratch));
5536
5537 // If entry is undefined no string with this hash can be found.
5538 Label is_string;
5539 __ GetObjectType(candidate, scratch, scratch);
5540 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5541
5542 __ Branch(not_found, eq, undefined, Operand(candidate));
Ben Murdoch85b71792012-04-11 18:30:58 +01005543 // Must be null (deleted entry).
Ben Murdoch257744e2011-11-30 15:57:28 +00005544 if (FLAG_debug_code) {
Ben Murdoch85b71792012-04-11 18:30:58 +01005545 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
5546 __ Assert(eq, "oddball in symbol table is not undefined or null",
Ben Murdoch257744e2011-11-30 15:57:28 +00005547 scratch, Operand(candidate));
5548 }
5549 __ jmp(&next_probe[i]);
5550
5551 __ bind(&is_string);
5552
5553 // Check that the candidate is a non-external ASCII string. The instance
5554 // type is still in the scratch register from the GetObjectType
5555 // operation.
5556 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5557
5558 // If length is not 2 the string is not a candidate.
5559 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5560 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5561
5562 // Check if the two characters match.
5563 // Assumes that word load is little endian.
5564 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5565 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5566 __ bind(&next_probe[i]);
5567 }
5568
5569 // No matching 2 character string found by probing.
5570 __ jmp(not_found);
5571
5572 // Scratch register contains result when we fall through to here.
5573 Register result = candidate;
5574 __ bind(&found_in_symbol_table);
5575 __ mov(v0, result);
Steve Block44f0eee2011-05-26 01:26:41 +01005576}
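// In outline, the stub generated above performs the following probe loop
// (a rough pseudocode sketch for orientation only; the slot layout and
// probe offsets are those defined by SymbolTable):
//
//   for (int i = 0; i < kProbes; i++) {
//     entry     = (hash + SymbolTable::GetProbeOffset(i)) & capacity_mask;
//     candidate = symbol table element at entry;
//     if (candidate == undefined) goto not_found;      // hash not present
//     if (candidate is an oddball) continue;           // null: deleted slot
//     if (candidate is a sequential ASCII string of length 2 &&
//         its two characters equal c1 | (c2 << 8)) {
//       v0 = candidate;                                 // found_in_symbol_table
//       return;
//     }
//   }
//   goto not_found;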
5577
5578
5579void StringHelper::GenerateHashInit(MacroAssembler* masm,
Ben Murdochc7cc0282012-03-05 14:35:55 +00005580 Register hash,
5581 Register character) {
5582 // hash = seed + character + ((seed + character) << 10);
5583 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
5584 // Untag smi seed and add the character.
5585 __ SmiUntag(hash);
Ben Murdoch257744e2011-11-30 15:57:28 +00005586 __ addu(hash, hash, character);
Ben Murdochc7cc0282012-03-05 14:35:55 +00005587 __ sll(at, hash, 10);
5588 __ addu(hash, hash, at);
Ben Murdoch257744e2011-11-30 15:57:28 +00005589 // hash ^= hash >> 6;
Ben Murdoch85b71792012-04-11 18:30:58 +01005590 __ sra(at, hash, 6);
Ben Murdoch257744e2011-11-30 15:57:28 +00005591 __ xor_(hash, hash, at);
Steve Block44f0eee2011-05-26 01:26:41 +01005592}
5593
5594
5595void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Ben Murdochc7cc0282012-03-05 14:35:55 +00005596 Register hash,
5597 Register character) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005598 // hash += character;
5599 __ addu(hash, hash, character);
5600 // hash += hash << 10;
5601 __ sll(at, hash, 10);
5602 __ addu(hash, hash, at);
5603 // hash ^= hash >> 6;
Ben Murdoch85b71792012-04-11 18:30:58 +01005604 __ sra(at, hash, 6);
Ben Murdoch257744e2011-11-30 15:57:28 +00005605 __ xor_(hash, hash, at);
Steve Block44f0eee2011-05-26 01:26:41 +01005606}
5607
5608
5609void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Ben Murdochc7cc0282012-03-05 14:35:55 +00005610 Register hash) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005611 // hash += hash << 3;
5612 __ sll(at, hash, 3);
5613 __ addu(hash, hash, at);
5614 // hash ^= hash >> 11;
Ben Murdoch85b71792012-04-11 18:30:58 +01005615 __ sra(at, hash, 11);
Ben Murdoch257744e2011-11-30 15:57:28 +00005616 __ xor_(hash, hash, at);
5617 // hash += hash << 15;
5618 __ sll(at, hash, 15);
5619 __ addu(hash, hash, at);
5620
5621 // if (hash == 0) hash = 27;
Ben Murdoch85b71792012-04-11 18:30:58 +01005622 __ ori(at, zero_reg, 27);
5623 __ movz(hash, at, hash);
Steve Block44f0eee2011-05-26 01:26:41 +01005624}
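// The three hash helpers above implement the seeded one-at-a-time string
// hash used by the two-character symbol table probe. A minimal C sketch of
// the equivalent computation (illustration only; the seed is loaded from
// the Heap::kHashSeedRootIndex root, and the MIPS code uses 32-bit register
// arithmetic with arithmetic right shifts, modeled here with int32_t):
//
//   int32_t StringHash(int32_t seed, const uint8_t* chars, int length) {
//     int32_t hash = seed + chars[0];              // GenerateHashInit
//     hash += hash << 10;
//     hash ^= hash >> 6;
//     for (int i = 1; i < length; i++) {           // GenerateHashAddCharacter
//       hash += chars[i];
//       hash += hash << 10;
//       hash ^= hash >> 6;
//     }
//     hash += hash << 3;                           // GenerateHashGetHash
//     hash ^= hash >> 11;
//     hash += hash << 15;
//     return hash == 0 ? 27 : hash;
//   }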
5625
5626
5627void SubStringStub::Generate(MacroAssembler* masm) {
Ben Murdoch85b71792012-04-11 18:30:58 +01005628 Label sub_string_runtime;
Ben Murdoch257744e2011-11-30 15:57:28 +00005629 // Stack frame on entry.
5630 // ra: return address
5631 // sp[0]: to
5632 // sp[4]: from
5633 // sp[8]: string
5634
5635 // This stub is called from the native-call %_SubString(...), so
5636 // nothing can be assumed about the arguments. It is tested that:
5637 // "string" is a sequential string,
5638 // both "from" and "to" are smis, and
5639 // 0 <= from <= to <= string.length.
5640 // If any of these assumptions fail, we call the runtime system.
5641
5642 static const int kToOffset = 0 * kPointerSize;
5643 static const int kFromOffset = 1 * kPointerSize;
5644 static const int kStringOffset = 2 * kPointerSize;
5645
Ben Murdoch85b71792012-04-11 18:30:58 +01005646 Register to = t2;
5647 Register from = t3;
5648
5649 // Check bounds and smi-ness.
5650 __ lw(to, MemOperand(sp, kToOffset));
5651 __ lw(from, MemOperand(sp, kFromOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00005652 STATIC_ASSERT(kFromOffset == kToOffset + 4);
5653 STATIC_ASSERT(kSmiTag == 0);
5654 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5655
Ben Murdoch85b71792012-04-11 18:30:58 +01005656 __ JumpIfNotSmi(from, &sub_string_runtime);
5657 __ JumpIfNotSmi(to, &sub_string_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00005658
Ben Murdoch85b71792012-04-11 18:30:58 +01005659 __ sra(a3, from, kSmiTagSize); // Remove smi tag.
5660 __ sra(t5, to, kSmiTagSize); // Remove smi tag.
Ben Murdoch257744e2011-11-30 15:57:28 +00005661
Ben Murdoch85b71792012-04-11 18:30:58 +01005662 // a3: from index (untagged smi)
5663 // t5: to index (untagged smi)
Ben Murdoch257744e2011-11-30 15:57:28 +00005664
Ben Murdoch85b71792012-04-11 18:30:58 +01005665 __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
5666
5667 __ subu(a2, t5, a3);
5668 __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
5669
5670 // Special handling of sub-strings of length 1 and 2. One character strings
5671 // are handled in the runtime system (looked up in the single character
5672 // cache). Two character strings are looked up in the symbol table in
5673 // generated code.
5674 __ Branch(&sub_string_runtime, lt, a2, Operand(2));
5675
5676 // Both to and from are smis.
5677
5678 // a2: result string length
5679 // a3: from index (untagged smi)
5680 // t2: (a.k.a. to): to (smi)
5681 // t3: (a.k.a. from): from offset (smi)
5682 // t5: to index (untagged smi)
5683
5684 // Make sure first argument is a sequential (or flat) string.
Ben Murdoch589d6972011-11-30 16:04:58 +00005685 __ lw(v0, MemOperand(sp, kStringOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01005686 __ Branch(&sub_string_runtime, eq, v0, Operand(kSmiTagMask));
5687
Ben Murdoch589d6972011-11-30 16:04:58 +00005688 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00005689 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
Ben Murdoch85b71792012-04-11 18:30:58 +01005690 __ And(t4, v0, Operand(kIsNotStringMask));
Ben Murdoch257744e2011-11-30 15:57:28 +00005691
Ben Murdoch85b71792012-04-11 18:30:58 +01005692 __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00005693
Ben Murdoch589d6972011-11-30 16:04:58 +00005694 // Short-cut for the case of trivial substring.
5695 Label return_v0;
5696 // v0: original string
5697 // a2: result string length
5698 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
5699 __ sra(t0, t0, 1);
5700 __ Branch(&return_v0, eq, a2, Operand(t0));
5701
Ben Murdoch85b71792012-04-11 18:30:58 +01005702 Label create_slice;
5703 if (FLAG_string_slices) {
5704 __ Branch(&create_slice, ge, a2, Operand(SlicedString::kMinLength));
5705 }
5706
5707 // v0: original string
5708 // a1: instance type
5709 // a2: result string length
5710 // a3: from index (untagged smi)
5711 // t2: (a.k.a. to): to (smi)
5712 // t3: (a.k.a. from): from offset (smi)
5713 // t5: to index (untagged smi)
5714
5715 Label seq_string;
5716 __ And(t0, a1, Operand(kStringRepresentationMask));
5717 STATIC_ASSERT(kSeqStringTag < kConsStringTag);
5718 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5719 STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
5720
5721 // Slices and external strings go to runtime.
5722 __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
5723
5724 // Sequential strings are handled directly.
5725 __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
5726
5727 // Cons string. Try to recurse (once) on the first substring.
5728 // (This adds a little more generality than necessary to handle flattened
5729 // cons strings, but not much).
5730 __ lw(v0, FieldMemOperand(v0, ConsString::kFirstOffset));
5731 __ lw(t0, FieldMemOperand(v0, HeapObject::kMapOffset));
5732 __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5733 STATIC_ASSERT(kSeqStringTag == 0);
5734 // Cons, slices and external strings go to runtime.
5735 __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
5736
5737 // Definitely a sequential string.
5738 __ bind(&seq_string);
5739
5740 // v0: original string
5741 // a1: instance type
5742 // a2: result string length
5743 // a3: from index (untagged smi)
5744 // t2: (a.k.a. to): to (smi)
5745 // t3: (a.k.a. from): from offset (smi)
5746 // t5: to index (untagged smi)
5747
5748 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
5749 __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
5750 to = no_reg;
5751
5752 // v0: original string or left hand side of the original cons string.
5753 // a1: instance type
5754 // a2: result string length
5755 // a3: from index (untagged smi)
5756 // t3: (a.k.a. from): from offset (smi)
5757 // t5: to index (untagged smi)
5758
5759 // Check for flat ASCII string.
5760 Label non_ascii_flat;
5761 STATIC_ASSERT(kTwoByteStringTag == 0);
5762
5763 __ And(t4, a1, Operand(kStringEncodingMask));
5764 __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00005765
5766 Label result_longer_than_two;
Ben Murdoch85b71792012-04-11 18:30:58 +01005767 __ Branch(&result_longer_than_two, gt, a2, Operand(2));
Ben Murdoch257744e2011-11-30 15:57:28 +00005768
Ben Murdoch85b71792012-04-11 18:30:58 +01005769 // Sub string of length 2 requested.
Ben Murdoch257744e2011-11-30 15:57:28 +00005770 // Get the two characters forming the sub string.
Ben Murdoch589d6972011-11-30 16:04:58 +00005771 __ Addu(v0, v0, Operand(a3));
5772 __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
5773 __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
Ben Murdoch257744e2011-11-30 15:57:28 +00005774
5775 // Try to lookup two character string in symbol table.
5776 Label make_two_character_string;
5777 StringHelper::GenerateTwoCharacterSymbolTableProbe(
5778 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
Ben Murdoch85b71792012-04-11 18:30:58 +01005779 Counters* counters = masm->isolate()->counters();
Ben Murdoch589d6972011-11-30 16:04:58 +00005780 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00005781
5782 // a2: result string length.
5783 // a3: two characters combined into halfword in little endian byte order.
5784 __ bind(&make_two_character_string);
Ben Murdoch85b71792012-04-11 18:30:58 +01005785 __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00005786 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
Ben Murdoch589d6972011-11-30 16:04:58 +00005787 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00005788
5789 __ bind(&result_longer_than_two);
5790
Ben Murdoch85b71792012-04-11 18:30:58 +01005791 // Locate 'from' character of string.
5792 __ Addu(t1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5793 __ sra(t4, from, 1);
5794 __ Addu(t1, t1, t4);
Ben Murdoch257744e2011-11-30 15:57:28 +00005795
Ben Murdoch85b71792012-04-11 18:30:58 +01005796 // Allocate the result.
5797 __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
Ben Murdochc7cc0282012-03-05 14:35:55 +00005798
Ben Murdoch85b71792012-04-11 18:30:58 +01005799 // v0: result string
5800 // a2: result string length
5801 // a3: from index (untagged smi)
5802 // t1: first character of substring to copy
5803 // t3: (a.k.a. from): from offset (smi)
Ben Murdoch257744e2011-11-30 15:57:28 +00005804 // Locate first character of result.
5805 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
Ben Murdoch257744e2011-11-30 15:57:28 +00005806
Ben Murdoch589d6972011-11-30 16:04:58 +00005807 // v0: result string
5808 // a1: first character of result string
5809 // a2: result string length
5810 // t1: first character of substring to copy
Ben Murdoch257744e2011-11-30 15:57:28 +00005811 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
5812 StringHelper::GenerateCopyCharactersLong(
5813 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
Ben Murdoch589d6972011-11-30 16:04:58 +00005814 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00005815
Ben Murdoch85b71792012-04-11 18:30:58 +01005816 __ bind(&non_ascii_flat);
5817 // a2: result string length
5818 // t1: string
5819 // t3: (a.k.a. from): from offset (smi)
5820 // Check for flat two byte string.
Ben Murdoch257744e2011-11-30 15:57:28 +00005821
Ben Murdoch85b71792012-04-11 18:30:58 +01005822 // Locate 'from' character of string.
5823 __ Addu(t1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5824 // As "from" is a smi it is 2 times the value which matches the size of a two
5825 // byte character.
Ben Murdoch589d6972011-11-30 16:04:58 +00005826 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
Ben Murdoch85b71792012-04-11 18:30:58 +01005827 __ Addu(t1, t1, Operand(from));
5828
5829 // Allocate the result.
5830 __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
5831
5832 // v0: result string
5833 // a2: result string length
5834 // t1: first character of substring to copy
Ben Murdoch257744e2011-11-30 15:57:28 +00005835 // Locate first character of result.
5836 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
Ben Murdoch589d6972011-11-30 16:04:58 +00005837
Ben Murdoch85b71792012-04-11 18:30:58 +01005838 from = no_reg;
5839
Ben Murdoch257744e2011-11-30 15:57:28 +00005840 // v0: result string.
5841 // a1: first character of result.
5842 // a2: result length.
Ben Murdoch589d6972011-11-30 16:04:58 +00005843 // t1: first character of substring to copy.
Ben Murdoch257744e2011-11-30 15:57:28 +00005844 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5845 StringHelper::GenerateCopyCharactersLong(
5846 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
Ben Murdoch85b71792012-04-11 18:30:58 +01005847 __ jmp(&return_v0);
5848
5849 if (FLAG_string_slices) {
5850 __ bind(&create_slice);
5851 // v0: original string
5852 // a1: instance type
5853 // a2: length
5854 // a3: from index (untagged smi)
5855 // t2 (a.k.a. to): to (smi)
5856 // t3 (a.k.a. from): from offset (smi)
5857 Label allocate_slice, sliced_string, seq_string;
5858 STATIC_ASSERT(kSeqStringTag == 0);
5859 __ And(t4, a1, Operand(kStringRepresentationMask));
5860 __ Branch(&seq_string, eq, t4, Operand(zero_reg));
5861 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
5862 STATIC_ASSERT(kIsIndirectStringMask != 0);
5863 __ And(t4, a1, Operand(kIsIndirectStringMask));
5864 // External string. Jump to runtime.
5865 __ Branch(&sub_string_runtime, eq, t4, Operand(zero_reg));
5866
5867 __ And(t4, a1, Operand(kSlicedNotConsMask));
5868 __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
5869 // Cons string. Check whether it is flat, then fetch first part.
5870 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
5871 __ LoadRoot(t5, Heap::kEmptyStringRootIndex);
5872 __ Branch(&sub_string_runtime, ne, t1, Operand(t5));
5873 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
5874 __ jmp(&allocate_slice);
5875
5876 __ bind(&sliced_string);
5877 // Sliced string. Fetch parent and correct start index by offset.
5878 __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
5879 __ addu(t3, t3, t1);
5880 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
5881 __ jmp(&allocate_slice);
5882
5883 __ bind(&seq_string);
5884 // Sequential string. Just move string to the right register.
5885 __ mov(t1, v0);
5886
5887 __ bind(&allocate_slice);
5888 // a1: instance type of original string
5889 // a2: length
5890 // t1: underlying subject string
5891 // t3 (a.k.a. from): from offset (smi)
5892 // Allocate new sliced string. At this point we do not reload the instance
5893 // type including the string encoding because we simply rely on the info
5894 // provided by the original string. It does not matter if the original
5895 // string's encoding is wrong because we always have to recheck encoding of
5896 // the newly created string's parent anyway, due to externalized strings.
5897 Label two_byte_slice, set_slice_header;
5898 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
5899 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5900 __ And(t4, a1, Operand(kStringEncodingMask));
5901 __ Branch(&two_byte_slice, eq, t4, Operand(zero_reg));
5902 __ AllocateAsciiSlicedString(v0, a2, a3, t0, &sub_string_runtime);
5903 __ jmp(&set_slice_header);
5904 __ bind(&two_byte_slice);
5905 __ AllocateTwoByteSlicedString(v0, a2, a3, t0, &sub_string_runtime);
5906 __ bind(&set_slice_header);
5907 __ sw(t3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
5908 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
5909 }
Ben Murdoch589d6972011-11-30 16:04:58 +00005910
5911 __ bind(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00005912 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
Ben Murdoch85b71792012-04-11 18:30:58 +01005913 __ Addu(sp, sp, Operand(3 * kPointerSize));
5914 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00005915
5916 // Just jump to runtime to create the sub string.
Ben Murdoch85b71792012-04-11 18:30:58 +01005917 __ bind(&sub_string_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00005918 __ TailCallRuntime(Runtime::kSubString, 3, 1);
5919}
5920
5921
5922void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
5923 Register left,
5924 Register right,
5925 Register scratch1,
5926 Register scratch2,
5927 Register scratch3) {
5928 Register length = scratch1;
5929
5930 // Compare lengths.
5931 Label strings_not_equal, check_zero_length;
5932 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
5933 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
5934 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
5935 __ bind(&strings_not_equal);
5936 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
5937 __ Ret();
5938
5939 // Check if the length is zero.
5940 Label compare_chars;
5941 __ bind(&check_zero_length);
5942 STATIC_ASSERT(kSmiTag == 0);
5943 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
5944 __ li(v0, Operand(Smi::FromInt(EQUAL)));
5945 __ Ret();
5946
5947 // Compare characters.
5948 __ bind(&compare_chars);
5949
5950 GenerateAsciiCharsCompareLoop(masm,
5951 left, right, length, scratch2, scratch3, v0,
5952 &strings_not_equal);
5953
5954 // Characters are equal.
5955 __ li(v0, Operand(Smi::FromInt(EQUAL)));
5956 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01005957}
5958
5959
5960void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Steve Block44f0eee2011-05-26 01:26:41 +01005961 Register left,
Ben Murdoch257744e2011-11-30 15:57:28 +00005962 Register right,
Steve Block44f0eee2011-05-26 01:26:41 +01005963 Register scratch1,
5964 Register scratch2,
5965 Register scratch3,
5966 Register scratch4) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005967 Label result_not_equal, compare_lengths;
5968 // Find minimum length and length difference.
5969 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
5970 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
5971 __ Subu(scratch3, scratch1, Operand(scratch2));
5972 Register length_delta = scratch3;
5973 __ slt(scratch4, scratch2, scratch1);
Ben Murdoch85b71792012-04-11 18:30:58 +01005974 __ movn(scratch1, scratch2, scratch4);
Ben Murdoch257744e2011-11-30 15:57:28 +00005975 Register min_length = scratch1;
5976 STATIC_ASSERT(kSmiTag == 0);
5977 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
5978
5979 // Compare loop.
5980 GenerateAsciiCharsCompareLoop(masm,
5981 left, right, min_length, scratch2, scratch4, v0,
5982 &result_not_equal);
5983
5984 // Compare lengths - strings up to min-length are equal.
5985 __ bind(&compare_lengths);
5986 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
5987 // Use length_delta as result if it's zero.
5988 __ mov(scratch2, length_delta);
5989 __ mov(scratch4, zero_reg);
5990 __ mov(v0, zero_reg);
5991
5992 __ bind(&result_not_equal);
5993 // Conditionally update the result based on either length_delta or
5994 // the last comparison performed in the loop above.
5995 Label ret;
5996 __ Branch(&ret, eq, scratch2, Operand(scratch4));
5997 __ li(v0, Operand(Smi::FromInt(GREATER)));
5998 __ Branch(&ret, gt, scratch2, Operand(scratch4));
5999 __ li(v0, Operand(Smi::FromInt(LESS)));
6000 __ bind(&ret);
6001 __ Ret();
6002}
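// The stub above is, in effect (a rough C-style sketch; smi tagging and
// register shuffling omitted, character access over sequential ASCII data):
//
//   int left_len = left->length(), right_len = right->length();
//   int delta = left_len - right_len;
//   int min_length = left_len < right_len ? left_len : right_len;
//   for (int i = 0; i < min_length; i++) {
//     if (left[i] != right[i]) {                       // result_not_equal
//       return left[i] < right[i] ? LESS : GREATER;
//     }
//   }
//   return delta < 0 ? LESS : (delta > 0 ? GREATER : EQUAL);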
6003
6004
6005void StringCompareStub::GenerateAsciiCharsCompareLoop(
6006 MacroAssembler* masm,
6007 Register left,
6008 Register right,
6009 Register length,
6010 Register scratch1,
6011 Register scratch2,
6012 Register scratch3,
6013 Label* chars_not_equal) {
6014 // Change index to run from -length to -1 by adding length to string
6015 // start. This means that loop ends when index reaches zero, which
6016 // doesn't need an additional compare.
6017 __ SmiUntag(length);
6018 __ Addu(scratch1, length,
6019 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6020 __ Addu(left, left, Operand(scratch1));
6021 __ Addu(right, right, Operand(scratch1));
6022 __ Subu(length, zero_reg, length);
6023 Register index = length; // index = -length;
6024
6025
6026 // Compare loop.
6027 Label loop;
6028 __ bind(&loop);
6029 __ Addu(scratch3, left, index);
6030 __ lbu(scratch1, MemOperand(scratch3));
6031 __ Addu(scratch3, right, index);
6032 __ lbu(scratch2, MemOperand(scratch3));
6033 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6034 __ Addu(index, index, 1);
6035 __ Branch(&loop, ne, index, Operand(zero_reg));
Steve Block44f0eee2011-05-26 01:26:41 +01006036}
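// A minimal C sketch of the loop generated above (illustration only;
// 'length' is the untagged character count and both operands are
// sequential ASCII strings):
//
//   const uint8_t* left_end = left_chars + length;     // one past the end
//   const uint8_t* right_end = right_chars + length;
//   int index = -length;                               // counts up to 0
//   do {
//     if (left_end[index] != right_end[index]) goto chars_not_equal;
//   } while (++index != 0);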
6037
6038
6039void StringCompareStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006040 Label runtime;
6041
6042 Counters* counters = masm->isolate()->counters();
6043
6044 // Stack frame on entry.
6045 // sp[0]: right string
6046 // sp[4]: left string
6047 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6048 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6049
6050 Label not_same;
6051 __ Branch(&not_same, ne, a0, Operand(a1));
6052 STATIC_ASSERT(EQUAL == 0);
6053 STATIC_ASSERT(kSmiTag == 0);
6054 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6055 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
Ben Murdoch85b71792012-04-11 18:30:58 +01006056 __ Addu(sp, sp, Operand(2 * kPointerSize));
6057 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006058
6059 __ bind(&not_same);
6060
6061 // Check that both objects are sequential ASCII strings.
6062 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6063
6064 // Compare flat ASCII strings natively. Remove arguments from stack first.
6065 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6066 __ Addu(sp, sp, Operand(2 * kPointerSize));
6067 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6068
6069 __ bind(&runtime);
6070 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01006071}
6072
6073
6074void StringAddStub::Generate(MacroAssembler* masm) {
Ben Murdoch85b71792012-04-11 18:30:58 +01006075 Label string_add_runtime, call_builtin;
Ben Murdoch257744e2011-11-30 15:57:28 +00006076 Builtins::JavaScript builtin_id = Builtins::ADD;
6077
6078 Counters* counters = masm->isolate()->counters();
6079
6080 // Stack on entry:
6081 // sp[0]: second argument (right).
6082 // sp[4]: first argument (left).
6083
6084 // Load the two arguments.
6085 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6086 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6087
6088 // Make sure that both arguments are strings if not known in advance.
6089 if (flags_ == NO_STRING_ADD_FLAGS) {
Ben Murdoch85b71792012-04-11 18:30:58 +01006090 __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006091 // Load instance types.
6092 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6093 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6094 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6095 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6096 STATIC_ASSERT(kStringTag == 0);
6097 // If either is not a string, go to runtime.
6098 __ Or(t4, t0, Operand(t1));
6099 __ And(t4, t4, Operand(kIsNotStringMask));
Ben Murdoch85b71792012-04-11 18:30:58 +01006100 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00006101 } else {
6102 // Here at least one of the arguments is definitely a string.
6103 // We convert the one that is not known to be a string.
6104 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6105 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6106 GenerateConvertArgument(
6107 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6108 builtin_id = Builtins::STRING_ADD_RIGHT;
6109 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6110 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6111 GenerateConvertArgument(
6112 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6113 builtin_id = Builtins::STRING_ADD_LEFT;
6114 }
6115 }
6116
6117 // Both arguments are strings.
6118 // a0: first string
6119 // a1: second string
6120 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6121 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6122 {
6123 Label strings_not_empty;
6124 // Check if either of the strings is empty. In that case return the other.
6125 // These tests use a zero-length check on the string length, which is a Smi.
6126 // Assert that Smi::FromInt(0) is really 0.
6127 STATIC_ASSERT(kSmiTag == 0);
6128 ASSERT(Smi::FromInt(0) == 0);
6129 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6130 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6131 __ mov(v0, a0); // Assume we'll return first string (from a0).
Ben Murdoch85b71792012-04-11 18:30:58 +01006132 __ movz(v0, a1, a2); // If first is empty, return second (from a1).
Ben Murdoch257744e2011-11-30 15:57:28 +00006133 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6134 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6135 __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6136 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6137
6138 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdoch85b71792012-04-11 18:30:58 +01006139 __ Addu(sp, sp, Operand(2 * kPointerSize));
6140 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006141
6142 __ bind(&strings_not_empty);
6143 }
6144
6145 // Untag both string-lengths.
6146 __ sra(a2, a2, kSmiTagSize);
6147 __ sra(a3, a3, kSmiTagSize);
6148
6149 // Both strings are non-empty.
6150 // a0: first string
6151 // a1: second string
6152 // a2: length of first string
6153 // a3: length of second string
6154 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6155 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6156 // Look at the length of the result of adding the two strings.
6157 Label string_add_flat_result, longer_than_two;
6158 // Adding two lengths can't overflow.
6159 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6160 __ Addu(t2, a2, Operand(a3));
6161 // Use the symbol table when adding two one character strings, as it
6162 // helps later optimizations to return a symbol here.
6163 __ Branch(&longer_than_two, ne, t2, Operand(2));
6164
6165 // Check that both strings are non-external ASCII strings.
6166 if (flags_ != NO_STRING_ADD_FLAGS) {
6167 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6168 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6169 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6170 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6171 }
6172 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
Ben Murdoch85b71792012-04-11 18:30:58 +01006173 &string_add_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006174
6175 // Get the two characters forming the sub string.
6176 __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6177 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6178
6179 // Try to lookup two character string in symbol table. If it is not found
6180 // just allocate a new one.
6181 Label make_two_character_string;
6182 StringHelper::GenerateTwoCharacterSymbolTableProbe(
Ben Murdoch85b71792012-04-11 18:30:58 +01006183 masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
Ben Murdoch257744e2011-11-30 15:57:28 +00006184 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdoch85b71792012-04-11 18:30:58 +01006185 __ Addu(sp, sp, Operand(2 * kPointerSize));
6186 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006187
6188 __ bind(&make_two_character_string);
6189 // The resulting string has length 2, and the first chars of the two strings
6190 // are combined into a single halfword in the a2 register.
6191 // So we can fill the resulting string without two loops, using a single
6192 // halfword store instruction (which assumes that the processor is
6193 // in little-endian mode).
6194 __ li(t2, Operand(2));
Ben Murdoch85b71792012-04-11 18:30:58 +01006195 __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006196 __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6197 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdoch85b71792012-04-11 18:30:58 +01006198 __ Addu(sp, sp, Operand(2 * kPointerSize));
6199 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006200
6201 __ bind(&longer_than_two);
6202 // Check if resulting string will be flat.
Ben Murdoch85b71792012-04-11 18:30:58 +01006203 __ Branch(&string_add_flat_result, lt, t2,
6204 Operand(String::kMinNonFlatLength));
Ben Murdoch257744e2011-11-30 15:57:28 +00006205 // Handle exceptionally long strings in the runtime system.
6206 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6207 ASSERT(IsPowerOf2(String::kMaxLength + 1));
6208 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
Ben Murdoch85b71792012-04-11 18:30:58 +01006209 __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
Ben Murdoch257744e2011-11-30 15:57:28 +00006210
6211 // If result is not supposed to be flat, allocate a cons string object.
6212 // If both strings are ASCII the result is an ASCII cons string.
6213 if (flags_ != NO_STRING_ADD_FLAGS) {
6214 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6215 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6216 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6217 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6218 }
6219 Label non_ascii, allocated, ascii_data;
6220 STATIC_ASSERT(kTwoByteStringTag == 0);
Ben Murdoch85b71792012-04-11 18:30:58 +01006221 // Branch to non_ascii if either string-encoding field is zero (non-ascii).
Ben Murdoch257744e2011-11-30 15:57:28 +00006222 __ And(t4, t0, Operand(t1));
6223 __ And(t4, t4, Operand(kStringEncodingMask));
6224 __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6225
6226 // Allocate an ASCII cons string.
6227 __ bind(&ascii_data);
Ben Murdoch85b71792012-04-11 18:30:58 +01006228 __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006229 __ bind(&allocated);
6230 // Fill the fields of the cons string.
Ben Murdoch85b71792012-04-11 18:30:58 +01006231 __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
6232 __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
6233 __ mov(v0, t3);
Ben Murdoch257744e2011-11-30 15:57:28 +00006234 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdoch85b71792012-04-11 18:30:58 +01006235 __ Addu(sp, sp, Operand(2 * kPointerSize));
6236 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006237
6238 __ bind(&non_ascii);
6239 // At least one of the strings is two-byte. Check whether it happens
6240 // to contain only ASCII characters.
6241 // t0: first instance type.
6242 // t1: second instance type.
6243 // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
6244 __ And(at, t0, Operand(kAsciiDataHintMask));
6245 __ and_(at, at, t1);
6246 __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6247
6248 __ xor_(t0, t0, t1);
6249 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6250 __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6251 __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6252
6253 // Allocate a two byte cons string.
Ben Murdoch85b71792012-04-11 18:30:58 +01006254 __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006255 __ Branch(&allocated);
6256
Ben Murdoch85b71792012-04-11 18:30:58 +01006257 // Handle creating a flat result. First check that both strings are
6258 // sequential and that they have the same encoding.
Ben Murdoch257744e2011-11-30 15:57:28 +00006259 // a0: first string
6260 // a1: second string
6261 // a2: length of first string
6262 // a3: length of second string
6263 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6264 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6265 // t2: sum of lengths.
6266 __ bind(&string_add_flat_result);
6267 if (flags_ != NO_STRING_ADD_FLAGS) {
6268 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6269 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6270 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6271 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6272 }
Ben Murdoch85b71792012-04-11 18:30:58 +01006273 // Check that both strings are sequential, meaning that we
6274 // branch to runtime if either string tag is non-zero.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006275 STATIC_ASSERT(kSeqStringTag == 0);
Ben Murdoch85b71792012-04-11 18:30:58 +01006276 __ Or(t4, t0, Operand(t1));
6277 __ And(t4, t4, Operand(kStringRepresentationMask));
6278 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
Ben Murdochc7cc0282012-03-05 14:35:55 +00006279
Ben Murdoch85b71792012-04-11 18:30:58 +01006280 // Now check if both strings have the same encoding (ASCII/Two-byte).
6281 // a0: first string
6282 // a1: second string
Ben Murdoch257744e2011-11-30 15:57:28 +00006283 // a2: length of first string
6284 // a3: length of second string
Ben Murdoch85b71792012-04-11 18:30:58 +01006285 // t0: first string instance type
6286 // t1: second string instance type
Ben Murdoch257744e2011-11-30 15:57:28 +00006287 // t2: sum of lengths.
Ben Murdoch85b71792012-04-11 18:30:58 +01006288 Label non_ascii_string_add_flat_result;
6289 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
6290 __ xor_(t3, t1, t0);
6291 __ And(t3, t3, Operand(kStringEncodingMask));
6292 __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
6293 // And see if it's ASCII (0) or two-byte (1).
6294 __ And(t3, t0, Operand(kStringEncodingMask));
6295 __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00006296
Ben Murdoch85b71792012-04-11 18:30:58 +01006297 // Both strings are sequential ASCII strings. We also know that they are
6298 // short (since the sum of the lengths is less than kMinNonFlatLength).
6299 // t2: length of resulting flat string
6300 __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
6301 // Locate first character of result.
6302 __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6303 // Locate first character of first argument.
6304 __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6305 // a0: first character of first string.
6306 // a1: second string.
Ben Murdoch257744e2011-11-30 15:57:28 +00006307 // a2: length of first string.
6308 // a3: length of second string.
6309 // t2: first character of result.
Ben Murdoch85b71792012-04-11 18:30:58 +01006310 // t3: result string.
6311 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00006312
Ben Murdoch85b71792012-04-11 18:30:58 +01006313 // Load second argument and locate first character.
6314 __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6315 // a1: first character of second string.
6316 // a3: length of second string.
Ben Murdoch257744e2011-11-30 15:57:28 +00006317 // t2: next character of result.
Ben Murdoch85b71792012-04-11 18:30:58 +01006318 // t3: result string.
Ben Murdoch257744e2011-11-30 15:57:28 +00006319 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
Ben Murdoch85b71792012-04-11 18:30:58 +01006320 __ mov(v0, t3);
Ben Murdoch257744e2011-11-30 15:57:28 +00006321 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdoch85b71792012-04-11 18:30:58 +01006322 __ Addu(sp, sp, Operand(2 * kPointerSize));
6323 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006324
6325 __ bind(&non_ascii_string_add_flat_result);
Ben Murdoch85b71792012-04-11 18:30:58 +01006326 // Both strings are sequential two byte strings.
6327 // a0: first string.
6328 // a1: second string.
6329 // a2: length of first string.
6330 // a3: length of second string.
6331 // t2: sum of length of strings.
6332 __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
6333 // a0: first string.
6334 // a1: second string.
6335 // a2: length of first string.
6336 // a3: length of second string.
6337 // t3: result string.
6338
6339 // Locate first character of result.
6340 __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6341 // Locate first character of first argument.
6342 __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6343
6344 // a0: first character of first string.
6345 // a1: second string.
Ben Murdoch257744e2011-11-30 15:57:28 +00006346 // a2: length of first string.
6347 // a3: length of second string.
6348 // t2: first character of result.
Ben Murdoch85b71792012-04-11 18:30:58 +01006349 // t3: result string.
6350 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
6351
6352 // Locate first character of second argument.
6353 __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6354
6355 // a1: first character of second string.
6356 // a3: length of second string.
6357 // t2: next character of result (after copy of first string).
6358 // t3: result string.
Ben Murdoch257744e2011-11-30 15:57:28 +00006359 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6360
Ben Murdoch85b71792012-04-11 18:30:58 +01006361 __ mov(v0, t3);
Ben Murdoch257744e2011-11-30 15:57:28 +00006362 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdoch85b71792012-04-11 18:30:58 +01006363 __ Addu(sp, sp, Operand(2 * kPointerSize));
6364 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006365
6366 // Just jump to runtime to add the two strings.
Ben Murdoch85b71792012-04-11 18:30:58 +01006367 __ bind(&string_add_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006368 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6369
6370 if (call_builtin.is_linked()) {
6371 __ bind(&call_builtin);
6372 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6373 }
6374}
6375
6376
6377void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6378 int stack_offset,
6379 Register arg,
6380 Register scratch1,
6381 Register scratch2,
6382 Register scratch3,
6383 Register scratch4,
6384 Label* slow) {
6385 // First check if the argument is already a string.
6386 Label not_string, done;
6387 __ JumpIfSmi(arg, &not_string);
6388 __ GetObjectType(arg, scratch1, scratch1);
6389 __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6390
6391 // Check the number to string cache.
6392 Label not_cached;
6393 __ bind(&not_string);
6394 // Puts the cached result into scratch1.
6395 NumberToStringStub::GenerateLookupNumberStringCache(masm,
6396 arg,
6397 scratch1,
6398 scratch2,
6399 scratch3,
6400 scratch4,
6401 false,
6402 &not_cached);
6403 __ mov(arg, scratch1);
6404 __ sw(arg, MemOperand(sp, stack_offset));
6405 __ jmp(&done);
6406
6407 // Check if the argument is a safe string wrapper.
6408 __ bind(&not_cached);
6409 __ JumpIfSmi(arg, slow);
6410 __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6411 __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6412 __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6413 __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6414 __ And(scratch2, scratch2, scratch4);
6415 __ Branch(slow, ne, scratch2, Operand(scratch4));
6416 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6417 __ sw(arg, MemOperand(sp, stack_offset));
6418
6419 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01006420}
6421
6422
6423void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006424 ASSERT(state_ == CompareIC::SMIS);
6425 Label miss;
6426 __ Or(a2, a1, a0);
6427 __ JumpIfNotSmi(a2, &miss);
6428
6429 if (GetCondition() == eq) {
6430 // For equality we do not care about the sign of the result.
6431 __ Subu(v0, a0, a1);
6432 } else {
6433 // Untag before subtracting to avoid handling overflow.
6434 __ SmiUntag(a1);
6435 __ SmiUntag(a0);
6436 __ Subu(v0, a1, a0);
6437 }
6438 __ Ret();
6439
6440 __ bind(&miss);
6441 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006442}
6443
6444
6445void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006446 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6447
6448 Label generic_stub;
Ben Murdoch85b71792012-04-11 18:30:58 +01006449 Label unordered;
Ben Murdoch257744e2011-11-30 15:57:28 +00006450 Label miss;
6451 __ And(a2, a1, Operand(a0));
6452 __ JumpIfSmi(a2, &generic_stub);
6453
6454 __ GetObjectType(a0, a2, a2);
Ben Murdoch85b71792012-04-11 18:30:58 +01006455 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00006456 __ GetObjectType(a1, a2, a2);
Ben Murdoch85b71792012-04-11 18:30:58 +01006457 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00006458
6459 // Inlining the double comparison and falling back to the general compare
6460 // stub if NaN is involved or FPU is unsupported.
6461 if (CpuFeatures::IsSupported(FPU)) {
6462 CpuFeatures::Scope scope(FPU);
6463
6464 // Load left and right operand.
6465 __ Subu(a2, a1, Operand(kHeapObjectTag));
6466 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6467 __ Subu(a2, a0, Operand(kHeapObjectTag));
6468 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6469
Ben Murdoch85b71792012-04-11 18:30:58 +01006470 Label fpu_eq, fpu_lt, fpu_gt;
6471 // Compare operands (test if unordered).
6472 __ c(UN, D, f0, f2);
6473 // Don't base result on status bits when a NaN is involved.
6474 __ bc1t(&unordered);
6475 __ nop();
Ben Murdoch257744e2011-11-30 15:57:28 +00006476
Ben Murdoch85b71792012-04-11 18:30:58 +01006477 // Test if equal.
6478 __ c(EQ, D, f0, f2);
6479 __ bc1t(&fpu_eq);
6480 __ nop();
Ben Murdoch257744e2011-11-30 15:57:28 +00006481
Ben Murdoch85b71792012-04-11 18:30:58 +01006482 // Test if unordered or less (unordered case is already handled).
6483 __ c(ULT, D, f0, f2);
6484 __ bc1t(&fpu_lt);
6485 __ nop();
Ben Murdoch257744e2011-11-30 15:57:28 +00006486
Ben Murdoch85b71792012-04-11 18:30:58 +01006487 // Otherwise it's greater.
6488 __ bc1f(&fpu_gt);
6489 __ nop();
6490
6491 // Return a result of -1, 0, or 1.
Ben Murdoch257744e2011-11-30 15:57:28 +00006492 __ bind(&fpu_eq);
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01006493 __ li(v0, Operand(EQUAL));
6494 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006495
6496 __ bind(&fpu_lt);
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01006497 __ li(v0, Operand(LESS));
6498 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006499
Ben Murdoch85b71792012-04-11 18:30:58 +01006500 __ bind(&fpu_gt);
6501 __ li(v0, Operand(GREATER));
6502 __ Ret();
6503
6504 __ bind(&unordered);
6505 }
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01006506
Ben Murdoch257744e2011-11-30 15:57:28 +00006507 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6508 __ bind(&generic_stub);
6509 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6510
6511 __ bind(&miss);
6512 GenerateMiss(masm);
6513}
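// In effect, the FPU fast path above computes (sketch only; lhs is the
// heap number referenced by a1, rhs the one referenced by a0, and any NaN
// operand falls through to the generic CompareStub via the unordered label):
//
//   if (isnan(lhs) || isnan(rhs)) goto unordered;
//   return lhs == rhs ? EQUAL : (lhs < rhs ? LESS : GREATER);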
6514
6515
6516void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6517 ASSERT(state_ == CompareIC::SYMBOLS);
6518 Label miss;
6519
6520 // Registers containing left and right operands respectively.
6521 Register left = a1;
6522 Register right = a0;
6523 Register tmp1 = a2;
6524 Register tmp2 = a3;
6525
6526 // Check that both operands are heap objects.
6527 __ JumpIfEitherSmi(left, right, &miss);
6528
6529 // Check that both operands are symbols.
6530 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6531 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6532 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6533 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6534 STATIC_ASSERT(kSymbolTag != 0);
6535 __ And(tmp1, tmp1, Operand(tmp2));
6536 __ And(tmp1, tmp1, kIsSymbolMask);
6537 __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6538 // Make sure a0 is non-zero. At this point input operands are
6539 // guaranteed to be non-zero.
6540 ASSERT(right.is(a0));
6541 STATIC_ASSERT(EQUAL == 0);
6542 STATIC_ASSERT(kSmiTag == 0);
6543 __ mov(v0, right);
6544 // Symbols are compared by identity.
6545 __ Ret(ne, left, Operand(right));
6546 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6547 __ Ret();
6548
6549 __ bind(&miss);
6550 GenerateMiss(masm);
6551}
6552
6553
6554void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6555 ASSERT(state_ == CompareIC::STRINGS);
6556 Label miss;
6557
6558 // Registers containing left and right operands respectively.
6559 Register left = a1;
6560 Register right = a0;
6561 Register tmp1 = a2;
6562 Register tmp2 = a3;
6563 Register tmp3 = t0;
6564 Register tmp4 = t1;
6565 Register tmp5 = t2;
6566
6567 // Check that both operands are heap objects.
6568 __ JumpIfEitherSmi(left, right, &miss);
6569
6570 // Check that both operands are strings. This leaves the instance
6571 // types loaded in tmp1 and tmp2.
6572 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6573 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6574 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6575 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6576 STATIC_ASSERT(kNotStringTag != 0);
6577 __ Or(tmp3, tmp1, tmp2);
6578 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6579 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6580
6581 // Fast check for identical strings.
6582 Label left_ne_right;
6583 STATIC_ASSERT(EQUAL == 0);
6584 STATIC_ASSERT(kSmiTag == 0);
Ben Murdoch85b71792012-04-11 18:30:58 +01006585 __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
Ben Murdoch257744e2011-11-30 15:57:28 +00006586 __ mov(v0, zero_reg); // In the delay slot.
Ben Murdoch85b71792012-04-11 18:30:58 +01006587 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006588 __ bind(&left_ne_right);
6589
6590 // Handle not identical strings.
6591
6592 // Check that both strings are symbols. If they are, we're done
6593 // because we already know they are not identical.
Ben Murdoch85b71792012-04-11 18:30:58 +01006594 ASSERT(GetCondition() == eq);
6595 STATIC_ASSERT(kSymbolTag != 0);
6596 __ And(tmp3, tmp1, Operand(tmp2));
6597 __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6598 Label is_symbol;
6599 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
6600 __ mov(v0, a0); // In the delay slot.
6601 // Make sure a0 is non-zero. At this point input operands are
6602 // guaranteed to be non-zero.
6603 ASSERT(right.is(a0));
6604 __ Ret();
6605 __ bind(&is_symbol);
Ben Murdoch257744e2011-11-30 15:57:28 +00006606
6607 // Check that both strings are sequential ASCII.
6608 Label runtime;
Ben Murdoch85b71792012-04-11 18:30:58 +01006609 __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
6610 &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006611
6612 // Compare flat ASCII strings. Returns when done.
Ben Murdoch85b71792012-04-11 18:30:58 +01006613 StringCompareStub::GenerateFlatAsciiStringEquals(
6614 masm, left, right, tmp1, tmp2, tmp3);
Ben Murdoch257744e2011-11-30 15:57:28 +00006615
6616 // Handle more complex cases in runtime.
6617 __ bind(&runtime);
6618 __ Push(left, right);
Ben Murdoch85b71792012-04-11 18:30:58 +01006619 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00006620
6621 __ bind(&miss);
6622 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006623}
6624
6625
6626void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006627 ASSERT(state_ == CompareIC::OBJECTS);
6628 Label miss;
6629 __ And(a2, a1, Operand(a0));
6630 __ JumpIfSmi(a2, &miss);
6631
6632 __ GetObjectType(a0, a2, a2);
6633 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6634 __ GetObjectType(a1, a2, a2);
6635 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6636
6637 ASSERT(GetCondition() == eq);
Ben Murdoch85b71792012-04-11 18:30:58 +01006638 __ Subu(v0, a0, Operand(a1));
6639 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006640
6641 __ bind(&miss);
6642 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006643}
6644
6645
Ben Murdochc7cc0282012-03-05 14:35:55 +00006646void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
Ben Murdoch85b71792012-04-11 18:30:58 +01006647 __ Push(a1, a0);
6648 __ push(ra);
6649
6650 // Call the runtime system in a fresh internal frame.
6651 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6652 masm->isolate());
6653 __ EnterInternalFrame();
6654 __ Push(a1, a0);
6655 __ li(t0, Operand(Smi::FromInt(op_)));
6656 __ push(t0);
6657 __ CallExternalReference(miss, 3);
6658 __ LeaveInternalFrame();
6659 // Compute the entry point of the rewritten stub.
6660 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
6661 // Restore registers.
6662 __ pop(ra);
6663 __ pop(a0);
6664 __ pop(a1);
Ben Murdoch257744e2011-11-30 15:57:28 +00006665 __ Jump(a2);
6666}
6667
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00006668
Ben Murdoch257744e2011-11-30 15:57:28 +00006669void DirectCEntryStub::Generate(MacroAssembler* masm) {
6670 // No need to pop or drop anything, LeaveExitFrame will restore the old
6671 // stack, thus dropping the allocated space for the return value.
6672 // The saved ra is after the reserved stack space for the 4 args.
6673 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
6674
Ben Murdoch85b71792012-04-11 18:30:58 +01006675 if (FLAG_debug_code && EnableSlowAsserts()) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006676 // In case of an error the return address may point to a memory area
6677 // filled with kZapValue by the GC.
6678 // Dereference the address and check for this.
6679 __ lw(t0, MemOperand(t9));
6680 __ Assert(ne, "Received invalid return address.", t0,
6681 Operand(reinterpret_cast<uint32_t>(kZapValue)));
6682 }
6683 __ Jump(t9);
Steve Block44f0eee2011-05-26 01:26:41 +01006684}
6685
6686
Ben Murdoch257744e2011-11-30 15:57:28 +00006687void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6688 ExternalReference function) {
6689 __ li(t9, Operand(function));
6690 this->GenerateCall(masm, t9);
6691}
6692
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00006693
Ben Murdoch257744e2011-11-30 15:57:28 +00006694void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6695 Register target) {
6696 __ Move(t9, target);
6697 __ AssertStackIsAligned();
6698 // Allocate space for arg slots.
6699 __ Subu(sp, sp, kCArgsSlotsSize);
6700
6701 // Block the trampoline pool through the whole function to make sure the
6702 // number of generated instructions is constant.
6703 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
6704
6705 // We need to get the current 'pc' value, which is not available on MIPS.
6706 Label find_ra;
6707 masm->bal(&find_ra); // ra = pc + 8.
6708 masm->nop(); // Branch delay slot nop.
6709 masm->bind(&find_ra);
6710
6711 const int kNumInstructionsToJump = 6;
6712 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
6713 // Push return address (accessible to GC through exit frame pc).
6714 // This spot for ra was reserved in EnterExitFrame.
6715 masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
Ben Murdoch85b71792012-04-11 18:30:58 +01006716 masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6717 RelocInfo::CODE_TARGET), true);
Ben Murdoch257744e2011-11-30 15:57:28 +00006718 // Call the function.
6719 masm->Jump(t9);
6720 // Make sure the stored 'ra' points to this position.
6721 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
6722}
6723
6724
Ben Murdoch85b71792012-04-11 18:30:58 +01006725MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
6726 MacroAssembler* masm,
6727 Label* miss,
6728 Label* done,
6729 Register receiver,
6730 Register properties,
6731 String* name,
6732 Register scratch0) {
6733// If names of slots in range from 1 to kProbes - 1 for the hash value are
Ben Murdoch257744e2011-11-30 15:57:28 +00006734 // not equal to the name and kProbes-th slot is not used (its name is the
6735 // undefined value), it guarantees the hash table doesn't contain the
6736 // property. It's true even if some slots represent deleted properties
Ben Murdoch85b71792012-04-11 18:30:58 +01006737 // (their names are the null value).
Ben Murdoch257744e2011-11-30 15:57:28 +00006738 for (int i = 0; i < kInlinedProbes; i++) {
6739 // scratch0 points to properties hash.
6740 // Compute the masked index: (hash + i + i * i) & mask.
6741 Register index = scratch0;
6742 // Capacity is smi 2^n.
6743 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
6744 __ Subu(index, index, Operand(1));
6745 __ And(index, index, Operand(
Ben Murdoch85b71792012-04-11 18:30:58 +01006746 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
Ben Murdoch257744e2011-11-30 15:57:28 +00006747
6748 // Scale the index by multiplying by the entry size.
6749 ASSERT(StringDictionary::kEntrySize == 3);
Ben Murdoch85b71792012-04-11 18:30:58 +01006750 // index *= 3.
6751 __ mov(at, index);
6752 __ sll(index, index, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00006753 __ Addu(index, index, at);
6754
6755 Register entity_name = scratch0;
6756 // Having undefined at this place means the name is not contained.
6757 ASSERT_EQ(kSmiTagSize, 1);
6758 Register tmp = properties;
Ben Murdoch85b71792012-04-11 18:30:58 +01006759
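    // |index| is still a smi (value << 1); one more left shift yields
    // value * 4, the byte offset of the entry's key element (one word each).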
Ben Murdoch257744e2011-11-30 15:57:28 +00006760 __ sll(scratch0, index, 1);
6761 __ Addu(tmp, properties, scratch0);
6762 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
6763
6764 ASSERT(!tmp.is(entity_name));
6765 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
6766 __ Branch(done, eq, entity_name, Operand(tmp));
6767
6768 if (i != kInlinedProbes - 1) {
6769 // Stop if found the property.
6770 __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
6771
6772 // Check if the entry name is not a symbol.
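      // A non-symbol key could hold the same characters as |name| without
      // being the same object, so the pointer comparison above cannot prove
      // the property is absent; treat this case as a miss.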
6773 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
6774 __ lbu(entity_name,
6775 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
6776 __ And(scratch0, entity_name, Operand(kIsSymbolMask));
6777 __ Branch(miss, eq, scratch0, Operand(zero_reg));
6778
6779 // Restore the properties.
6780 __ lw(properties,
6781 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
6782 }
6783 }
6784
6785 const int spill_mask =
6786 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
Ben Murdoch85b71792012-04-11 18:30:58 +01006787 a2.bit() | a1.bit() | a0.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00006788
6789 __ MultiPush(spill_mask);
6790 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
6791 __ li(a1, Operand(Handle<String>(name)));
6792 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
Ben Murdoch85b71792012-04-11 18:30:58 +01006793 MaybeObject* result = masm->TryCallStub(&stub);
6794 if (result->IsFailure()) return result;
Ben Murdoch257744e2011-11-30 15:57:28 +00006795 __ MultiPop(spill_mask);
6796
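  // The stub leaves zero in v0 only when the name is provably absent from the
  // dictionary, which is exactly what a negative lookup needs.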
Ben Murdoch85b71792012-04-11 18:30:58 +01006797 __ Branch(done, eq, v0, Operand(zero_reg));
6798 __ Branch(miss, ne, v0, Operand(zero_reg));
6799 return result;
Ben Murdoch257744e2011-11-30 15:57:28 +00006800}
6801
6802
6803// Probe the string dictionary in the |elements| register. Jump to the
6804// |done| label if a property with the given name is found. Jump to
6805// the |miss| label otherwise.
6806// If lookup was successful |scratch2| will be equal to elements + 4 * index.
6807void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
6808 Label* miss,
6809 Label* done,
6810 Register elements,
6811 Register name,
6812 Register scratch1,
6813 Register scratch2) {
6814 // Assert that name contains a string.
6815 if (FLAG_debug_code) __ AbortIfNotString(name);
6816
6817 // Compute the capacity mask.
6818 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
6819 __ sra(scratch1, scratch1, kSmiTagSize); // Convert smi to int.
6820 __ Subu(scratch1, scratch1, Operand(1));
6821
6822 // Generate an unrolled loop that performs a few probes before
6823 // giving up. Measurements done on Gmail indicate that 2 probes
6824 // cover ~93% of loads from dictionaries.
6825 for (int i = 0; i < kInlinedProbes; i++) {
6826 // Compute the masked index: (hash + i + i * i) & mask.
6827 __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
6828 if (i > 0) {
6829 // Add the probe offset (i + i * i) left shifted to avoid right shifting
6830 // the hash in a separate instruction. The value hash + i + i * i is right
6831 // shifted in the And instruction that follows.
6832 ASSERT(StringDictionary::GetProbeOffset(i) <
6833 1 << (32 - String::kHashFieldOffset));
6834 __ Addu(scratch2, scratch2, Operand(
6835 StringDictionary::GetProbeOffset(i) << String::kHashShift));
6836 }
6837 __ srl(scratch2, scratch2, String::kHashShift);
6838 __ And(scratch2, scratch1, scratch2);
6839
6840 // Scale the index by multiplying by the element size.
6841 ASSERT(StringDictionary::kEntrySize == 3);
6842 // scratch2 = scratch2 * 3.
6843
Ben Murdoch85b71792012-04-11 18:30:58 +01006844 __ mov(at, scratch2);
6845 __ sll(scratch2, scratch2, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00006846 __ Addu(scratch2, scratch2, at);
6847
6848 // Check if the key is identical to the name.
6849 __ sll(at, scratch2, 2);
6850 __ Addu(scratch2, elements, at);
6851 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
6852 __ Branch(done, eq, name, Operand(at));
6853 }
6854
6855 const int spill_mask =
6856 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
Ben Murdoch85b71792012-04-11 18:30:58 +01006857 a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
Ben Murdoch257744e2011-11-30 15:57:28 +00006858 ~(scratch1.bit() | scratch2.bit());
6859
6860 __ MultiPush(spill_mask);
Ben Murdoch85b71792012-04-11 18:30:58 +01006861 __ Move(a0, elements);
6862 __ Move(a1, name);
Ben Murdoch257744e2011-11-30 15:57:28 +00006863 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
6864 __ CallStub(&stub);
6865 __ mov(scratch2, a2);
6866 __ MultiPop(spill_mask);
6867
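  // The sense is inverted relative to the negative lookup: a non-zero result
  // in v0 means the key was found, so the positive lookup is done.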
Ben Murdoch85b71792012-04-11 18:30:58 +01006868 __ Branch(done, ne, v0, Operand(zero_reg));
6869 __ Branch(miss, eq, v0, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00006870}
6871
6872
6873void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6874 // Registers:
6875 //  result (v0): set to the result of the lookup before returning.
6876 //  dictionary (a0): StringDictionary to probe.
6877 //  key (a1): key to look up.
6878 //  index (a2): will hold the index of the entry if the lookup succeeds;
6879 //              might alias with result.
6880 // Returns:
6881 //  result is zero if the lookup failed, non-zero otherwise.
6882
6883 Register result = v0;
6884 Register dictionary = a0;
6885 Register key = a1;
6886 Register index = a2;
6887 Register mask = a3;
6888 Register hash = t0;
6889 Register undefined = t1;
6890 Register entry_key = t2;
6891
6892 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
6893
6894 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
6895 __ sra(mask, mask, kSmiTagSize);
6896 __ Subu(mask, mask, Operand(1));
6897
6898 __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
6899
6900 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
6901
6902 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
6903 // Compute the masked index: (hash + i + i * i) & mask.
6904 // Capacity is smi 2^n.
6905 if (i > 0) {
6906 // Add the probe offset (i + i * i) left shifted to avoid right shifting
6907 // the hash in a separate instruction. The value hash + i + i * i is right
6908 // shifted in the And instruction that follows.
6909 ASSERT(StringDictionary::GetProbeOffset(i) <
6910 1 << (32 - String::kHashFieldOffset));
6911 __ Addu(index, hash, Operand(
6912 StringDictionary::GetProbeOffset(i) << String::kHashShift));
6913 } else {
6914 __ mov(index, hash);
6915 }
6916 __ srl(index, index, String::kHashShift);
6917 __ And(index, mask, index);
6918
6919 // Scale the index by multiplying by the entry size.
6920 ASSERT(StringDictionary::kEntrySize == 3);
6921 // index *= 3.
6922 __ mov(at, index);
6923 __ sll(index, index, 1);
6924 __ Addu(index, index, at);
6925
6927 ASSERT_EQ(kSmiTagSize, 1);
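    // |index| is untagged here, so it is shifted left by 2 to form the byte
    // offset (4 bytes per element) before the dictionary base is added.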
6928 __ sll(index, index, 2);
6929 __ Addu(index, index, dictionary);
6930 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
6931
6932 // Having undefined at this place means the name is not contained.
6933 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
6934
6935 // Stop if found the property.
6936 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
6937
6938 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
6939 // Check if the entry name is not a symbol.
6940 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
6941 __ lbu(entry_key,
6942 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
6943 __ And(result, entry_key, Operand(kIsSymbolMask));
6944 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
6945 }
6946 }
6947
6948 __ bind(&maybe_in_dictionary);
6949 // If we are doing negative lookup then probing failure should be
6950 // treated as a lookup success. For positive lookup probing failure
6951 // should be treated as lookup failure.
6952 if (mode_ == POSITIVE_LOOKUP) {
6953 __ mov(result, zero_reg);
Ben Murdoch85b71792012-04-11 18:30:58 +01006954 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006955 }
6956
6957 __ bind(&in_dictionary);
6958 __ li(result, 1);
Ben Murdoch85b71792012-04-11 18:30:58 +01006959 __ Ret();
Ben Murdoch257744e2011-11-30 15:57:28 +00006960
6961 __ bind(&not_in_dictionary);
6962 __ mov(result, zero_reg);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006963 __ Ret();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006964}
6965
6966
Steve Block44f0eee2011-05-26 01:26:41 +01006967#undef __
6968
6969} } // namespace v8::internal
6970
6971#endif // V8_TARGET_ARCH_MIPS