// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in a0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(a0, &check_heap_number);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&call_builtin);
  __ push(a0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(a3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
  __ Push(cp, a3, t0);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // Set up the fixed slots.
  __ li(a1, Operand(Smi::FromInt(0)));
  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the previous context.
  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ Pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
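
// A rough sketch of the result (illustrative only; the slot order simply
// mirrors the stores above): the new function context returned in v0 is
// laid out like a FixedArray:
//
//   map         : function context map
//   length      : Smi(length)
//   CLOSURE     : the function loaded from the stack
//   PREVIOUS    : the current context (cp)
//   EXTENSION   : Smi 0
//   GLOBAL      : the global object copied from the previous context
//   other slots : undefined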


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0, a1, a2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(a3, &after_sentinel);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ Assert(eq, message, a3, Operand(zero_reg));
  }
  __ lw(a3, GlobalObjectOperand());
  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));

  // Copy the global object from the previous context.
  __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    int length,
    FastCloneShallowArrayStub::Mode mode,
    Label* fail) {
  // Registers on entry:
  // a3: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length > 0) {
    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length)
        : FixedArray::SizeFor(length);
  }
  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size,
                        v0,
                        a1,
                        a2,
                        fail,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ lw(a1, FieldMemOperand(a3, i));
      __ sw(a1, FieldMemOperand(v0, i));
    }
  }

  if (length > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ Addu(a2, v0, Operand(JSArray::kSize));
    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));

    // Copy the elements array.
    ASSERT((elements_size % kPointerSize) == 0);
    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
  }
}
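
// A rough sketch of the single allocation made above (illustrative only):
//
//   v0 -> [ JSArray header, JSArray::kSize bytes ]
//   a2 -> [ elements (FixedArray or FixedDoubleArray), elements_size bytes ]
//
// The clone's elements pointer is simply v0 + JSArray::kSize, so copying the
// elements needs no second allocation or limit check.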

void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a3, t0);
  __ lw(a3, MemOperand(t0));
  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t1));

  FastCloneShallowArrayStub::Mode mode = mode_;
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);

    __ bind(&check_fast_elements);
    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
    __ Branch(&double_elements, ne, v0, Operand(t1));
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.
  }

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
    } else {
      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(a3);
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ LoadRoot(at, expected_map_index);
    __ Assert(eq, message, a3, Operand(at));
    __ pop(a3);
  }

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);

  // Return and remove the on-stack parameters.
  __ Addu(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: object literal flags.
  // [sp + kPointerSize]: constant properties.
  // [sp + (2 * kPointerSize)]: literal index.
  // [sp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, t0, a3);
  __ lw(a3, MemOperand(a3));
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t0));

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ lw(a1, FieldMemOperand(a3, i));
    __ sw(a1, FieldMemOperand(a0, i));
  }

  // Return and remove the on-stack parameters.
  __ Drop(4);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ sra(source_, source_, kSmiTagSize);
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
  // Subtract from 0 if source was negative.
  __ subu(at, zero_reg, source_);
  __ movn(source_, at, exponent);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  // Safe to use 'at' as dest reg here.
  __ Or(at, exponent, Operand(exponent_word_for_1));
  __ movn(exponent, at, source_);  // Write exp when source not 0.
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, zero_reg);
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
  __ subu(mantissa, mantissa, zeros_);
  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
  __ Or(exponent, exponent, mantissa);

  // Shift up the source chopping the top bit off.
  __ Addu(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ sllv(source_, source_, zeros_);
  // Compute lower part of fraction (last 12 bits).
  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
  // And the top (top 20 bits).
  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
  __ or_(exponent, exponent, source_);

  __ Ret();
}
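
// Worked example (illustrative only, not generated code): for a source Smi
// holding 6 = 1.5 * 2^2, the untagged value has 29 leading zeros, so the
// biased exponent is 31 + 1023 - 29 = 1025.  Shifting the source up by 30
// chops off the implicit leading 1 and leaves 0x80000000; its top 20 bits
// (0x80000) are OR'ed into the exponent word and the remainder forms the
// low word, giving exponent = 0x40180000 and mantissa = 0x00000000, which
// is the IEEE 754 encoding of 6.0.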


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(scratch1, a0, kSmiTagSize);
    __ mtc1(scratch1, f14);
    __ cvt_d_w(f14, f14);
    __ sra(scratch1, a1, kSmiTagSize);
    __ mtc1(scratch1, f12);
    __ cvt_d_w(f12, f12);
    if (destination == kCoreRegisters) {
      __ Move(a2, a3, f14);
      __ Move(a0, a1, f12);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from a0 to a3 and a2 in double format.
    __ mov(scratch1, a0);
    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
    __ push(ra);
    __ Call(stub1.GetCode());
    // Write Smi from a1 to a1 and a0 in double format.
    __ mov(scratch1, a1);
    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
    __ Call(stub2.GetCode());
    __ pop(ra);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {

  // Load right operand (a0) to f14 or a2/a3.
  LoadNumber(masm, destination,
             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (a1) to f12 or a0/a1.
  LoadNumber(masm, destination,
             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     FPURegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  __ JumpIfSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(FPU) &&
      destination == kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    // Load the double from tagged HeapNumber to double register.

    // ARM uses a workaround here because of the unaligned HeapNumber
    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
    // point in generating even more instructions.
    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
    __ lw(dst2, FieldMemOperand(object,
        HeapNumber::kValueOffset + kPointerSize));
  }
  __ Branch(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Convert smi to double using FPU instructions.
    __ SmiUntag(scratch1, object);
    __ mtc1(scratch1, dst);
    __ cvt_d_w(dst, dst);
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ Move(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 in double format.
    __ mov(scratch1, object);
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(ra);
    __ Call(stub.GetCode());
    __ pop(ra);
  }

  __ bind(&done);
}


void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               FPURegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label is_smi;
  Label done;
  Label not_in_int32_range;

  __ JumpIfSmi(object, &is_smi);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
  __ ConvertToInt32(object,
                    dst,
                    scratch1,
                    scratch2,
                    double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,
                                 scratch1,
                                 scratch2,
                                 scratch3);

  __ jmp(&done);

  __ bind(&is_smi);
  __ SmiUntag(dst, object);
  __ bind(&done);
}


void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             FPURegister double_dst,
                                             Register dst1,
                                             Register dst2,
                                             Register scratch2,
                                             FPURegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  Label done;

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(int_scratch, single_scratch);
    __ cvt_d_w(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |       dst2       |       dst1       |
    // | s |  exp  |          mantissa       |

    // Check for zero.
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);
    __ Branch(&done, eq, int_scratch, Operand(zero_reg));

    // Preload the sign of the value.
    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
    // Get the absolute value of the object (as an unsigned integer).
    Label skip_sub;
    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
    __ Subu(int_scratch, zero_reg, int_scratch);
    __ bind(&skip_sub);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ clz(dst1, int_scratch);
    __ li(scratch2, 31);
    __ Subu(dst1, scratch2, dst1);

    // Set the exponent.
    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Ins(dst2, scratch2,
        HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non null bit.
    __ li(scratch2, Operand(1));
    __ sllv(scratch2, scratch2, dst1);
    __ li(at, -1);
    __ Xor(scratch2, scratch2, at);
    __ And(int_scratch, int_scratch, scratch2);

    // Get the number of bits to set in the lower part of the mantissa.
    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
    // Set the higher 20 bits of the mantissa.
    __ srlv(at, int_scratch, scratch2);
    __ or_(dst2, dst2, at);
    __ li(at, 32);
    __ subu(scratch2, at, scratch2);
    __ sllv(dst1, int_scratch, scratch2);
    __ Branch(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ li(at, HeapNumber::kMantissaBitsInTopWord);
    __ subu(scratch2, at, dst1);
    __ sllv(scratch2, int_scratch, scratch2);
    __ Or(dst2, dst2, scratch2);
    // Set dst1 to 0.
    __ mov(dst1, zero_reg);
  }
  __ bind(&done);
}
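
// Worked example for the non-FPU path above (illustrative only): for
// int_scratch = 1024 = 2^10, clz gives 21, so the first set bit is at
// position 31 - 21 = 10 and the biased exponent is 10 + 1023 = 1033, which
// Ins() writes into dst2.  Clearing that leading bit leaves no mantissa
// bits to place, so the result is dst2 = 0x40900000, dst1 = 0x00000000,
// i.e. the double 1024.0.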


void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  DoubleRegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  FPURegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);
  __ Branch(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_dst,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));

    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }

  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
    __ Or(scratch1, scratch1, Operand(dst2));
    __ Branch(&done, eq, scratch1, Operand(zero_reg));

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            DoubleRegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  // Untag the object into the destination register.
  __ SmiUntag(dst, object);
  // Just return if the object is a smi.
  __ JumpIfSmi(object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

    FPURegister single_scratch = double_scratch.low();
    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
    // Get the result in the destination register.
    __ mfc1(dst, single_scratch);

  } else {
    // Load the double value in the destination registers.
    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
    __ Or(dst, scratch2, Operand(dst));
    __ Branch(&done, eq, dst, Operand(zero_reg));

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Registers state after DoubleIs32BitInteger.
    // dst: mantissa[51:20].
    // scratch2: 1

    // Shift back the higher bits of the mantissa.
    __ srlv(dst, dst, scratch3);
    // Set the implicit first bit.
    __ li(at, 32);
    __ subu(scratch3, at, scratch3);
    __ sllv(scratch2, scratch2, scratch3);
    __ Or(dst, dst, scratch2);
    // Set the sign.
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    Label skip_sub;
    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
    __ Subu(dst, zero_reg, dst);
    __ bind(&skip_sub);
  }

  __ bind(&done);
}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ext(scratch,
         src1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
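  // Boundary examples (illustrative only): +2^31 has unbiased exponent 31 and
  // sign bit 0, so (exponent - signbit) is 31 > 30 and it is rejected below,
  // while kMinInt (-2^31) has exponent 31 and sign bit 1, giving 30, so it is
  // accepted.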
  Register tmp = dst;
  __ srl(at, src1, 31);
  __ subu(tmp, scratch, at);
  __ Branch(not_int32, gt, tmp, Operand(30));
  // Bits [21:0] in the mantissa must be null, otherwise the number is not a
  // 32-bit integer.
  __ And(tmp, src2, 0x3fffff);
  __ Branch(not_int32, ne, tmp, Operand(zero_reg));

  // Otherwise the exponent needs to be big enough to shift left all the
  // non zero bits left. So we need the (30 - exponent) last bits of the
  // 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ext(dst,
         src2,
         HeapNumber::kMantissaBitsInTopWord,
         32 - HeapNumber::kMantissaBitsInTopWord);
  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
  __ or_(dst, dst, at);

  // Create the mask and test the lower bits (of the higher bits).
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ li(src2, 1);
  __ sllv(src1, src2, scratch);
  __ Subu(src1, src1, Operand(1));
  __ And(src1, dst, src1);
  __ Branch(not_int32, ne, src1, Operand(zero_reg));
}


void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // a0: Left value (least significant part of mantissa).
  // a1: Left value (sign, exponent, top of mantissa).
  // a2: Right value (least significant part of mantissa).
  // a3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is saved.
  // We currently always use s0 to pass it.
  ASSERT(heap_number_result.is(s0));

  // Push the current return address before the C call.
  __ push(ra);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // The operands were prepared in the a0-a3 core registers, but the C
    // function we are calling is compiled with the hard-float flag and
    // expects the hard-float ABI (parameters in the f12/f14 registers), so
    // copy the parameters from a0-a3 to the f12/f14 register pairs.
    __ Move(f12, a0, a1);
    __ Move(f14, a2, a3);
  }
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(
        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
  }
  // Store answer in the overwritable heap number.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // Double returned in register f0.
    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    // Double returned in registers v0 and v1.
    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
  }
  // Place heap_number_result in v0 and return to the pushed return address.
  __ mov(v0, heap_number_result);
  __ pop(ra);
  __ Ret();
}


bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time. See next method.
  if (the_int_.is(a1) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a2) &&
      sign_.is(a3)) {
    return true;
  }
  if (the_int_.is(a2) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a3) &&
      sign_.is(a0)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
  stub1.GetCode()->set_is_pregenerated(true);
  stub2.GetCode()->set_is_pregenerated(true);
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign_, the_int_, Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));

  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch_, scratch_, sign_);
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int_);
  __ movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int_, shift_distance);
  __ or_(scratch_, scratch_, at);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kExponentOffset));
  __ sll(scratch_, the_int_, 32 - shift_distance);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(scratch_, zero_reg);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}
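
// Worked example (illustrative only, not generated code): for the non-Smi
// int32 2^30 = 0x40000000 the exponent word starts as
// (kExponentBias + 30) << kExponentShift = 0x41D00000.  The implicit leading
// mantissa bit, shifted right by shift_distance (10), lands on the exponent's
// lowest bit, which is already 1, so it combines harmlessly and the stored
// words are 0x41D00000:0x00000000, i.e. 1073741824.0.  For kMinInt the code
// above stores kSignMask | 0x41E00000 and a zero mantissa word instead.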


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cc != eq || !never_nan_nan) {
    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are both equal and they are not both Smis so neither of them is a
    // Smi. If it's not a heap number, then return equal.
    if (cc == less || cc == greater) {
      __ GetObjectType(a0, t4, t4);
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
    } else {
      __ GetObjectType(a0, t4, t4);
      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
      // Comparing JS objects with <=, >= is complicated.
      if (cc != eq) {
        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false! See ECMAScript 11.8.5.
        if (cc == less_equal || cc == greater_equal) {
          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
          __ Branch(&return_equal, ne, a0, Operand(t2));
          if (cc == le) {
            // undefined <= undefined should fail.
            __ li(v0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ li(v0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);  // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cc != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless. For the others here is some code to check
    // for NaN.
    if (cc != lt && cc != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
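      // For example (illustrative only): +Infinity is 0x7FF00000:00000000
      // (all exponent bits set, zero mantissa) and compares equal to itself,
      // while a quiet NaN such as 0x7FF80000:00000000 has a non-zero mantissa
      // and therefore compares unequal.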
1197 // Read top bits of double representation (second word of value).
1198 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1199 // Test that exponent bits are all set.
1200 __ And(t3, t2, Operand(exp_mask_reg));
1201 // If all bits not set (ne cond), then not a NaN, objects are equal.
1202 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
1203
1204 // Shift out flag and all exponent bits, retaining only mantissa.
1205 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
1206 // Or with all low-bits of mantissa.
1207 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1208 __ Or(v0, t3, Operand(t2));
1209 // For equal we already have the right value in v0: Return zero (equal)
1210 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1211 // not (it's a NaN). For <= and >= we need to load v0 with the failing
1212 // value if it's a NaN.
1213 if (cc != eq) {
1214 // All-zero means Infinity means equal.
1215 __ Ret(eq, v0, Operand(zero_reg));
1216 if (cc == le) {
1217 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
1218 } else {
1219 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
1220 }
1221 }
1222 __ Ret();
1223 }
1224 // No fall through here.
1225 }
1226
1227 __ bind(&not_identical);
1228}
1229
1230
1231static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1232 Register lhs,
1233 Register rhs,
1234 Label* both_loaded_as_doubles,
1235 Label* slow,
1236 bool strict) {
1237 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1238 (lhs.is(a1) && rhs.is(a0)));
1239
1240 Label lhs_is_smi;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001241 __ JumpIfSmi(lhs, &lhs_is_smi);
Ben Murdoch257744e2011-11-30 15:57:28 +00001242 // Rhs is a Smi.
1243 // Check whether the non-smi is a heap number.
1244 __ GetObjectType(lhs, t4, t4);
1245 if (strict) {
1246 // If lhs was not a number and rhs was a Smi then strict equality cannot
1247 // succeed. Return non-equal (lhs is already not zero).
1248 __ mov(v0, lhs);
1249 __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
1250 } else {
1251 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1252 // the runtime.
1253 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1254 }
1255
1256 // Rhs is a smi, lhs is a number.
1257 // Convert smi rhs to double.
1258 if (CpuFeatures::IsSupported(FPU)) {
1259 CpuFeatures::Scope scope(FPU);
1260 __ sra(at, rhs, kSmiTagSize);
1261 __ mtc1(at, f14);
1262 __ cvt_d_w(f14, f14);
1263 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1264 } else {
1265 // Load lhs to a double in a2, a3.
1266 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1267 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1268
1269 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
1270 __ mov(t6, rhs);
1271 ConvertToDoubleStub stub1(a1, a0, t6, t5);
1272 __ push(ra);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001273 __ Call(stub1.GetCode());
Ben Murdoch257744e2011-11-30 15:57:28 +00001274
1275 __ pop(ra);
1276 }
1277
1278 // We now have both loaded as doubles.
1279 __ jmp(both_loaded_as_doubles);
1280
1281 __ bind(&lhs_is_smi);
1282 // Lhs is a Smi. Check whether the non-smi is a heap number.
1283 __ GetObjectType(rhs, t4, t4);
1284 if (strict) {
1285 // If lhs was not a number and rhs was a Smi then strict equality cannot
1286 // succeed. Return non-equal.
1287 __ li(v0, Operand(1));
1288 __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
1289 } else {
1290 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1291 // the runtime.
1292 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1293 }
1294
1295 // Lhs is a smi, rhs is a number.
1296 // Convert smi lhs to double.
1297 if (CpuFeatures::IsSupported(FPU)) {
1298 CpuFeatures::Scope scope(FPU);
1299 __ sra(at, lhs, kSmiTagSize);
1300 __ mtc1(at, f12);
1301 __ cvt_d_w(f12, f12);
1302 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1303 } else {
1304 // Convert lhs to a double format. t5 is scratch.
1305 __ mov(t6, lhs);
1306 ConvertToDoubleStub stub2(a3, a2, t6, t5);
1307 __ push(ra);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001308 __ Call(stub2.GetCode());
Ben Murdoch257744e2011-11-30 15:57:28 +00001309 __ pop(ra);
1310 // Load rhs to a double in a1, a0.
1311 if (rhs.is(a0)) {
1312 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1313 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1314 } else {
1315 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1316 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1317 }
1318 }
1319 // Fall through to both_loaded_as_doubles.
Steve Block44f0eee2011-05-26 01:26:41 +01001320}
1321
1322
1323void EmitNanCheck(MacroAssembler* masm, Condition cc) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001324 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1325 if (CpuFeatures::IsSupported(FPU)) {
1326 CpuFeatures::Scope scope(FPU);
1327 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1328 __ Move(t0, t1, f14);
1329 __ Move(t2, t3, f12);
1330 } else {
1331 // Lhs and rhs are already loaded to GP registers.
1332 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1333 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1334 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1335 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1336 }
1337 Register rhs_exponent = exp_first ? t0 : t1;
1338 Register lhs_exponent = exp_first ? t2 : t3;
1339 Register rhs_mantissa = exp_first ? t1 : t0;
1340 Register lhs_mantissa = exp_first ? t3 : t2;
1341 Label one_is_nan, neither_is_nan;
1342 Label lhs_not_nan_exp_mask_is_loaded;
1343
1344 Register exp_mask_reg = t4;
1345 __ li(exp_mask_reg, HeapNumber::kExponentMask);
1346 __ and_(t5, lhs_exponent, exp_mask_reg);
1347 __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1348
1349 __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1350 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1351
1352 __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1353
1354 __ li(exp_mask_reg, HeapNumber::kExponentMask);
1355 __ bind(&lhs_not_nan_exp_mask_is_loaded);
1356 __ and_(t5, rhs_exponent, exp_mask_reg);
1357
1358 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1359
1360 __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1361 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1362
1363 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1364
1365 __ bind(&one_is_nan);
1366 // NaN comparisons always fail.
1367 // Load whatever we need in v0 to make the comparison fail.
1368 if (cc == lt || cc == le) {
1369 __ li(v0, Operand(GREATER));
1370 } else {
1371 __ li(v0, Operand(LESS));
1372 }
1373 __ Ret(); // Return.
1374
1375 __ bind(&neither_is_nan);
1376}
1377
1378
1379static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1380 // f12 and f14 have the two doubles. Neither is a NaN.
1381 // Call a native function to do a comparison between two non-NaNs.
1382 // Call C routine that may not cause GC or other trouble.
1383 // We use a call_was and return manually because we need arguments slots to
1384 // be freed.
1385
1386 Label return_result_not_equal, return_result_equal;
1387 if (cc == eq) {
1388 // Doubles are not equal unless they have the same bit pattern.
1389 // Exception: 0 and -0.
1390 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1391 if (CpuFeatures::IsSupported(FPU)) {
1392 CpuFeatures::Scope scope(FPU);
1393 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1394 __ Move(t0, t1, f14);
1395 __ Move(t2, t3, f12);
1396 } else {
1397 // Lhs and rhs are already loaded to GP registers.
1398 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1399 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1400 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1401 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1402 }
1403 Register rhs_exponent = exp_first ? t0 : t1;
1404 Register lhs_exponent = exp_first ? t2 : t3;
1405 Register rhs_mantissa = exp_first ? t1 : t0;
1406 Register lhs_mantissa = exp_first ? t3 : t2;
1407
1408 __ xor_(v0, rhs_mantissa, lhs_mantissa);
1409 __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1410
1411 __ subu(v0, rhs_exponent, lhs_exponent);
1412 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1413 // 0, -0 case.
1414 __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1415 __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1416 __ or_(t4, rhs_exponent, lhs_exponent);
1417 __ or_(t4, t4, rhs_mantissa);
1418
1419 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1420
1421 __ bind(&return_result_equal);
1422 __ li(v0, Operand(EQUAL));
1423 __ Ret();
1424 }
1425
1426 __ bind(&return_result_not_equal);
1427
1428 if (!CpuFeatures::IsSupported(FPU)) {
1429 __ push(ra);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001430 __ PrepareCallCFunction(0, 2, t4);
Ben Murdoch257744e2011-11-30 15:57:28 +00001431 if (!IsMipsSoftFloatABI) {
1432 // We are not using MIPS FPU instructions, and parameters for the runtime
1433 // function call are prepaired in a0-a3 registers, but function we are
1434 // calling is compiled with hard-float flag and expecting hard float ABI
1435 // (parameters in f12/f14 registers). We need to copy parameters from
1436 // a0-a3 registers to f12/f14 register pairs.
1437 __ Move(f12, a0, a1);
1438 __ Move(f14, a2, a3);
1439 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001440
1441 AllowExternalCallThatCantCauseGC scope(masm);
1442 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1443 0, 2);
Ben Murdoch257744e2011-11-30 15:57:28 +00001444 __ pop(ra); // Because this function returns int, result is in v0.
1445 __ Ret();
1446 } else {
1447 CpuFeatures::Scope scope(FPU);
1448 Label equal, less_than;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001449 __ BranchF(&equal, NULL, eq, f12, f14);
1450 __ BranchF(&less_than, NULL, lt, f12, f14);
Ben Murdoch257744e2011-11-30 15:57:28 +00001451
1452 // Not equal, not less, not NaN, must be greater.
1453 __ li(v0, Operand(GREATER));
1454 __ Ret();
1455
1456 __ bind(&equal);
1457 __ li(v0, Operand(EQUAL));
1458 __ Ret();
1459
1460 __ bind(&less_than);
1461 __ li(v0, Operand(LESS));
1462 __ Ret();
1463 }
1464}
1465
1466
1467static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1468 Register lhs,
1469 Register rhs) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001470 // If either operand is a JS object or an oddball value, then they are
Ben Murdoch257744e2011-11-30 15:57:28 +00001471 // not equal since their pointers are different.
1472 // There is no test for undetectability in strict equality.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001473 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Ben Murdoch257744e2011-11-30 15:57:28 +00001474 Label first_non_object;
1475 // Get the type of the first operand into a2 and compare it with
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001476 // FIRST_SPEC_OBJECT_TYPE.
Ben Murdoch257744e2011-11-30 15:57:28 +00001477 __ GetObjectType(lhs, a2, a2);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001478 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001479
1480 // Return non-zero.
1481 Label return_not_equal;
1482 __ bind(&return_not_equal);
1483 __ li(v0, Operand(1));
1484 __ Ret();
1485
1486 __ bind(&first_non_object);
1487 // Check for oddballs: true, false, null, undefined.
1488 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1489
1490 __ GetObjectType(rhs, a3, a3);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001491 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001492
1493 // Check for oddballs: true, false, null, undefined.
1494 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1495
1496 // Now that we have the types we might as well check for symbol-symbol.
1497 // Ensure that no non-strings have the symbol bit set.
1498 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
1499 STATIC_ASSERT(kSymbolTag != 0);
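  // ANDing the two instance types leaves kIsSymbolMask set only if both
  // operands are symbols; two distinct symbols are never equal, so return
  // not-equal in that case as well.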
1500 __ And(t2, a2, Operand(a3));
1501 __ And(t0, t2, Operand(kIsSymbolMask));
1502 __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1503}
1504
1505
1506static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1507 Register lhs,
1508 Register rhs,
1509 Label* both_loaded_as_doubles,
1510 Label* not_heap_numbers,
1511 Label* slow) {
1512 __ GetObjectType(lhs, a3, a2);
1513 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1514 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1515 // If first was a heap number & second wasn't, go to slow case.
1516 __ Branch(slow, ne, a3, Operand(a2));
1517
1518 // Both are heap numbers. Load them up then jump to the code we have
1519 // for that.
1520 if (CpuFeatures::IsSupported(FPU)) {
1521 CpuFeatures::Scope scope(FPU);
1522 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1523 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1524 } else {
1525 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1526 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1527 if (rhs.is(a0)) {
1528 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1529 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1530 } else {
1531 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1532 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1533 }
1534 }
1535 __ jmp(both_loaded_as_doubles);
1536}
1537
1538
1539// Fast negative check for symbol-to-symbol equality.
1540static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1541 Register lhs,
1542 Register rhs,
1543 Label* possible_strings,
1544 Label* not_both_strings) {
1545 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1546 (lhs.is(a1) && rhs.is(a0)));
1547
1548 // a2 is object type of lhs.
1549 // Ensure that no non-strings have the symbol bit set.
1550 Label object_test;
1551 STATIC_ASSERT(kSymbolTag != 0);
1552 __ And(at, a2, Operand(kIsNotStringMask));
1553 __ Branch(&object_test, ne, at, Operand(zero_reg));
1554 __ And(at, a2, Operand(kIsSymbolMask));
1555 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1556 __ GetObjectType(rhs, a3, a3);
1557 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1558 __ And(at, a3, Operand(kIsSymbolMask));
1559 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1560
1561 // Both are symbols. We already checked they weren't the same pointer
1562 // so they are not equal.
1563 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1564 __ Ret();
1565
1566 __ bind(&object_test);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001567 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001568 __ GetObjectType(rhs, a2, a3);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001569 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
Ben Murdoch257744e2011-11-30 15:57:28 +00001570
1571 // If both objects are undetectable, they are equal. Otherwise, they
1572 // are not equal, since they are different objects and an object is not
1573 // equal to undefined.
1574 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1575 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1576 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
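  // The AND of the two bit fields has the undetectable bit set only if both
  // maps are undetectable; XORing with the mask then yields 0 (equal) exactly
  // in that case and a non-zero value (not equal) otherwise.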
1577 __ and_(a0, a2, a3);
1578 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1579 __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
1580 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01001581}
1582
1583
1584void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1585 Register object,
1586 Register result,
1587 Register scratch1,
1588 Register scratch2,
1589 Register scratch3,
1590 bool object_is_smi,
1591 Label* not_found) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001592 // Use of registers. Register result is used as a temporary.
1593 Register number_string_cache = result;
1594 Register mask = scratch3;
1595
1596 // Load the number string cache.
1597 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1598
1599 // Make the hash mask from the length of the number string cache. It
1600 // contains two elements (number and string) for each cache entry.
1601 __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1602 // Divide length by two (length is a smi).
1603 __ sra(mask, mask, kSmiTagSize + 1);
1604 __ Addu(mask, mask, -1); // Make mask.
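  // For example, if the cache FixedArray has length 128, it holds 64
  // (number, string) pairs and the mask is 63, so an entry index is hash & 63.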
1605
1606 // Calculate the entry in the number string cache. The hash value in the
1607 // number string cache for smis is just the smi value, and the hash for
1608 // doubles is the xor of the upper and lower words. See
1609 // Heap::GetNumberStringCache.
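  // For example, the double 1.5 has the bit pattern 0x3FF8000000000000, so its
  // hash is 0x3FF80000 ^ 0x00000000 = 0x3FF80000 before masking.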
1610 Isolate* isolate = masm->isolate();
1611 Label is_smi;
1612 Label load_result_from_cache;
1613 if (!object_is_smi) {
1614 __ JumpIfSmi(object, &is_smi);
1615 if (CpuFeatures::IsSupported(FPU)) {
1616 CpuFeatures::Scope scope(FPU);
1617 __ CheckMap(object,
1618 scratch1,
1619 Heap::kHeapNumberMapRootIndex,
1620 not_found,
1621 DONT_DO_SMI_CHECK);
1622
1623 STATIC_ASSERT(8 == kDoubleSize);
1624 __ Addu(scratch1,
1625 object,
1626 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1627 __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1628 __ lw(scratch1, MemOperand(scratch1, 0));
1629 __ Xor(scratch1, scratch1, Operand(scratch2));
1630 __ And(scratch1, scratch1, Operand(mask));
1631
1632 // Calculate address of entry in string cache: each entry consists
1633 // of two pointer sized fields.
1634 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1635 __ Addu(scratch1, number_string_cache, scratch1);
1636
1637 Register probe = mask;
1638 __ lw(probe,
1639 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1640 __ JumpIfSmi(probe, not_found);
1641 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1642 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001643 __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
Ben Murdoch257744e2011-11-30 15:57:28 +00001644 __ Branch(not_found);
1645 } else {
1646 // Note that there is no cache check for the non-FPU case, even though
1647 // it seems there could be. This may be a tiny missed optimization for
1648 // non-FPU cores.
1649 __ Branch(not_found);
1650 }
1651 }
1652
1653 __ bind(&is_smi);
1654 Register scratch = scratch1;
1655 __ sra(scratch, object, 1); // Shift away the tag.
1656 __ And(scratch, mask, Operand(scratch));
1657
1658 // Calculate address of entry in string cache: each entry consists
1659 // of two pointer sized fields.
1660 __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1661 __ Addu(scratch, number_string_cache, scratch);
1662
1663 // Check if the entry is the smi we are looking for.
1664 Register probe = mask;
1665 __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1666 __ Branch(not_found, ne, object, Operand(probe));
1667
1668 // Get the result from the cache.
1669 __ bind(&load_result_from_cache);
1670 __ lw(result,
1671 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1672
1673 __ IncrementCounter(isolate->counters()->number_to_string_native(),
1674 1,
1675 scratch1,
1676 scratch2);
Steve Block44f0eee2011-05-26 01:26:41 +01001677}
1678
1679
1680void NumberToStringStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001681 Label runtime;
1682
1683 __ lw(a1, MemOperand(sp, 0));
1684
1685 // Generate code to lookup number in the number string cache.
1686 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1687 __ Addu(sp, sp, Operand(1 * kPointerSize));
1688 __ Ret();
1689
1690 __ bind(&runtime);
1691 // Handle number to string in the runtime system if not found in the cache.
1692 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01001693}
1694
1695
1696// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1697// On exit, v0 is 0, positive, or negative (smi) to indicate the result
1698// of the comparison.
1699void CompareStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001700 Label slow; // Call builtin.
1701 Label not_smis, both_loaded_as_doubles;
1702
1703
1704 if (include_smi_compare_) {
1705 Label not_two_smis, smi_done;
1706 __ Or(a2, a1, a0);
1707 __ JumpIfNotSmi(a2, &not_two_smis);
1708 __ sra(a1, a1, 1);
1709 __ sra(a0, a0, 1);
1710 __ Subu(v0, a1, a0);
1711 __ Ret();
1712 __ bind(&not_two_smis);
1713 } else if (FLAG_debug_code) {
1714 __ Or(a2, a1, a0);
1715 __ And(a2, a2, kSmiTagMask);
1716 __ Assert(ne, "CompareStub: unexpected smi operands.",
1717 a2, Operand(zero_reg));
1718 }
1719
1720
1721 // NOTICE! This code is only reached after a smi-fast-case check, so
1722 // it is certain that at least one operand isn't a smi.
1723
1724 // Handle the case where the objects are identical. Either returns the answer
1725 // or goes to slow. Only falls through if the objects were not identical.
1726 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1727
1728 // If either is a Smi (we know that not both are), then they can only
1729 // be strictly equal if the other is a HeapNumber.
1730 STATIC_ASSERT(kSmiTag == 0);
1731 ASSERT_EQ(0, Smi::FromInt(0));
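  // Since the smi tag is 0, the AND of the two values has a clear tag bit
  // (i.e. looks like a smi) iff at least one of the operands is a smi.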
1732 __ And(t2, lhs_, Operand(rhs_));
1733 __ JumpIfNotSmi(t2, &not_smis, t0);
1734 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1735 // 1) Return the answer.
1736 // 2) Go to slow.
1737 // 3) Fall through to both_loaded_as_doubles.
1738 // 4) Jump to rhs_not_nan.
1739 // In cases 3 and 4 we have found out we were dealing with a number-number
1740 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1741 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1742 EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1743 &both_loaded_as_doubles, &slow, strict_);
1744
1745 __ bind(&both_loaded_as_doubles);
1746 // f12, f14 are the double representations of the left hand side
1747 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1748 // left hand side and a0, a1 represent right hand side.
1749
1750 Isolate* isolate = masm->isolate();
1751 if (CpuFeatures::IsSupported(FPU)) {
1752 CpuFeatures::Scope scope(FPU);
1753 Label nan;
1754 __ li(t0, Operand(LESS));
1755 __ li(t1, Operand(GREATER));
1756 __ li(t2, Operand(EQUAL));
1757
1758 // Check if either rhs or lhs is NaN.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001759 __ BranchF(NULL, &nan, eq, f12, f14);
Ben Murdoch257744e2011-11-30 15:57:28 +00001760
1761 // Check if LESS condition is satisfied. If true, move conditionally
1762 // result to v0.
1763 __ c(OLT, D, f12, f14);
1764 __ movt(v0, t0);
1765 // Use the previous check to conditionally store the opposite condition
1766 // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
1767 // next check.
1768 __ movf(v0, t1);
1769 // Check if EQUAL condition is satisfied. If true, move conditionally
1770 // result to v0.
1771 __ c(EQ, D, f12, f14);
1772 __ movt(v0, t2);
1773
1774 __ Ret();
1775
1776 __ bind(&nan);
1777 // NaN comparisons always fail.
1778 // Load whatever we need in v0 to make the comparison fail.
1779 if (cc_ == lt || cc_ == le) {
1780 __ li(v0, Operand(GREATER));
1781 } else {
1782 __ li(v0, Operand(LESS));
1783 }
1784 __ Ret();
1785 } else {
1786 // Checks for NaN in the doubles we have loaded. Can return the answer or
1787 // fall through if neither is a NaN. Also binds rhs_not_nan.
1788 EmitNanCheck(masm, cc_);
1789
1790 // Compares two doubles that are not NaNs. Returns the answer.
1791 // Never falls through.
1792 EmitTwoNonNanDoubleComparison(masm, cc_);
1793 }
1794
1795 __ bind(&not_smis);
1796 // At this point we know we are dealing with two different objects,
1797 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1798 if (strict_) {
1799 // This returns non-equal for some object types, or falls through if it
1800 // was not lucky.
1801 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1802 }
1803
1804 Label check_for_symbols;
1805 Label flat_string_check;
1806 // Check for heap-number-heap-number comparison. Can jump to slow case,
1807 // or load both doubles and jump to the code that handles
1808 // that case. If the inputs are not doubles then jumps to check_for_symbols.
1809 // In this case a2 will contain the type of lhs_.
1810 EmitCheckForTwoHeapNumbers(masm,
1811 lhs_,
1812 rhs_,
1813 &both_loaded_as_doubles,
1814 &check_for_symbols,
1815 &flat_string_check);
1816
1817 __ bind(&check_for_symbols);
1818 if (cc_ == eq && !strict_) {
1819 // Returns an answer for two symbols or two detectable objects.
1820 // Otherwise jumps to string case or not both strings case.
1821 // Assumes that a2 is the type of lhs_ on entry.
1822 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1823 }
1824
1825 // Check for both being sequential ASCII strings, and inline if that is the
1826 // case.
1827 __ bind(&flat_string_check);
1828
1829 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1830
1831 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1832 if (cc_ == eq) {
1833 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1834 lhs_,
1835 rhs_,
1836 a2,
1837 a3,
1838 t0);
1839 } else {
1840 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1841 lhs_,
1842 rhs_,
1843 a2,
1844 a3,
1845 t0,
1846 t1);
1847 }
1848 // Never falls through to here.
1849
1850 __ bind(&slow);
1851 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1852 // a1 (rhs) second.
1853 __ Push(lhs_, rhs_);
1854 // Figure out which native to call and setup the arguments.
1855 Builtins::JavaScript native;
1856 if (cc_ == eq) {
1857 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1858 } else {
1859 native = Builtins::COMPARE;
1860 int ncr; // NaN compare result.
1861 if (cc_ == lt || cc_ == le) {
1862 ncr = GREATER;
1863 } else {
1864 ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
1865 ncr = LESS;
1866 }
1867 __ li(a0, Operand(Smi::FromInt(ncr)));
1868 __ push(a0);
1869 }
1870
1871 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1872 // tagged as a small integer.
1873 __ InvokeBuiltin(native, JUMP_FUNCTION);
Steve Block44f0eee2011-05-26 01:26:41 +01001874}
1875
1876
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001877// The stub expects its argument in the tos_ register and returns its result in
1878// it, too: zero for false, and a non-zero value for true.
Steve Block44f0eee2011-05-26 01:26:41 +01001879void ToBooleanStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001880 // This stub uses FPU instructions.
1881 CpuFeatures::Scope scope(FPU);
1882
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001883 Label patch;
1884 const Register map = t5.is(tos_) ? t3 : t5;
Ben Murdoch257744e2011-11-30 15:57:28 +00001885
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001886 // undefined -> false.
1887 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
Ben Murdoch257744e2011-11-30 15:57:28 +00001888
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001889 // Boolean -> its value.
1890 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1891 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00001892
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001893 // 'null' -> false.
1894 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
Ben Murdoch257744e2011-11-30 15:57:28 +00001895
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001896 if (types_.Contains(SMI)) {
1897 // Smis: 0 -> false, all other -> true
1898 __ And(at, tos_, kSmiTagMask);
1899 // tos_ contains the correct return value already
1900 __ Ret(eq, at, Operand(zero_reg));
1901 } else if (types_.NeedsMap()) {
1902 // If we need a map later and have a Smi -> patch.
1903 __ JumpIfSmi(tos_, &patch);
1904 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001905
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001906 if (types_.NeedsMap()) {
1907 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00001908
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001909 if (types_.CanBeUndetectable()) {
1910 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1911 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1912 // Undetectable -> false.
1913 __ movn(tos_, zero_reg, at);
1914 __ Ret(ne, at, Operand(zero_reg));
1915 }
1916 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001917
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001918 if (types_.Contains(SPEC_OBJECT)) {
1919 // Spec object -> true.
1920 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1921 // tos_ contains the correct non-zero return value already.
1922 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1923 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001924
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001925 if (types_.Contains(STRING)) {
1926 // String value -> false iff empty.
1927 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1928 Label skip;
1929 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1930 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1931 __ Ret(); // the string length is OK as the return value
1932 __ bind(&skip);
1933 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001934
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001935 if (types_.Contains(HEAP_NUMBER)) {
1936 // Heap number -> false iff +0, -0, or NaN.
1937 Label not_heap_number;
1938 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1939 __ Branch(&not_heap_number, ne, map, Operand(at));
1940 Label zero_or_nan, number;
1941 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1942 __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
1943 // "tos_" is a register, and contains a non zero value by default.
1944 // Hence we only need to overwrite "tos_" with zero to return false for
1945 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1946 __ bind(&zero_or_nan);
1947 __ mov(tos_, zero_reg);
1948 __ bind(&number);
1949 __ Ret();
1950 __ bind(&not_heap_number);
1951 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001952
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001953 __ bind(&patch);
1954 GenerateTypeTransition(masm);
1955}
Ben Murdoch257744e2011-11-30 15:57:28 +00001956
Ben Murdoch257744e2011-11-30 15:57:28 +00001957
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001958void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1959 Type type,
1960 Heap::RootListIndex value,
1961 bool result) {
1962 if (types_.Contains(type)) {
1963 // If we see an expected oddball, return its ToBoolean value tos_.
1964 __ LoadRoot(at, value);
1965 __ Subu(at, at, tos_); // This is a check for equality for the movz below.
1966 // The value of a root is never NULL, so we can avoid loading a non-null
1967 // value into tos_ when we want to return 'true'.
1968 if (!result) {
1969 __ movz(tos_, zero_reg, at);
1970 }
1971 __ Ret(eq, at, Operand(zero_reg));
1972 }
1973}
Ben Murdoch257744e2011-11-30 15:57:28 +00001974
Ben Murdoch257744e2011-11-30 15:57:28 +00001975
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001976void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1977 __ Move(a3, tos_);
1978 __ li(a2, Operand(Smi::FromInt(tos_.code())));
1979 __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
1980 __ Push(a3, a2, a1);
1981 // Patch the caller to an appropriate specialized stub and return the
1982 // operation result to the caller of the stub.
1983 __ TailCallExternalReference(
1984 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1985 3,
1986 1);
1987}
1988
1989
1990void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1991 // We don't allow a GC during a store buffer overflow so there is no need to
1992 // store the registers in any particular way, but we do have to store and
1993 // restore them.
1994 __ MultiPush(kJSCallerSaved | ra.bit());
1995 if (save_doubles_ == kSaveFPRegs) {
1996 CpuFeatures::Scope scope(FPU);
1997 __ MultiPushFPU(kCallerSavedFPU);
1998 }
1999 const int argument_count = 1;
2000 const int fp_argument_count = 0;
2001 const Register scratch = a1;
2002
2003 AllowExternalCallThatCantCauseGC scope(masm);
2004 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
2005 __ li(a0, Operand(ExternalReference::isolate_address()));
2006 __ CallCFunction(
2007 ExternalReference::store_buffer_overflow_function(masm->isolate()),
2008 argument_count);
2009 if (save_doubles_ == kSaveFPRegs) {
2010 CpuFeatures::Scope scope(FPU);
2011 __ MultiPopFPU(kCallerSavedFPU);
2012 }
2013
2014 __ MultiPop(kJSCallerSaved | ra.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00002015 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01002016}
2017
2018
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002019void UnaryOpStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002020 const char* op_name = Token::Name(op_);
2021 const char* overwrite_name = NULL; // Make g++ happy.
2022 switch (mode_) {
2023 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
2024 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
2025 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002026 stream->Add("UnaryOpStub_%s_%s_%s",
2027 op_name,
2028 overwrite_name,
2029 UnaryOpIC::GetName(operand_type_));
Ben Murdoch257744e2011-11-30 15:57:28 +00002030}
2031
2032
2033// TODO(svenpanne): Use virtual functions instead of switch.
2034void UnaryOpStub::Generate(MacroAssembler* masm) {
2035 switch (operand_type_) {
2036 case UnaryOpIC::UNINITIALIZED:
2037 GenerateTypeTransition(masm);
2038 break;
2039 case UnaryOpIC::SMI:
2040 GenerateSmiStub(masm);
2041 break;
2042 case UnaryOpIC::HEAP_NUMBER:
2043 GenerateHeapNumberStub(masm);
2044 break;
2045 case UnaryOpIC::GENERIC:
2046 GenerateGenericStub(masm);
2047 break;
2048 }
2049}
2050
2051
2052void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2053 // Argument is in a0 and v0 at this point, so we can overwrite a0.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002054 __ li(a2, Operand(Smi::FromInt(op_)));
2055 __ li(a1, Operand(Smi::FromInt(mode_)));
Ben Murdoch257744e2011-11-30 15:57:28 +00002056 __ li(a0, Operand(Smi::FromInt(operand_type_)));
Ben Murdoch257744e2011-11-30 15:57:28 +00002057 __ Push(v0, a2, a1, a0);
2058
2059 __ TailCallExternalReference(
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002060 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00002061}
2062
2063
2064// TODO(svenpanne): Use virtual functions instead of switch.
2065void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2066 switch (op_) {
2067 case Token::SUB:
2068 GenerateSmiStubSub(masm);
2069 break;
2070 case Token::BIT_NOT:
2071 GenerateSmiStubBitNot(masm);
2072 break;
2073 default:
2074 UNREACHABLE();
2075 }
2076}
2077
2078
2079void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2080 Label non_smi, slow;
2081 GenerateSmiCodeSub(masm, &non_smi, &slow);
2082 __ bind(&non_smi);
2083 __ bind(&slow);
2084 GenerateTypeTransition(masm);
2085}
2086
2087
2088void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2089 Label non_smi;
2090 GenerateSmiCodeBitNot(masm, &non_smi);
2091 __ bind(&non_smi);
2092 GenerateTypeTransition(masm);
2093}
2094
2095
2096void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2097 Label* non_smi,
2098 Label* slow) {
2099 __ JumpIfNotSmi(a0, non_smi);
2100
2101 // The result of negating zero or the smallest negative smi is not a smi.
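  // The tagged value 0x00000000 is smi 0 and 0x80000000 is the smallest
  // negative smi (-2^30); masking with ~0x80000000 yields zero exactly for
  // these two inputs, whose negations (-0 and 2^30) are not smis.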
2102 __ And(t0, a0, ~0x80000000);
2103 __ Branch(slow, eq, t0, Operand(zero_reg));
2104
2105 // Return '0 - value'.
2106 __ Subu(v0, zero_reg, a0);
2107 __ Ret();
2108}
2109
2110
2111void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2112 Label* non_smi) {
2113 __ JumpIfNotSmi(a0, non_smi);
2114
2115 // Flip bits and revert inverted smi-tag.
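  // For a tagged smi 2*x, flipping all bits gives ~(2*x) == -2*x - 1; clearing
  // the (now set) tag bit yields -2*x - 2, which is the tagged value of ~x.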
2116 __ Neg(v0, a0);
2117 __ And(v0, v0, ~kSmiTagMask);
2118 __ Ret();
2119}
2120
2121
2122// TODO(svenpanne): Use virtual functions instead of switch.
2123void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2124 switch (op_) {
2125 case Token::SUB:
2126 GenerateHeapNumberStubSub(masm);
2127 break;
2128 case Token::BIT_NOT:
2129 GenerateHeapNumberStubBitNot(masm);
2130 break;
2131 default:
2132 UNREACHABLE();
2133 }
2134}
2135
2136
2137void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2138 Label non_smi, slow, call_builtin;
2139 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2140 __ bind(&non_smi);
2141 GenerateHeapNumberCodeSub(masm, &slow);
2142 __ bind(&slow);
2143 GenerateTypeTransition(masm);
2144 __ bind(&call_builtin);
2145 GenerateGenericCodeFallback(masm);
2146}
2147
2148
2149void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2150 Label non_smi, slow;
2151 GenerateSmiCodeBitNot(masm, &non_smi);
2152 __ bind(&non_smi);
2153 GenerateHeapNumberCodeBitNot(masm, &slow);
2154 __ bind(&slow);
2155 GenerateTypeTransition(masm);
2156}
2157
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002158
Ben Murdoch257744e2011-11-30 15:57:28 +00002159void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2160 Label* slow) {
2161 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2162 // a0 is a heap number. Get a new heap number in a1.
2163 if (mode_ == UNARY_OVERWRITE) {
2164 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2165 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2166 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2167 } else {
2168 Label slow_allocate_heapnumber, heapnumber_allocated;
2169 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2170 __ jmp(&heapnumber_allocated);
2171
2172 __ bind(&slow_allocate_heapnumber);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002173 {
2174 FrameScope scope(masm, StackFrame::INTERNAL);
2175 __ push(a0);
2176 __ CallRuntime(Runtime::kNumberAlloc, 0);
2177 __ mov(a1, v0);
2178 __ pop(a0);
2179 }
Ben Murdoch257744e2011-11-30 15:57:28 +00002180
2181 __ bind(&heapnumber_allocated);
2182 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2183 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2184 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2185 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2186 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
2187 __ mov(v0, a1);
2188 }
2189 __ Ret();
2190}
2191
2192
2193void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2194 MacroAssembler* masm,
2195 Label* slow) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002196 Label impossible;
2197
Ben Murdoch257744e2011-11-30 15:57:28 +00002198 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2199 // Convert the heap number in a0 to an untagged integer in a1.
2200 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2201
2202 // Do the bitwise operation and check if the result fits in a smi.
2203 Label try_float;
2204 __ Neg(a1, a1);
2205 __ Addu(a2, a1, Operand(0x40000000));
2206 __ Branch(&try_float, lt, a2, Operand(zero_reg));
2207
2208 // Tag the result as a smi and we're done.
2209 __ SmiTag(v0, a1);
2210 __ Ret();
2211
2212 // Try to store the result in a heap number.
2213 __ bind(&try_float);
2214 if (mode_ == UNARY_NO_OVERWRITE) {
2215 Label slow_allocate_heapnumber, heapnumber_allocated;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002216 // Allocate a new heap number without zapping v0, which we need if it fails.
2217 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
Ben Murdoch257744e2011-11-30 15:57:28 +00002218 __ jmp(&heapnumber_allocated);
2219
2220 __ bind(&slow_allocate_heapnumber);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002221 {
2222 FrameScope scope(masm, StackFrame::INTERNAL);
2223 __ push(v0); // Push the heap number, not the untagged int32.
2224 __ CallRuntime(Runtime::kNumberAlloc, 0);
2225 __ mov(a2, v0); // Move the new heap number into a2.
2226 // Get the heap number into v0, now that the new heap number is in a2.
2227 __ pop(v0);
2228 }
Ben Murdoch257744e2011-11-30 15:57:28 +00002229
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002230 // Convert the heap number in v0 to an untagged integer in a1.
2231 // This can't go slow-case because it's the same number we have
2232 // already converted once.
2233 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2234 // Negate the result.
2235 __ Xor(a1, a1, -1);
2236
Ben Murdoch257744e2011-11-30 15:57:28 +00002237 __ bind(&heapnumber_allocated);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002238 __ mov(v0, a2); // Move newly allocated heap number to v0.
Ben Murdoch257744e2011-11-30 15:57:28 +00002239 }
2240
2241 if (CpuFeatures::IsSupported(FPU)) {
2242 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2243 CpuFeatures::Scope scope(FPU);
2244 __ mtc1(a1, f0);
2245 __ cvt_d_w(f0, f0);
2246 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2247 __ Ret();
2248 } else {
2249 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2250 // have to set up a frame.
2251 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2252 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2253 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002254
2255 __ bind(&impossible);
2256 if (FLAG_debug_code) {
2257 __ stop("Incorrect assumption in bit-not stub");
2258 }
Ben Murdoch257744e2011-11-30 15:57:28 +00002259}
2260
2261
2262// TODO(svenpanne): Use virtual functions instead of switch.
2263void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2264 switch (op_) {
2265 case Token::SUB:
2266 GenerateGenericStubSub(masm);
2267 break;
2268 case Token::BIT_NOT:
2269 GenerateGenericStubBitNot(masm);
2270 break;
2271 default:
2272 UNREACHABLE();
2273 }
2274}
2275
2276
2277void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2278 Label non_smi, slow;
2279 GenerateSmiCodeSub(masm, &non_smi, &slow);
2280 __ bind(&non_smi);
2281 GenerateHeapNumberCodeSub(masm, &slow);
2282 __ bind(&slow);
2283 GenerateGenericCodeFallback(masm);
2284}
2285
2286
2287void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2288 Label non_smi, slow;
2289 GenerateSmiCodeBitNot(masm, &non_smi);
2290 __ bind(&non_smi);
2291 GenerateHeapNumberCodeBitNot(masm, &slow);
2292 __ bind(&slow);
2293 GenerateGenericCodeFallback(masm);
2294}
2295
2296
2297void UnaryOpStub::GenerateGenericCodeFallback(
2298 MacroAssembler* masm) {
2299 // Handle the slow case by jumping to the JavaScript builtin.
2300 __ push(a0);
2301 switch (op_) {
2302 case Token::SUB:
2303 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2304 break;
2305 case Token::BIT_NOT:
2306 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2307 break;
2308 default:
2309 UNREACHABLE();
2310 }
2311}
2312
2313
Ben Murdoch257744e2011-11-30 15:57:28 +00002314void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2315 Label get_result;
2316
2317 __ Push(a1, a0);
2318
2319 __ li(a2, Operand(Smi::FromInt(MinorKey())));
2320 __ li(a1, Operand(Smi::FromInt(op_)));
2321 __ li(a0, Operand(Smi::FromInt(operands_type_)));
2322 __ Push(a2, a1, a0);
2323
2324 __ TailCallExternalReference(
2325 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2326 masm->isolate()),
2327 5,
2328 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002329}
2330
2331
Ben Murdoch257744e2011-11-30 15:57:28 +00002332void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
Steve Block44f0eee2011-05-26 01:26:41 +01002333 MacroAssembler* masm) {
2334 UNIMPLEMENTED();
2335}
2336
2337
Ben Murdoch257744e2011-11-30 15:57:28 +00002338void BinaryOpStub::Generate(MacroAssembler* masm) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002339 // Explicitly allow generation of nested stubs. It is safe here because
2340 // generation code does not use any raw pointers.
2341 AllowStubCallsScope allow_stub_calls(masm, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00002342 switch (operands_type_) {
2343 case BinaryOpIC::UNINITIALIZED:
2344 GenerateTypeTransition(masm);
2345 break;
2346 case BinaryOpIC::SMI:
2347 GenerateSmiStub(masm);
2348 break;
2349 case BinaryOpIC::INT32:
2350 GenerateInt32Stub(masm);
2351 break;
2352 case BinaryOpIC::HEAP_NUMBER:
2353 GenerateHeapNumberStub(masm);
2354 break;
2355 case BinaryOpIC::ODDBALL:
2356 GenerateOddballStub(masm);
2357 break;
2358 case BinaryOpIC::BOTH_STRING:
2359 GenerateBothStringStub(masm);
2360 break;
2361 case BinaryOpIC::STRING:
2362 GenerateStringStub(masm);
2363 break;
2364 case BinaryOpIC::GENERIC:
2365 GenerateGeneric(masm);
2366 break;
2367 default:
2368 UNREACHABLE();
2369 }
Steve Block44f0eee2011-05-26 01:26:41 +01002370}
2371
2372
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002373void BinaryOpStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002374 const char* op_name = Token::Name(op_);
2375 const char* overwrite_name;
2376 switch (mode_) {
2377 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2378 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2379 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2380 default: overwrite_name = "UnknownOverwrite"; break;
2381 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002382 stream->Add("BinaryOpStub_%s_%s_%s",
2383 op_name,
2384 overwrite_name,
2385 BinaryOpIC::GetName(operands_type_));
Steve Block44f0eee2011-05-26 01:26:41 +01002386}
2387
2388
2389
Ben Murdoch257744e2011-11-30 15:57:28 +00002390void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2391 Register left = a1;
2392 Register right = a0;
2393
2394 Register scratch1 = t0;
2395 Register scratch2 = t1;
2396
2397 ASSERT(right.is(a0));
2398 STATIC_ASSERT(kSmiTag == 0);
2399
2400 Label not_smi_result;
2401 switch (op_) {
2402 case Token::ADD:
2403 __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2404 __ RetOnNoOverflow(scratch1);
2405 // No need to revert anything - right and left are intact.
2406 break;
2407 case Token::SUB:
2408 __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2409 __ RetOnNoOverflow(scratch1);
2410 // No need to revert anything - right and left are intact.
2411 break;
2412 case Token::MUL: {
2413 // Remove tag from one of the operands. This way the multiplication result
2414 // will be a smi if it fits the smi range.
2415 __ SmiUntag(scratch1, right);
2416 // Do multiplication.
2417 // lo = lower 32 bits of scratch1 * left.
2418 // hi = higher 32 bits of scratch1 * left.
2419 __ Mult(left, scratch1);
2420 // Check for overflowing the smi range - no overflow if higher 33 bits of
2421 // the result are identical.
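      // sra(lo, 31) replicates bit 31 of the low word; if it equals the high
      // word, the 64-bit product sign-extends from 32 bits, and since only one
      // operand was untagged the low word is already the tagged result.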
2422 __ mflo(scratch1);
2423 __ mfhi(scratch2);
2424 __ sra(scratch1, scratch1, 31);
2425 __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
2426 // Go slow on zero result to handle -0.
2427 __ mflo(v0);
2428 __ Ret(ne, v0, Operand(zero_reg));
2429 // We need -0 if we were multiplying a negative number with 0 to get 0.
2430 // We know one of them was zero.
2431 __ Addu(scratch2, right, left);
2432 Label skip;
2433 // ARM uses the 'pl' condition, which is 'ge'.
2434 // Negating it results in 'lt'.
2435 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2436 ASSERT(Smi::FromInt(0) == 0);
2437 __ mov(v0, zero_reg);
2438 __ Ret(); // Return smi 0 if the non-zero one was positive.
2439 __ bind(&skip);
2440 // We fall through here if we multiplied a negative number with 0, because
2441 // that would mean we should produce -0.
2442 }
2443 break;
2444 case Token::DIV: {
2445 Label done;
2446 __ SmiUntag(scratch2, right);
2447 __ SmiUntag(scratch1, left);
2448 __ Div(scratch1, scratch2);
2449 // A minor optimization: div may be calculated asynchronously, so we check
2450 // for division by zero before getting the result.
2451 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2452 // If the result is 0, we need to make sure the divisor (right) is
2453 // positive, otherwise it is a -0 case.
2454 // Quotient is in 'lo', remainder is in 'hi'.
2455 // Check for no remainder first.
2456 __ mfhi(scratch1);
2457 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2458 __ mflo(scratch1);
2459 __ Branch(&done, ne, scratch1, Operand(zero_reg));
2460 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2461 __ bind(&done);
2462 // Check that the signed result fits in a Smi.
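      // Adding 0x40000000 produces a negative (bit 31 set) result exactly when
      // the value lies outside the smi range [-2^30, 2^30 - 1].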
2463 __ Addu(scratch2, scratch1, Operand(0x40000000));
2464 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2465 __ SmiTag(v0, scratch1);
2466 __ Ret();
2467 }
2468 break;
2469 case Token::MOD: {
2470 Label done;
2471 __ SmiUntag(scratch2, right);
2472 __ SmiUntag(scratch1, left);
2473 __ Div(scratch1, scratch2);
2474 // A minor optimization: div may be calculated asynchronously, so we check
2475 // for division by 0 before calling mfhi.
2476 // Check for zero on the right hand side.
2477 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2478 // If the result is 0, we need to make sure the dividend (left) is
2479 // positive (or 0), otherwise it is a -0 case.
2480 // Remainder is in 'hi'.
2481 __ mfhi(scratch2);
2482 __ Branch(&done, ne, scratch2, Operand(zero_reg));
2483 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2484 __ bind(&done);
2485 // Check that the signed result fits in a Smi.
2486 __ Addu(scratch1, scratch2, Operand(0x40000000));
2487 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2488 __ SmiTag(v0, scratch2);
2489 __ Ret();
2490 }
2491 break;
2492 case Token::BIT_OR:
2493 __ Or(v0, left, Operand(right));
2494 __ Ret();
2495 break;
2496 case Token::BIT_AND:
2497 __ And(v0, left, Operand(right));
2498 __ Ret();
2499 break;
2500 case Token::BIT_XOR:
2501 __ Xor(v0, left, Operand(right));
2502 __ Ret();
2503 break;
2504 case Token::SAR:
2505 // Remove tags from right operand.
2506 __ GetLeastBitsFromSmi(scratch1, right, 5);
2507 __ srav(scratch1, left, scratch1);
2508 // Smi tag result.
2509 __ And(v0, scratch1, Operand(~kSmiTagMask));
2510 __ Ret();
2511 break;
2512 case Token::SHR:
2513 // Remove tags from operands. We can't do this on a 31 bit number
2514 // because then the 0s get shifted into bit 30 instead of bit 31.
2515 __ SmiUntag(scratch1, left);
2516 __ GetLeastBitsFromSmi(scratch2, right, 5);
2517 __ srlv(v0, scratch1, scratch2);
2518 // Unsigned shift is not allowed to produce a negative number, so
2519 // check the sign bit and the sign bit after Smi tagging.
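      // The unsigned result fits in a smi only if it is below 2^30, i.e. bits
      // 31 and 30 are both clear, hence the 0xc0000000 mask.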
2520 __ And(scratch1, v0, Operand(0xc0000000));
2521 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2522 // Smi tag result.
2523 __ SmiTag(v0);
2524 __ Ret();
2525 break;
2526 case Token::SHL:
2527 // Remove tags from operands.
2528 __ SmiUntag(scratch1, left);
2529 __ GetLeastBitsFromSmi(scratch2, right, 5);
2530 __ sllv(scratch1, scratch1, scratch2);
2531 // Check that the signed result fits in a Smi.
2532 __ Addu(scratch2, scratch1, Operand(0x40000000));
2533 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2534 __ SmiTag(v0, scratch1);
2535 __ Ret();
2536 break;
2537 default:
2538 UNREACHABLE();
2539 }
2540 __ bind(&not_smi_result);
Steve Block44f0eee2011-05-26 01:26:41 +01002541}
2542
2543
Ben Murdoch257744e2011-11-30 15:57:28 +00002544void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2545 bool smi_operands,
2546 Label* not_numbers,
2547 Label* gc_required) {
2548 Register left = a1;
2549 Register right = a0;
2550 Register scratch1 = t3;
2551 Register scratch2 = t5;
2552 Register scratch3 = t0;
2553
2554 ASSERT(smi_operands || (not_numbers != NULL));
2555 if (smi_operands && FLAG_debug_code) {
2556 __ AbortIfNotSmi(left);
2557 __ AbortIfNotSmi(right);
2558 }
2559
2560 Register heap_number_map = t2;
2561 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2562
2563 switch (op_) {
2564 case Token::ADD:
2565 case Token::SUB:
2566 case Token::MUL:
2567 case Token::DIV:
2568 case Token::MOD: {
2569 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2570 // depending on whether FPU is available or not.
2571 FloatingPointHelper::Destination destination =
2572 CpuFeatures::IsSupported(FPU) &&
2573 op_ != Token::MOD ?
2574 FloatingPointHelper::kFPURegisters :
2575 FloatingPointHelper::kCoreRegisters;
2576
2577 // Allocate new heap number for result.
2578 Register result = s0;
2579 GenerateHeapResultAllocation(
2580 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2581
2582 // Load the operands.
2583 if (smi_operands) {
2584 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2585 } else {
2586 FloatingPointHelper::LoadOperands(masm,
2587 destination,
2588 heap_number_map,
2589 scratch1,
2590 scratch2,
2591 not_numbers);
2592 }
2593
2594 // Calculate the result.
2595 if (destination == FloatingPointHelper::kFPURegisters) {
2596 // Using FPU registers:
2597 // f12: Left value.
2598 // f14: Right value.
2599 CpuFeatures::Scope scope(FPU);
2600 switch (op_) {
2601 case Token::ADD:
2602 __ add_d(f10, f12, f14);
2603 break;
2604 case Token::SUB:
2605 __ sub_d(f10, f12, f14);
2606 break;
2607 case Token::MUL:
2608 __ mul_d(f10, f12, f14);
2609 break;
2610 case Token::DIV:
2611 __ div_d(f10, f12, f14);
2612 break;
2613 default:
2614 UNREACHABLE();
2615 }
2616
2617 // ARM uses a workaround here because of the unaligned HeapNumber
2618 // kValueOffset. On MIPS this workaround is built into sdc1 so
2619 // there's no point in generating even more instructions.
2620 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2621 __ mov(v0, result);
2622 __ Ret();
2623 } else {
2624 // Call the C function to handle the double operation.
2625 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2626 op_,
2627 result,
2628 scratch1);
2629 if (FLAG_debug_code) {
2630 __ stop("Unreachable code.");
2631 }
2632 }
2633 break;
2634 }
2635 case Token::BIT_OR:
2636 case Token::BIT_XOR:
2637 case Token::BIT_AND:
2638 case Token::SAR:
2639 case Token::SHR:
2640 case Token::SHL: {
2641 if (smi_operands) {
2642 __ SmiUntag(a3, left);
2643 __ SmiUntag(a2, right);
2644 } else {
2645 // Convert operands to 32-bit integers. Right in a2 and left in a3.
2646 FloatingPointHelper::ConvertNumberToInt32(masm,
2647 left,
2648 a3,
2649 heap_number_map,
2650 scratch1,
2651 scratch2,
2652 scratch3,
2653 f0,
2654 not_numbers);
2655 FloatingPointHelper::ConvertNumberToInt32(masm,
2656 right,
2657 a2,
2658 heap_number_map,
2659 scratch1,
2660 scratch2,
2661 scratch3,
2662 f0,
2663 not_numbers);
2664 }
2665 Label result_not_a_smi;
2666 switch (op_) {
2667 case Token::BIT_OR:
2668 __ Or(a2, a3, Operand(a2));
2669 break;
2670 case Token::BIT_XOR:
2671 __ Xor(a2, a3, Operand(a2));
2672 break;
2673 case Token::BIT_AND:
2674 __ And(a2, a3, Operand(a2));
2675 break;
2676 case Token::SAR:
2677 // Use only the 5 least significant bits of the shift count.
2678 __ GetLeastBitsFromInt32(a2, a2, 5);
2679 __ srav(a2, a3, a2);
2680 break;
2681 case Token::SHR:
2682 // Use only the 5 least significant bits of the shift count.
2683 __ GetLeastBitsFromInt32(a2, a2, 5);
2684 __ srlv(a2, a3, a2);
2685 // SHR is special because it is required to produce a positive answer.
2686 // The code below for writing into heap numbers isn't capable of
2687 // writing the register as an unsigned int so we go to slow case if we
2688 // hit this case.
2689 if (CpuFeatures::IsSupported(FPU)) {
2690 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2691 } else {
2692 __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2693 }
2694 break;
2695 case Token::SHL:
2696 // Use only the 5 least significant bits of the shift count.
2697 __ GetLeastBitsFromInt32(a2, a2, 5);
2698 __ sllv(a2, a3, a2);
2699 break;
2700 default:
2701 UNREACHABLE();
2702 }
2703 // Check that the *signed* result fits in a smi.
2704 __ Addu(a3, a2, Operand(0x40000000));
2705 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2706 __ SmiTag(v0, a2);
2707 __ Ret();
2708
2709 // Allocate new heap number for result.
2710 __ bind(&result_not_a_smi);
2711 Register result = t1;
2712 if (smi_operands) {
2713 __ AllocateHeapNumber(
2714 result, scratch1, scratch2, heap_number_map, gc_required);
2715 } else {
2716 GenerateHeapResultAllocation(
2717 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2718 }
2719
2720 // a2: Answer as signed int32.
2721 // t1: Heap number to write answer into.
2722
2723 // Nothing can go wrong now, so move the heap number to v0, which is the
2724 // result.
2725 __ mov(v0, t1);
2726
2727 if (CpuFeatures::IsSupported(FPU)) {
2728 // Convert the int32 in a2 to the heap number in a0. As
2729 // mentioned above SHR needs to always produce a positive result.
2730 CpuFeatures::Scope scope(FPU);
2731 __ mtc1(a2, f0);
2732 if (op_ == Token::SHR) {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002733 __ Cvt_d_uw(f0, f0, f22);
Ben Murdoch257744e2011-11-30 15:57:28 +00002734 } else {
2735 __ cvt_d_w(f0, f0);
2736 }
2737 // ARM uses a workaround here because of the unaligned HeapNumber
2738 // kValueOffset. On MIPS this workaround is built into sdc1 so
2739 // there's no point in generating even more instructions.
2740 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2741 __ Ret();
2742 } else {
2743 // Tail call that writes the int32 in a2 to the heap number in v0, using
2744 // a3 and a0 as scratch. v0 is preserved and returned.
2745 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2746 __ TailCallStub(&stub);
2747 }
2748 break;
2749 }
2750 default:
2751 UNREACHABLE();
2752 }
Steve Block44f0eee2011-05-26 01:26:41 +01002753}
2754
2755
2756// Generate the smi code. If the operation on smis is successful, a return is
2757// generated. If the result is not a smi and heap number allocation is not
2758// requested, the code falls through. If heap number allocation is requested but
2759// a heap number cannot be allocated, the code jumps to the label gc_required.
Ben Murdoch257744e2011-11-30 15:57:28 +00002760void BinaryOpStub::GenerateSmiCode(
2761 MacroAssembler* masm,
2762 Label* use_runtime,
Steve Block44f0eee2011-05-26 01:26:41 +01002763 Label* gc_required,
2764 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002765 Label not_smis;
2766
2767 Register left = a1;
2768 Register right = a0;
2769 Register scratch1 = t3;
2770 Register scratch2 = t5;
2771
2772 // Perform combined smi check on both operands.
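  // With a zero smi tag, the OR of both operands has its tag bit set iff at
  // least one operand is a non-smi.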
2773 __ Or(scratch1, left, Operand(right));
2774 STATIC_ASSERT(kSmiTag == 0);
2775 __ JumpIfNotSmi(scratch1, &not_smis);
2776
2777 // If the smi-smi operation results in a smi return is generated.
2778 GenerateSmiSmiOperation(masm);
2779
2780 // If heap number results are possible generate the result in an allocated
2781 // heap number.
2782 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2783 GenerateFPOperation(masm, true, use_runtime, gc_required);
2784 }
2785 __ bind(&not_smis);
Steve Block44f0eee2011-05-26 01:26:41 +01002786}
2787
2788
Ben Murdoch257744e2011-11-30 15:57:28 +00002789void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2790 Label not_smis, call_runtime;
2791
2792 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2793 result_type_ == BinaryOpIC::SMI) {
2794 // Only allow smi results.
2795 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2796 } else {
2797 // Allow heap number result and don't make a transition if a heap number
2798 // cannot be allocated.
2799 GenerateSmiCode(masm,
2800 &call_runtime,
2801 &call_runtime,
2802 ALLOW_HEAPNUMBER_RESULTS);
2803 }
2804
2805 // Code falls through if the result is not returned as either a smi or heap
2806 // number.
2807 GenerateTypeTransition(masm);
2808
2809 __ bind(&call_runtime);
2810 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002811}
2812
2813
Ben Murdoch257744e2011-11-30 15:57:28 +00002814void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2815 ASSERT(operands_type_ == BinaryOpIC::STRING);
2816 // Try to add arguments as strings, otherwise, transition to the generic
2817 // BinaryOpIC type.
2818 GenerateAddStrings(masm);
2819 GenerateTypeTransition(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002820}
2821
2822
Ben Murdoch257744e2011-11-30 15:57:28 +00002823void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2824 Label call_runtime;
2825 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2826 ASSERT(op_ == Token::ADD);
2827 // If both arguments are strings, call the string add stub.
2828 // Otherwise, do a transition.
2829
2830 // Registers containing left and right operands respectively.
2831 Register left = a1;
2832 Register right = a0;
2833
2834 // Test if left operand is a string.
2835 __ JumpIfSmi(left, &call_runtime);
2836 __ GetObjectType(left, a2, a2);
2837 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2838
2839 // Test if right operand is a string.
2840 __ JumpIfSmi(right, &call_runtime);
2841 __ GetObjectType(right, a2, a2);
2842 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2843
2844 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2845 GenerateRegisterArgsPush(masm);
2846 __ TailCallStub(&string_add_stub);
2847
2848 __ bind(&call_runtime);
2849 GenerateTypeTransition(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002850}
2851
2852
Ben Murdoch257744e2011-11-30 15:57:28 +00002853void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2854 ASSERT(operands_type_ == BinaryOpIC::INT32);
2855
2856 Register left = a1;
2857 Register right = a0;
2858 Register scratch1 = t3;
2859 Register scratch2 = t5;
2860 FPURegister double_scratch = f0;
2861 FPURegister single_scratch = f6;
2862
2863 Register heap_number_result = no_reg;
2864 Register heap_number_map = t2;
2865 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2866
2867 Label call_runtime;
2868 // Labels for type transition, used for wrong input or output types.
2869 // Both label are currently actually bound to the same position. We use two
2870 // different label to differentiate the cause leading to type transition.
2871 Label transition;
2872
2873 // Smi-smi fast case.
2874 Label skip;
2875 __ Or(scratch1, left, right);
2876 __ JumpIfNotSmi(scratch1, &skip);
2877 GenerateSmiSmiOperation(masm);
2878 // Fall through if the result is not a smi.
2879 __ bind(&skip);
2880
2881 switch (op_) {
2882 case Token::ADD:
2883 case Token::SUB:
2884 case Token::MUL:
2885 case Token::DIV:
2886 case Token::MOD: {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002887 // Load both operands and check that they are 32-bit integers.
2888 // Jump to type transition if they are not. The registers a0 and a1 (right
2889 // and left) are preserved for the runtime call.
2890 FloatingPointHelper::Destination destination =
2891 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2892 ? FloatingPointHelper::kFPURegisters
2893 : FloatingPointHelper::kCoreRegisters;
Ben Murdoch257744e2011-11-30 15:57:28 +00002894
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002895 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2896 right,
2897 destination,
2898 f14,
2899 a2,
2900 a3,
2901 heap_number_map,
2902 scratch1,
2903 scratch2,
2904 f2,
2905 &transition);
2906 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2907 left,
2908 destination,
2909 f12,
2910 t0,
2911 t1,
2912 heap_number_map,
2913 scratch1,
2914 scratch2,
2915 f2,
2916 &transition);
Ben Murdoch257744e2011-11-30 15:57:28 +00002917
2918 if (destination == FloatingPointHelper::kFPURegisters) {
2919 CpuFeatures::Scope scope(FPU);
2920 Label return_heap_number;
2921 switch (op_) {
2922 case Token::ADD:
2923 __ add_d(f10, f12, f14);
2924 break;
2925 case Token::SUB:
2926 __ sub_d(f10, f12, f14);
2927 break;
2928 case Token::MUL:
2929 __ mul_d(f10, f12, f14);
2930 break;
2931 case Token::DIV:
2932 __ div_d(f10, f12, f14);
2933 break;
2934 default:
2935 UNREACHABLE();
2936 }
2937
2938 if (op_ != Token::DIV) {
2939 // These operations produce an integer result.
2940 // Try to return a smi if we can.
2941 // Otherwise return a heap number if allowed, or jump to type
2942 // transition.
2943
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002944 Register except_flag = scratch2;
2945 __ EmitFPUTruncate(kRoundToZero,
2946 single_scratch,
2947 f10,
2948 scratch1,
2949 except_flag);
Ben Murdoch257744e2011-11-30 15:57:28 +00002950
2951 if (result_type_ <= BinaryOpIC::INT32) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002952 // If except_flag != 0, result does not fit in a 32-bit integer.
2953 __ Branch(&transition, ne, except_flag, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00002954 }
2955
2956 // Check if the result fits in a smi.
2957 __ mfc1(scratch1, single_scratch);
2958 __ Addu(scratch2, scratch1, Operand(0x40000000));
2959 // If not try to return a heap number.
2960 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2961 // Check for minus zero. Return heap number for minus zero.
2962 Label not_zero;
2963 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
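          // The truncated result is zero: inspect the sign bit in the high
          // word of the double (f11) to tell +0 from -0, since -0 must be
          // returned as a heap number.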
2964 __ mfc1(scratch2, f11);
2965 __ And(scratch2, scratch2, HeapNumber::kSignMask);
2966 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2967 __ bind(&not_zero);
2968
2969 // Tag the result and return.
2970 __ SmiTag(v0, scratch1);
2971 __ Ret();
2972 } else {
2973 // DIV just falls through to allocating a heap number.
2974 }
2975
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002976 __ bind(&return_heap_number);
2977 // Return a heap number, or fall through to type transition or runtime
2978 // call if we can't.
2979 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2980 : BinaryOpIC::INT32)) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002981 // We are using FPU registers so s0 is available.
2982 heap_number_result = s0;
2983 GenerateHeapResultAllocation(masm,
2984 heap_number_result,
2985 heap_number_map,
2986 scratch1,
2987 scratch2,
2988 &call_runtime);
2989 __ mov(v0, heap_number_result);
2990 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2991 __ Ret();
2992 }
2993
2994 // A DIV operation expecting an integer result falls through
2995 // to type transition.
2996
2997 } else {
2998 // We preserved a0 and a1 to be able to call runtime.
2999 // Save the left value on the stack.
3000 __ Push(t1, t0);
3001
3002 Label pop_and_call_runtime;
3003
3004 // Allocate a heap number to store the result.
3005 heap_number_result = s0;
3006 GenerateHeapResultAllocation(masm,
3007 heap_number_result,
3008 heap_number_map,
3009 scratch1,
3010 scratch2,
3011 &pop_and_call_runtime);
3012
3013 // Load the left value from the value saved on the stack.
3014 __ Pop(a1, a0);
3015
3016 // Call the C function to handle the double operation.
3017 FloatingPointHelper::CallCCodeForDoubleOperation(
3018 masm, op_, heap_number_result, scratch1);
3019 if (FLAG_debug_code) {
3020 __ stop("Unreachable code.");
3021 }
3022
3023 __ bind(&pop_and_call_runtime);
3024 __ Drop(2);
3025 __ Branch(&call_runtime);
3026 }
3027
3028 break;
3029 }
3030
3031 case Token::BIT_OR:
3032 case Token::BIT_XOR:
3033 case Token::BIT_AND:
3034 case Token::SAR:
3035 case Token::SHR:
3036 case Token::SHL: {
3037 Label return_heap_number;
3038 Register scratch3 = t1;
3039 // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3040 // registers a0 and a1 (right and left) are preserved for the runtime
3041 // call.
3042 FloatingPointHelper::LoadNumberAsInt32(masm,
3043 left,
3044 a3,
3045 heap_number_map,
3046 scratch1,
3047 scratch2,
3048 scratch3,
3049 f0,
3050 &transition);
3051 FloatingPointHelper::LoadNumberAsInt32(masm,
3052 right,
3053 a2,
3054 heap_number_map,
3055 scratch1,
3056 scratch2,
3057 scratch3,
3058 f0,
3059 &transition);
3060
3061 // The ECMA-262 standard specifies that, for shift operations, only the
3062 // 5 least significant bits of the shift value should be used.
3063 switch (op_) {
3064 case Token::BIT_OR:
3065 __ Or(a2, a3, Operand(a2));
3066 break;
3067 case Token::BIT_XOR:
3068 __ Xor(a2, a3, Operand(a2));
3069 break;
3070 case Token::BIT_AND:
3071 __ And(a2, a3, Operand(a2));
3072 break;
3073 case Token::SAR:
3074 __ And(a2, a2, Operand(0x1f));
3075 __ srav(a2, a3, a2);
3076 break;
3077 case Token::SHR:
3078 __ And(a2, a2, Operand(0x1f));
3079 __ srlv(a2, a3, a2);
3080 // SHR is special because it is required to produce a positive answer.
3081 // We only get a negative result if the shift value (a2) is 0.
3082            // Such a result cannot be represented as a signed 32-bit integer,
3083            // so try to return a heap number if we can.
3084            // The non-FPU code does not support this special case, so jump to
3085            // the runtime if FPU support is unavailable.
3086 if (CpuFeatures::IsSupported(FPU)) {
3087 __ Branch((result_type_ <= BinaryOpIC::INT32)
3088 ? &transition
3089 : &return_heap_number,
3090 lt,
3091 a2,
3092 Operand(zero_reg));
3093 } else {
3094 __ Branch((result_type_ <= BinaryOpIC::INT32)
3095 ? &transition
3096 : &call_runtime,
3097 lt,
3098 a2,
3099 Operand(zero_reg));
3100 }
3101 break;
3102 case Token::SHL:
3103 __ And(a2, a2, Operand(0x1f));
3104 __ sllv(a2, a3, a2);
3105 break;
3106 default:
3107 UNREACHABLE();
3108 }
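      // For reference, the JS-level semantics implemented by the shift cases
      // above, sketched in C (assuming 32-bit ints; names are illustrative):
      //   int32_t  sar = left >> (right & 0x1f);            // Token::SAR
      //   uint32_t shr = (uint32_t)left >> (right & 0x1f);  // Token::SHR
      //   int32_t  shl = left << (right & 0x1f);            // Token::SHL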
3109
3110 // Check if the result fits in a smi.
3111 __ Addu(scratch1, a2, Operand(0x40000000));
3112      // If not, try to return a heap number. (We know the result is an int32.)
3113 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3114 // Tag the result and return.
3115 __ SmiTag(v0, a2);
3116 __ Ret();
3117
3118 __ bind(&return_heap_number);
3119 heap_number_result = t1;
3120 GenerateHeapResultAllocation(masm,
3121 heap_number_result,
3122 heap_number_map,
3123 scratch1,
3124 scratch2,
3125 &call_runtime);
3126
3127 if (CpuFeatures::IsSupported(FPU)) {
3128 CpuFeatures::Scope scope(FPU);
3129
3130 if (op_ != Token::SHR) {
3131 // Convert the result to a floating point value.
3132 __ mtc1(a2, double_scratch);
3133 __ cvt_d_w(double_scratch, double_scratch);
3134 } else {
3135 // The result must be interpreted as an unsigned 32-bit integer.
3136 __ mtc1(a2, double_scratch);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00003137 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003138 }
3139
3140 // Store the result.
3141 __ mov(v0, heap_number_result);
3142 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
3143 __ Ret();
3144 } else {
3145 // Tail call that writes the int32 in a2 to the heap number in v0, using
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003146 // a3 and a0 as scratch. v0 is preserved and returned.
Ben Murdoch257744e2011-11-30 15:57:28 +00003147 __ mov(a0, t1);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003148 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
Ben Murdoch257744e2011-11-30 15:57:28 +00003149 __ TailCallStub(&stub);
3150 }
3151
3152 break;
3153 }
3154
3155 default:
3156 UNREACHABLE();
3157 }
3158
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003159 // We never expect DIV to yield an integer result, so we always generate
3160 // type transition code for DIV operations expecting an integer result: the
3161 // code will fall through to this type transition.
3162 if (transition.is_linked() ||
3163 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003164 __ bind(&transition);
3165 GenerateTypeTransition(masm);
3166 }
3167
3168 __ bind(&call_runtime);
3169 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003170}
3171
3172
Ben Murdoch257744e2011-11-30 15:57:28 +00003173void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3174 Label call_runtime;
3175
3176 if (op_ == Token::ADD) {
3177 // Handle string addition here, because it is the only operation
3178 // that does not do a ToNumber conversion on the operands.
3179 GenerateAddStrings(masm);
3180 }
3181
3182 // Convert oddball arguments to numbers.
3183 Label check, done;
3184 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3185 __ Branch(&check, ne, a1, Operand(t0));
3186 if (Token::IsBitOp(op_)) {
3187 __ li(a1, Operand(Smi::FromInt(0)));
3188 } else {
3189 __ LoadRoot(a1, Heap::kNanValueRootIndex);
3190 }
3191 __ jmp(&done);
3192 __ bind(&check);
3193 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3194 __ Branch(&done, ne, a0, Operand(t0));
3195 if (Token::IsBitOp(op_)) {
3196 __ li(a0, Operand(Smi::FromInt(0)));
3197 } else {
3198 __ LoadRoot(a0, Heap::kNanValueRootIndex);
3199 }
3200 __ bind(&done);
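  // JS-level effect of the conversion above, as a rough sketch:
  //   if (left === undefined)  left  = IsBitOp(op) ? 0 : NaN;
  //   if (right === undefined) right = IsBitOp(op) ? 0 : NaN;
  // Other inputs fall through unchanged to the heap number stub below.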
3201
3202 GenerateHeapNumberStub(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003203}
3204
3205
Ben Murdoch257744e2011-11-30 15:57:28 +00003206void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3207 Label call_runtime;
3208 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3209
3210 __ bind(&call_runtime);
3211 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003212}
3213
3214
Ben Murdoch257744e2011-11-30 15:57:28 +00003215void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3216 Label call_runtime, call_string_add_or_runtime;
3217
3218 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3219
3220 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3221
3222 __ bind(&call_string_add_or_runtime);
3223 if (op_ == Token::ADD) {
3224 GenerateAddStrings(masm);
3225 }
3226
3227 __ bind(&call_runtime);
3228 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003229}
3230
3231
Ben Murdoch257744e2011-11-30 15:57:28 +00003232void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3233 ASSERT(op_ == Token::ADD);
3234 Label left_not_string, call_runtime;
3235
3236 Register left = a1;
3237 Register right = a0;
3238
3239 // Check if left argument is a string.
3240 __ JumpIfSmi(left, &left_not_string);
3241 __ GetObjectType(left, a2, a2);
3242 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3243
3244 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3245 GenerateRegisterArgsPush(masm);
3246 __ TailCallStub(&string_add_left_stub);
3247
3248 // Left operand is not a string, test right.
3249 __ bind(&left_not_string);
3250 __ JumpIfSmi(right, &call_runtime);
3251 __ GetObjectType(right, a2, a2);
3252 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3253
3254 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3255 GenerateRegisterArgsPush(masm);
3256 __ TailCallStub(&string_add_right_stub);
3257
3258 // At least one argument is not a string.
3259 __ bind(&call_runtime);
3260}
3261
3262
3263void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3264 GenerateRegisterArgsPush(masm);
3265 switch (op_) {
3266 case Token::ADD:
3267 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3268 break;
3269 case Token::SUB:
3270 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3271 break;
3272 case Token::MUL:
3273 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3274 break;
3275 case Token::DIV:
3276 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3277 break;
3278 case Token::MOD:
3279 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3280 break;
3281 case Token::BIT_OR:
3282 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3283 break;
3284 case Token::BIT_AND:
3285 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3286 break;
3287 case Token::BIT_XOR:
3288 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3289 break;
3290 case Token::SAR:
3291 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3292 break;
3293 case Token::SHR:
3294 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3295 break;
3296 case Token::SHL:
3297 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3298 break;
3299 default:
3300 UNREACHABLE();
3301 }
3302}
3303
3304
3305void BinaryOpStub::GenerateHeapResultAllocation(
Steve Block44f0eee2011-05-26 01:26:41 +01003306 MacroAssembler* masm,
3307 Register result,
3308 Register heap_number_map,
3309 Register scratch1,
3310 Register scratch2,
3311 Label* gc_required) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003312
3313  // The code below clobbers the result register if allocation fails. To keep
3314  // both arguments intact for the runtime call, result cannot be one of them.
3315 ASSERT(!result.is(a0) && !result.is(a1));
3316
3317 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3318 Label skip_allocation, allocated;
3319 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3320 // If the overwritable operand is already an object, we skip the
3321 // allocation of a heap number.
3322 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3323 // Allocate a heap number for the result.
3324 __ AllocateHeapNumber(
3325 result, scratch1, scratch2, heap_number_map, gc_required);
3326 __ Branch(&allocated);
3327 __ bind(&skip_allocation);
3328 // Use object holding the overwritable operand for result.
3329 __ mov(result, overwritable_operand);
3330 __ bind(&allocated);
3331 } else {
3332 ASSERT(mode_ == NO_OVERWRITE);
3333 __ AllocateHeapNumber(
3334 result, scratch1, scratch2, heap_number_map, gc_required);
3335 }
Steve Block44f0eee2011-05-26 01:26:41 +01003336}
3337
3338
Ben Murdoch257744e2011-11-30 15:57:28 +00003339void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3340 __ Push(a1, a0);
Steve Block44f0eee2011-05-26 01:26:41 +01003341}
3342
3343
3344
3345void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003346 // Untagged case: double input in f4, double result goes
3347 // into f4.
3348 // Tagged case: tagged input on top of stack and in a0,
3349 // tagged result (heap number) goes into v0.
3350
3351 Label input_not_smi;
3352 Label loaded;
3353 Label calculate;
3354 Label invalid_cache;
3355 const Register scratch0 = t5;
3356 const Register scratch1 = t3;
3357 const Register cache_entry = a0;
3358 const bool tagged = (argument_type_ == TAGGED);
3359
3360 if (CpuFeatures::IsSupported(FPU)) {
3361 CpuFeatures::Scope scope(FPU);
3362
3363 if (tagged) {
3364 // Argument is a number and is on stack and in a0.
3365 // Load argument and check if it is a smi.
3366 __ JumpIfNotSmi(a0, &input_not_smi);
3367
3368 // Input is a smi. Convert to double and load the low and high words
3369 // of the double into a2, a3.
3370 __ sra(t0, a0, kSmiTagSize);
3371 __ mtc1(t0, f4);
3372 __ cvt_d_w(f4, f4);
3373 __ Move(a2, a3, f4);
3374 __ Branch(&loaded);
3375
3376 __ bind(&input_not_smi);
3377 // Check if input is a HeapNumber.
3378 __ CheckMap(a0,
3379 a1,
3380 Heap::kHeapNumberMapRootIndex,
3381 &calculate,
3382 DONT_DO_SMI_CHECK);
3383 // Input is a HeapNumber. Store the
3384 // low and high words into a2, a3.
3385 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3386 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3387 } else {
3388 // Input is untagged double in f4. Output goes to f4.
3389 __ Move(a2, a3, f4);
3390 }
3391 __ bind(&loaded);
3392 // a2 = low 32 bits of double value.
3393 // a3 = high 32 bits of double value.
3394 // Compute hash (the shifts are arithmetic):
3395 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3396 __ Xor(a1, a2, a3);
3397 __ sra(t0, a1, 16);
3398 __ Xor(a1, a1, t0);
3399 __ sra(t0, a1, 8);
3400 __ Xor(a1, a1, t0);
3401 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3402 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3403
3404 // a2 = low 32 bits of double value.
3405 // a3 = high 32 bits of double value.
3406 // a1 = TranscendentalCache::hash(double value).
3407 __ li(cache_entry, Operand(
3408 ExternalReference::transcendental_cache_array_address(
3409 masm->isolate())));
3410 // a0 points to cache array.
3411 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3412 Isolate::Current()->transcendental_cache()->caches_[0])));
3413 // a0 points to the cache for the type type_.
3414 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3415 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3416
3417#ifdef DEBUG
3418    // Check that the layout of cache elements matches expectations.
3419 { TranscendentalCache::SubCache::Element test_elem[2];
3420 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3421 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3422 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3423 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3424 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3425 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3426 CHECK_EQ(0, elem_in0 - elem_start);
3427 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3428 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3429 }
3430#endif
3431
3432    // Find the address of the entry at index a1 in the cache, i.e., &a0[a1*12].
3433 __ sll(t0, a1, 1);
3434 __ Addu(a1, a1, t0);
3435 __ sll(t0, a1, 2);
3436 __ Addu(cache_entry, cache_entry, t0);
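    // The scaling above computes a1 * 12 as (a1 * 3) << 2, matching the
    // 12-byte element layout verified by the DEBUG block above (two uint32
    // inputs plus one output pointer).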
3437
3438 // Check if cache matches: Double value is stored in uint32_t[2] array.
3439 __ lw(t0, MemOperand(cache_entry, 0));
3440 __ lw(t1, MemOperand(cache_entry, 4));
3441 __ lw(t2, MemOperand(cache_entry, 8));
Ben Murdoch257744e2011-11-30 15:57:28 +00003442 __ Branch(&calculate, ne, a2, Operand(t0));
3443 __ Branch(&calculate, ne, a3, Operand(t1));
3444 // Cache hit. Load result, cleanup and return.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003445 Counters* counters = masm->isolate()->counters();
3446 __ IncrementCounter(
3447 counters->transcendental_cache_hit(), 1, scratch0, scratch1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003448 if (tagged) {
3449 // Pop input value from stack and load result into v0.
3450 __ Drop(1);
3451 __ mov(v0, t2);
3452 } else {
3453 // Load result into f4.
3454 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3455 }
3456 __ Ret();
3457 } // if (CpuFeatures::IsSupported(FPU))
3458
3459 __ bind(&calculate);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003460 Counters* counters = masm->isolate()->counters();
3461 __ IncrementCounter(
3462 counters->transcendental_cache_miss(), 1, scratch0, scratch1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003463 if (tagged) {
3464 __ bind(&invalid_cache);
3465 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3466 masm->isolate()),
3467 1,
3468 1);
3469 } else {
3470 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3471 CpuFeatures::Scope scope(FPU);
3472
3473 Label no_update;
3474 Label skip_cache;
3475 const Register heap_number_map = t2;
3476
3477 // Call C function to calculate the result and update the cache.
3478 // Register a0 holds precalculated cache entry address; preserve
3479 // it on the stack and pop it into register cache_entry after the
3480 // call.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003481 __ Push(cache_entry, a2, a3);
Ben Murdoch257744e2011-11-30 15:57:28 +00003482 GenerateCallCFunction(masm, scratch0);
3483 __ GetCFunctionDoubleResult(f4);
3484
3485 // Try to update the cache. If we cannot allocate a
3486 // heap number, we return the result without updating.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003487 __ Pop(cache_entry, a2, a3);
Ben Murdoch257744e2011-11-30 15:57:28 +00003488 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3489 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3490 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3491
3492 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3493 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3494 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3495
3496 __ mov(v0, cache_entry);
3497 __ Ret();
3498
3499 __ bind(&invalid_cache);
3500 // The cache is invalid. Call runtime which will recreate the
3501 // cache.
3502 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3503 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3504 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003505 {
3506 FrameScope scope(masm, StackFrame::INTERNAL);
3507 __ push(a0);
3508 __ CallRuntime(RuntimeFunction(), 1);
3509 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003510 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3511 __ Ret();
3512
3513 __ bind(&skip_cache);
3514    // Call the C function to calculate the result and return it directly,
3515    // without updating the cache.
3516 GenerateCallCFunction(masm, scratch0);
3517 __ GetCFunctionDoubleResult(f4);
3518 __ bind(&no_update);
3519
3520 // We return the value in f4 without adding it to the cache, but
3521 // we cause a scavenging GC so that future allocations will succeed.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003522 {
3523 FrameScope scope(masm, StackFrame::INTERNAL);
Ben Murdoch257744e2011-11-30 15:57:28 +00003524
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003525 // Allocate an aligned object larger than a HeapNumber.
3526 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3527 __ li(scratch0, Operand(4 * kPointerSize));
3528 __ push(scratch0);
3529 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3530 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003531 __ Ret();
3532 }
3533}
3534
3535
3536void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3537 Register scratch) {
3538 __ push(ra);
3539 __ PrepareCallCFunction(2, scratch);
3540 if (IsMipsSoftFloatABI) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003541 __ Move(a0, a1, f4);
Ben Murdoch257744e2011-11-30 15:57:28 +00003542 } else {
3543 __ mov_d(f12, f4);
3544 }
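  // ABI note: under the MIPS o32 soft-float convention the first double
  // argument travels in the a0/a1 register pair, while the hard-float
  // convention passes it in f12, which is what the if/else above selects.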
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003545 AllowExternalCallThatCantCauseGC scope(masm);
3546 Isolate* isolate = masm->isolate();
Ben Murdoch257744e2011-11-30 15:57:28 +00003547 switch (type_) {
3548 case TranscendentalCache::SIN:
3549 __ CallCFunction(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003550 ExternalReference::math_sin_double_function(isolate),
3551 0, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003552 break;
3553 case TranscendentalCache::COS:
3554 __ CallCFunction(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003555 ExternalReference::math_cos_double_function(isolate),
3556 0, 1);
3557 break;
3558 case TranscendentalCache::TAN:
3559 __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3560 0, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003561 break;
3562 case TranscendentalCache::LOG:
3563 __ CallCFunction(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003564 ExternalReference::math_log_double_function(isolate),
3565 0, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003566 break;
3567 default:
3568 UNIMPLEMENTED();
3569 break;
3570 }
3571 __ pop(ra);
Steve Block44f0eee2011-05-26 01:26:41 +01003572}
3573
3574
3575Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
Ben Murdoch257744e2011-11-30 15:57:28 +00003576 switch (type_) {
3577 // Add more cases when necessary.
3578 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3579 case TranscendentalCache::COS: return Runtime::kMath_cos;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003580 case TranscendentalCache::TAN: return Runtime::kMath_tan;
Ben Murdoch257744e2011-11-30 15:57:28 +00003581 case TranscendentalCache::LOG: return Runtime::kMath_log;
3582 default:
3583 UNIMPLEMENTED();
3584 return Runtime::kAbort;
3585 }
Steve Block44f0eee2011-05-26 01:26:41 +01003586}
3587
3588
3589void StackCheckStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003590 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01003591}
3592
3593
Ben Murdoch257744e2011-11-30 15:57:28 +00003594void MathPowStub::Generate(MacroAssembler* masm) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00003595 CpuFeatures::Scope fpu_scope(FPU);
3596 const Register base = a1;
3597 const Register exponent = a2;
3598 const Register heapnumbermap = t1;
3599 const Register heapnumber = v0;
3600 const DoubleRegister double_base = f2;
3601 const DoubleRegister double_exponent = f4;
3602 const DoubleRegister double_result = f0;
3603 const DoubleRegister double_scratch = f6;
3604 const FPURegister single_scratch = f8;
3605 const Register scratch = t5;
3606 const Register scratch2 = t3;
Ben Murdoch257744e2011-11-30 15:57:28 +00003607
Ben Murdochc7cc0282012-03-05 14:35:55 +00003608 Label call_runtime, done, exponent_not_smi, int_exponent;
3609 if (exponent_type_ == ON_STACK) {
3610 Label base_is_smi, unpack_exponent;
3611 // The exponent and base are supplied as arguments on the stack.
3612 // This can only happen if the stub is called from non-optimized code.
3613 // Load input parameters from stack to double registers.
Ben Murdoch257744e2011-11-30 15:57:28 +00003614 __ lw(base, MemOperand(sp, 1 * kPointerSize));
3615 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3616
Ben Murdochc7cc0282012-03-05 14:35:55 +00003617 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
Ben Murdoch257744e2011-11-30 15:57:28 +00003618
Ben Murdochc7cc0282012-03-05 14:35:55 +00003619 __ JumpIfSmi(base, &base_is_smi);
Ben Murdoch257744e2011-11-30 15:57:28 +00003620 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3621 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
Ben Murdoch257744e2011-11-30 15:57:28 +00003622
Ben Murdochc7cc0282012-03-05 14:35:55 +00003623 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3624 __ jmp(&unpack_exponent);
3625
3626 __ bind(&base_is_smi);
3627 __ SmiUntag(base);
3628 __ mtc1(base, single_scratch);
3629 __ cvt_d_w(double_base, single_scratch);
3630 __ bind(&unpack_exponent);
3631
Ben Murdoch257744e2011-11-30 15:57:28 +00003632 __ JumpIfNotSmi(exponent, &exponent_not_smi);
3633 __ SmiUntag(exponent);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003634 __ jmp(&int_exponent);
Ben Murdoch257744e2011-11-30 15:57:28 +00003635
3636 __ bind(&exponent_not_smi);
3637 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3638 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
Ben Murdoch257744e2011-11-30 15:57:28 +00003639 __ ldc1(double_exponent,
3640 FieldMemOperand(exponent, HeapNumber::kValueOffset));
Ben Murdochc7cc0282012-03-05 14:35:55 +00003641 } else if (exponent_type_ == TAGGED) {
3642 // Base is already in double_base.
3643 __ JumpIfNotSmi(exponent, &exponent_not_smi);
3644 __ SmiUntag(exponent);
3645 __ jmp(&int_exponent);
Ben Murdoch257744e2011-11-30 15:57:28 +00003646
Ben Murdochc7cc0282012-03-05 14:35:55 +00003647 __ bind(&exponent_not_smi);
3648 __ ldc1(double_exponent,
3649 FieldMemOperand(exponent, HeapNumber::kValueOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00003650 }
3651
Ben Murdochc7cc0282012-03-05 14:35:55 +00003652 if (exponent_type_ != INTEGER) {
3653 Label int_exponent_convert;
3654 // Detect integer exponents stored as double.
3655 __ EmitFPUTruncate(kRoundToMinusInf,
3656 single_scratch,
3657 double_exponent,
3658 scratch,
3659 scratch2,
3660 kCheckForInexactConversion);
3661 // scratch2 == 0 means there was no conversion error.
3662 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
3663
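    // kCheckForInexactConversion makes the truncation above flag any exponent
    // with a fractional part, so only exponents exactly representable as an
    // int32 take the integer fast path below.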
3664 if (exponent_type_ == ON_STACK) {
3665 // Detect square root case. Crankshaft detects constant +/-0.5 at
3666 // compile time and uses DoMathPowHalf instead. We then skip this check
3667 // for non-constant cases of +/-0.5 as these hardly occur.
3668 Label not_plus_half;
3669
3670 // Test for 0.5.
3671 __ Move(double_scratch, 0.5);
3672 __ BranchF(USE_DELAY_SLOT,
3673 &not_plus_half,
3674 NULL,
3675 ne,
3676 double_exponent,
3677 double_scratch);
3678
3679 // Calculates square root of base. Check for the special case of
3680 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3681 __ Move(double_scratch, -V8_INFINITY);
3682 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3683 __ neg_d(double_result, double_scratch);
3684
3685 // Add +0 to convert -0 to +0.
3686 __ add_d(double_scratch, double_base, kDoubleRegZero);
3687 __ sqrt_d(double_result, double_scratch);
3688 __ jmp(&done);
3689
3690 __ bind(&not_plus_half);
3691 __ Move(double_scratch, -0.5);
3692 __ BranchF(USE_DELAY_SLOT,
3693 &call_runtime,
3694 NULL,
3695 ne,
3696 double_exponent,
3697 double_scratch);
3698
3699      // Calculates the reciprocal of the square root of base. Check for the
3700      // special case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3701 __ Move(double_scratch, -V8_INFINITY);
3702 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3703 __ Move(double_result, kDoubleRegZero);
3704
3705 // Add +0 to convert -0 to +0.
3706 __ add_d(double_scratch, double_base, kDoubleRegZero);
3707 __ Move(double_result, 1);
3708 __ sqrt_d(double_scratch, double_scratch);
3709 __ div_d(double_result, double_result, double_scratch);
3710 __ jmp(&done);
3711 }
3712
3713 __ push(ra);
3714 {
3715 AllowExternalCallThatCantCauseGC scope(masm);
3716 __ PrepareCallCFunction(0, 2, scratch);
3717 __ SetCallCDoubleArguments(double_base, double_exponent);
3718 __ CallCFunction(
3719 ExternalReference::power_double_double_function(masm->isolate()),
3720 0, 2);
3721 }
3722 __ pop(ra);
3723 __ GetCFunctionDoubleResult(double_result);
3724 __ jmp(&done);
3725
3726 __ bind(&int_exponent_convert);
3727 __ mfc1(exponent, single_scratch);
3728 }
3729
3730 // Calculate power with integer exponent.
3731 __ bind(&int_exponent);
3732
3733 __ mov(scratch, exponent); // Back up exponent.
3734 __ mov_d(double_scratch, double_base); // Back up base.
3735 __ Move(double_result, 1.0);
3736
3737 // Get absolute value of exponent.
3738 Label positive_exponent;
3739 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
3740 __ Subu(scratch, zero_reg, scratch);
3741 __ bind(&positive_exponent);
3742
3743 Label while_true, no_carry, loop_end;
3744 __ bind(&while_true);
3745
3746 __ And(scratch2, scratch, 1);
3747
3748 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
3749 __ mul_d(double_result, double_result, double_scratch);
3750 __ bind(&no_carry);
3751
3752 __ sra(scratch, scratch, 1);
3753
3754 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
3755 __ mul_d(double_scratch, double_scratch, double_scratch);
3756
3757 __ Branch(&while_true);
3758
3759 __ bind(&loop_end);
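  // The loop above is binary exponentiation (square-and-multiply) on the
  // absolute value of the exponent; roughly, as a C sketch:
  //   double result = 1.0, b = base;
  //   for (uint32_t e = abs_exponent; e != 0; e >>= 1) {
  //     if (e & 1) result *= b;
  //     b *= b;
  //   }
  // A negative exponent is then handled just below by taking 1.0 / result.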
3760
3761 __ Branch(&done, ge, exponent, Operand(zero_reg));
3762 __ Move(double_scratch, 1.0);
3763 __ div_d(double_result, double_scratch, double_result);
3764 // Test whether result is zero. Bail out to check for subnormal result.
3765 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3766 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
3767
3768 // double_exponent may not contain the exponent value if the input was a
3769  // smi. We set it with the exponent value before bailing out.
3770 __ mtc1(exponent, single_scratch);
3771 __ cvt_d_w(double_exponent, single_scratch);
3772
3773 // Returning or bailing out.
3774 Counters* counters = masm->isolate()->counters();
3775 if (exponent_type_ == ON_STACK) {
3776 // The arguments are still on the stack.
3777 __ bind(&call_runtime);
3778 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3779
3780 // The stub is called from non-optimized code, which expects the result
3781 // as heap number in exponent.
3782 __ bind(&done);
3783 __ AllocateHeapNumber(
3784 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3785 __ sdc1(double_result,
3786 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3787 ASSERT(heapnumber.is(v0));
3788 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3789 __ DropAndRet(2);
3790 } else {
3791 __ push(ra);
3792 {
3793 AllowExternalCallThatCantCauseGC scope(masm);
3794 __ PrepareCallCFunction(0, 2, scratch);
3795 __ SetCallCDoubleArguments(double_base, double_exponent);
3796 __ CallCFunction(
3797 ExternalReference::power_double_double_function(masm->isolate()),
3798 0, 2);
3799 }
3800 __ pop(ra);
3801 __ GetCFunctionDoubleResult(double_result);
3802
3803 __ bind(&done);
3804 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3805 __ Ret();
3806 }
Steve Block44f0eee2011-05-26 01:26:41 +01003807}
3808
3809
3810bool CEntryStub::NeedsImmovableCode() {
3811 return true;
3812}
3813
3814
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003815bool CEntryStub::IsPregenerated() {
3816 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3817 result_size_ == 1;
3818}
3819
3820
3821void CodeStub::GenerateStubsAheadOfTime() {
3822 CEntryStub::GenerateAheadOfTime();
3823 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
3824 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3825 RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3826}
3827
3828
3829void CodeStub::GenerateFPStubs() {
3830 CEntryStub save_doubles(1, kSaveFPRegs);
3831 Handle<Code> code = save_doubles.GetCode();
3832 code->set_is_pregenerated(true);
3833 StoreBufferOverflowStub stub(kSaveFPRegs);
3834 stub.GetCode()->set_is_pregenerated(true);
3835 code->GetIsolate()->set_fp_stubs_generated(true);
3836}
3837
3838
3839void CEntryStub::GenerateAheadOfTime() {
3840 CEntryStub stub(1, kDontSaveFPRegs);
3841 Handle<Code> code = stub.GetCode();
3842 code->set_is_pregenerated(true);
3843}
3844
3845
Steve Block44f0eee2011-05-26 01:26:41 +01003846void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003847 __ Throw(v0);
Steve Block44f0eee2011-05-26 01:26:41 +01003848}
3849
3850
3851void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3852 UncatchableExceptionType type) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003853 __ ThrowUncatchable(type, v0);
Steve Block44f0eee2011-05-26 01:26:41 +01003854}
3855
3856
3857void CEntryStub::GenerateCore(MacroAssembler* masm,
3858 Label* throw_normal_exception,
3859 Label* throw_termination_exception,
3860 Label* throw_out_of_memory_exception,
3861 bool do_gc,
3862 bool always_allocate) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003863 // v0: result parameter for PerformGC, if any
3864 // s0: number of arguments including receiver (C callee-saved)
3865 // s1: pointer to the first argument (C callee-saved)
3866 // s2: pointer to builtin function (C callee-saved)
3867
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003868 Isolate* isolate = masm->isolate();
3869
Ben Murdoch257744e2011-11-30 15:57:28 +00003870 if (do_gc) {
3871 // Move result passed in v0 into a0 to call PerformGC.
3872 __ mov(a0, v0);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003873 __ PrepareCallCFunction(1, 0, a1);
3874 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
Ben Murdoch257744e2011-11-30 15:57:28 +00003875 }
3876
3877 ExternalReference scope_depth =
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003878 ExternalReference::heap_always_allocate_scope_depth(isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00003879 if (always_allocate) {
3880 __ li(a0, Operand(scope_depth));
3881 __ lw(a1, MemOperand(a0));
3882 __ Addu(a1, a1, Operand(1));
3883 __ sw(a1, MemOperand(a0));
3884 }
3885
3886 // Prepare arguments for C routine: a0 = argc, a1 = argv
3887 __ mov(a0, s0);
3888 __ mov(a1, s1);
3889
3890 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3891 // also need to reserve the 4 argument slots on the stack.
3892
3893 __ AssertStackIsAligned();
3894
3895 __ li(a2, Operand(ExternalReference::isolate_address()));
3896
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003897 // To let the GC traverse the return address of the exit frames, we need to
3898 // know where the return address is. The CEntryStub is unmovable, so
3899 // we can store the address on the stack to be able to find it again and
3900 // we never have to restore it, because it will not change.
Ben Murdoch257744e2011-11-30 15:57:28 +00003901 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3902 // This branch-and-link sequence is needed to find the current PC on mips,
3903 // saved to the ra register.
3904 // Use masm-> here instead of the double-underscore macro since extra
3905 // coverage code can interfere with the proper calculation of ra.
3906 Label find_ra;
3907 masm->bal(&find_ra); // bal exposes branch delay slot.
3908 masm->nop(); // Branch delay slot nop.
3909 masm->bind(&find_ra);
3910
3911 // Adjust the value in ra to point to the correct return location, 2nd
3912 // instruction past the real call into C code (the jalr(t9)), and push it.
3913 // This is the return address of the exit frame.
3914 const int kNumInstructionsToJump = 6;
3915 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3916 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
Ben Murdoch589d6972011-11-30 16:04:58 +00003917 masm->Subu(sp, sp, kCArgsSlotsSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00003918 // Stack is still aligned.
3919
3920 // Call the C routine.
3921 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3922 masm->jalr(t9);
3923 masm->nop(); // Branch delay slot nop.
3924 // Make sure the stored 'ra' points to this position.
3925 ASSERT_EQ(kNumInstructionsToJump,
3926 masm->InstructionsGeneratedSince(&find_ra));
3927 }
3928
3929 // Restore stack (remove arg slots).
Ben Murdoch589d6972011-11-30 16:04:58 +00003930 __ Addu(sp, sp, kCArgsSlotsSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00003931
3932 if (always_allocate) {
3933 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3934 __ li(a2, Operand(scope_depth));
3935 __ lw(a3, MemOperand(a2));
3936 __ Subu(a3, a3, Operand(1));
3937 __ sw(a3, MemOperand(a2));
3938 }
3939
3940 // Check for failure result.
3941 Label failure_returned;
3942 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3943 __ addiu(a2, v0, 1);
3944 __ andi(t0, a2, kFailureTagMask);
3945 __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
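  // A failure object carries kFailureTag in its low bits; the STATIC_ASSERT
  // above guarantees that adding 1 clears exactly those bits, so
  // ((v0 + 1) & kFailureTagMask) == 0 identifies a failure result.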
3946
3947 // Exit C frame and return.
3948 // v0:v1: result
3949 // sp: stack pointer
3950 // fp: frame pointer
3951 __ LeaveExitFrame(save_doubles_, s0);
3952 __ Ret();
3953
3954 // Check if we should retry or throw exception.
3955 Label retry;
3956 __ bind(&failure_returned);
3957 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3958 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3959 __ Branch(&retry, eq, t0, Operand(zero_reg));
3960
3961 // Special handling of out of memory exceptions.
3962 Failure* out_of_memory = Failure::OutOfMemoryException();
3963 __ Branch(throw_out_of_memory_exception, eq,
3964 v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3965
3966 // Retrieve the pending exception and clear the variable.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003967 __ li(a3, Operand(isolate->factory()->the_hole_value()));
Ben Murdoch589d6972011-11-30 16:04:58 +00003968 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003969 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00003970 __ lw(v0, MemOperand(t0));
3971 __ sw(a3, MemOperand(t0));
3972
3973 // Special handling of termination exceptions which are uncatchable
3974 // by javascript code.
3975 __ Branch(throw_termination_exception, eq,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003976 v0, Operand(isolate->factory()->termination_exception()));
Ben Murdoch257744e2011-11-30 15:57:28 +00003977
3978 // Handle normal exception.
3979 __ jmp(throw_normal_exception);
3980
3981 __ bind(&retry);
3982 // Last failure (v0) will be moved to (a0) for parameter when retrying.
Steve Block44f0eee2011-05-26 01:26:41 +01003983}
3984
3985
3986void CEntryStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003987 // Called from JavaScript; parameters are on stack as if calling JS function
3988 // a0: number of arguments including receiver
3989 // a1: pointer to builtin function
3990 // fp: frame pointer (restored after C call)
3991 // sp: stack pointer (restored as callee's sp after C call)
3992 // cp: current context (C callee-saved)
3993
3994 // NOTE: Invocations of builtins may return failure objects
3995 // instead of a proper result. The builtin entry handles
3996 // this by performing a garbage collection and retrying the
3997 // builtin once.
3998
3999 // Compute the argv pointer in a callee-saved register.
4000 __ sll(s1, a0, kPointerSizeLog2);
4001 __ Addu(s1, sp, s1);
4002 __ Subu(s1, s1, Operand(kPointerSize));
4003
4004 // Enter the exit frame that transitions from JavaScript to C++.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004005 FrameScope scope(masm, StackFrame::MANUAL);
Ben Murdoch257744e2011-11-30 15:57:28 +00004006 __ EnterExitFrame(save_doubles_);
4007
Ben Murdochc7cc0282012-03-05 14:35:55 +00004008 // Set up argc and the builtin function in callee-saved registers.
Ben Murdoch257744e2011-11-30 15:57:28 +00004009 __ mov(s0, a0);
4010 __ mov(s2, a1);
4011
4012 // s0: number of arguments (C callee-saved)
4013 // s1: pointer to first argument (C callee-saved)
4014 // s2: pointer to builtin function (C callee-saved)
4015
4016 Label throw_normal_exception;
4017 Label throw_termination_exception;
4018 Label throw_out_of_memory_exception;
4019
4020 // Call into the runtime system.
4021 GenerateCore(masm,
4022 &throw_normal_exception,
4023 &throw_termination_exception,
4024 &throw_out_of_memory_exception,
4025 false,
4026 false);
4027
4028 // Do space-specific GC and retry runtime call.
4029 GenerateCore(masm,
4030 &throw_normal_exception,
4031 &throw_termination_exception,
4032 &throw_out_of_memory_exception,
4033 true,
4034 false);
4035
4036 // Do full GC and retry runtime call one final time.
4037 Failure* failure = Failure::InternalError();
4038 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
4039 GenerateCore(masm,
4040 &throw_normal_exception,
4041 &throw_termination_exception,
4042 &throw_out_of_memory_exception,
4043 true,
4044 true);
4045
4046 __ bind(&throw_out_of_memory_exception);
4047 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
4048
4049 __ bind(&throw_termination_exception);
4050 GenerateThrowUncatchable(masm, TERMINATION);
4051
4052 __ bind(&throw_normal_exception);
4053 GenerateThrowTOS(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01004054}
4055
4056
4057void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004058 Label invoke, handler_entry, exit;
4059 Isolate* isolate = masm->isolate();
Ben Murdoch257744e2011-11-30 15:57:28 +00004060
4061 // Registers:
4062 // a0: entry address
4063 // a1: function
Ben Murdochc7cc0282012-03-05 14:35:55 +00004064 // a2: receiver
Ben Murdoch257744e2011-11-30 15:57:28 +00004065 // a3: argc
4066 //
4067 // Stack:
4068 // 4 args slots
4069 // args
4070
4071 // Save callee saved registers on the stack.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004072 __ MultiPush(kCalleeSaved | ra.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00004073
Ben Murdoch589d6972011-11-30 16:04:58 +00004074 if (CpuFeatures::IsSupported(FPU)) {
4075 CpuFeatures::Scope scope(FPU);
4076 // Save callee-saved FPU registers.
4077 __ MultiPushFPU(kCalleeSavedFPU);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004078 // Set up the reserved register for 0.0.
4079 __ Move(kDoubleRegZero, 0.0);
Ben Murdoch589d6972011-11-30 16:04:58 +00004080 }
4081
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004082
Ben Murdoch257744e2011-11-30 15:57:28 +00004083 // Load argv in s0 register.
Ben Murdoch589d6972011-11-30 16:04:58 +00004084 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4085 if (CpuFeatures::IsSupported(FPU)) {
4086 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
4087 }
4088
4089 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00004090
4091 // We build an EntryFrame.
4092 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
4093 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4094 __ li(t2, Operand(Smi::FromInt(marker)));
4095 __ li(t1, Operand(Smi::FromInt(marker)));
Ben Murdoch589d6972011-11-30 16:04:58 +00004096 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004097 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004098 __ lw(t0, MemOperand(t0));
4099 __ Push(t3, t2, t1, t0);
Ben Murdochc7cc0282012-03-05 14:35:55 +00004100 // Set up frame pointer for the frame to be pushed.
Ben Murdoch257744e2011-11-30 15:57:28 +00004101 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
4102
4103 // Registers:
4104 // a0: entry_address
4105 // a1: function
Ben Murdochc7cc0282012-03-05 14:35:55 +00004106 // a2: receiver_pointer
Ben Murdoch257744e2011-11-30 15:57:28 +00004107 // a3: argc
4108 // s0: argv
4109 //
4110 // Stack:
4111 // caller fp |
4112 // function slot | entry frame
4113 // context slot |
4114 // bad fp (0xff...f) |
4115 // callee saved registers + ra
4116 // 4 args slots
4117 // args
4118
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004119 // If this is the outermost JS call, set js_entry_sp value.
4120 Label non_outermost_js;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004121 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004122 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4123 __ lw(t2, MemOperand(t1));
4124 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
4125 __ sw(fp, MemOperand(t1));
4126 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4127 Label cont;
4128 __ b(&cont);
4129 __ nop(); // Branch delay slot nop.
4130 __ bind(&non_outermost_js);
4131 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4132 __ bind(&cont);
4133 __ push(t0);
Ben Murdoch257744e2011-11-30 15:57:28 +00004134
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004135 // Jump to a faked try block that does the invoke, with a faked catch
4136 // block that sets the pending exception.
4137 __ jmp(&invoke);
4138 __ bind(&handler_entry);
4139 handler_offset_ = handler_entry.pos();
4140 // Caught exception: Store result (exception) in the pending exception
4141 // field in the JSEnv and return a failure sentinel. Coming in here the
4142 // fp will be invalid because the PushTryHandler below sets it to 0 to
4143 // signal the existence of the JSEntry frame.
Ben Murdoch589d6972011-11-30 16:04:58 +00004144 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004145 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004146 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
4147 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4148 __ b(&exit); // b exposes branch delay slot.
4149 __ nop(); // Branch delay slot nop.
4150
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004151 // Invoke: Link this frame into the handler chain. There's only one
4152 // handler block in this code object, so its index is 0.
Ben Murdoch257744e2011-11-30 15:57:28 +00004153 __ bind(&invoke);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004154 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
Ben Murdoch257744e2011-11-30 15:57:28 +00004155 // If an exception not caught by another handler occurs, this handler
4156  // returns control to the code after the jmp(&invoke) above, which
4157 // restores all kCalleeSaved registers (including cp and fp) to their
4158 // saved values before returning a failure to C.
4159
4160 // Clear any pending exceptions.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004161 __ li(t1, Operand(isolate->factory()->the_hole_value()));
Ben Murdoch589d6972011-11-30 16:04:58 +00004162 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004163 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004164 __ sw(t1, MemOperand(t0));
4165
4166 // Invoke the function by calling through JS entry trampoline builtin.
4167 // Notice that we cannot store a reference to the trampoline code directly in
4168 // this stub, because runtime stubs are not traversed when doing GC.
4169
4170 // Registers:
4171 // a0: entry_address
4172 // a1: function
Ben Murdochc7cc0282012-03-05 14:35:55 +00004173 // a2: receiver_pointer
Ben Murdoch257744e2011-11-30 15:57:28 +00004174 // a3: argc
4175 // s0: argv
4176 //
4177 // Stack:
4178 // handler frame
4179 // entry frame
4180 // callee saved registers + ra
4181 // 4 args slots
4182 // args
4183
4184 if (is_construct) {
4185 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004186 isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00004187 __ li(t0, Operand(construct_entry));
4188 } else {
4189 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4190 __ li(t0, Operand(entry));
4191 }
4192 __ lw(t9, MemOperand(t0)); // Deref address.
4193
4194 // Call JSEntryTrampoline.
4195 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
4196 __ Call(t9);
4197
4198 // Unlink this frame from the handler chain.
4199 __ PopTryHandler();
4200
4201 __ bind(&exit); // v0 holds result
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004202 // Check if the current stack frame is marked as the outermost JS frame.
4203 Label non_outermost_js_2;
4204 __ pop(t1);
4205 __ Branch(&non_outermost_js_2, ne, t1,
4206 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4207 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4208 __ sw(zero_reg, MemOperand(t1));
4209 __ bind(&non_outermost_js_2);
Ben Murdoch257744e2011-11-30 15:57:28 +00004210
4211 // Restore the top frame descriptors from the stack.
4212 __ pop(t1);
Ben Murdoch589d6972011-11-30 16:04:58 +00004213 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004214 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004215 __ sw(t1, MemOperand(t0));
4216
4217 // Reset the stack to the callee saved registers.
4218 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
4219
Ben Murdoch589d6972011-11-30 16:04:58 +00004220 if (CpuFeatures::IsSupported(FPU)) {
4221 CpuFeatures::Scope scope(FPU);
4222 // Restore callee-saved fpu registers.
4223 __ MultiPopFPU(kCalleeSavedFPU);
4224 }
4225
Ben Murdoch257744e2011-11-30 15:57:28 +00004226 // Restore callee saved registers from the stack.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004227 __ MultiPop(kCalleeSaved | ra.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00004228 // Return.
4229 __ Jump(ra);
Steve Block44f0eee2011-05-26 01:26:41 +01004230}
4231
4232
Ben Murdoch257744e2011-11-30 15:57:28 +00004233// Uses registers a0 to t0.
4234// Expected input (depending on whether args are in registers or on the stack):
4235// * object: a0 or at sp + 1 * kPointerSize.
4236// * function: a1 or at sp.
4237//
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004238// An inlined call site may have been generated before calling this stub.
4239// In this case the offset to the inline site to patch is passed on the stack,
4240// in the safepoint slot for register t0.
Steve Block44f0eee2011-05-26 01:26:41 +01004241void InstanceofStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004242 // Call site inlining and patching implies arguments in registers.
4243 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4244 // ReturnTrueFalse is only implemented for inlined call sites.
4245 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4246
4247 // Fixed register usage throughout the stub:
4248 const Register object = a0; // Object (lhs).
4249 Register map = a3; // Map of the object.
4250 const Register function = a1; // Function (rhs).
4251 const Register prototype = t0; // Prototype of the function.
4252 const Register inline_site = t5;
4253 const Register scratch = a2;
4254
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004255 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
4256
Ben Murdoch257744e2011-11-30 15:57:28 +00004257 Label slow, loop, is_instance, is_not_instance, not_js_object;
4258
4259 if (!HasArgsInRegisters()) {
4260 __ lw(object, MemOperand(sp, 1 * kPointerSize));
4261 __ lw(function, MemOperand(sp, 0));
4262 }
4263
4264 // Check that the left hand is a JS object and load map.
4265 __ JumpIfSmi(object, &not_js_object);
4266 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4267
4268  // If there is a call site cache, don't look in the global cache, but do the
4269  // real lookup and update the call site cache.
4270 if (!HasCallSiteInlineCheck()) {
4271 Label miss;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004272 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4273 __ Branch(&miss, ne, function, Operand(at));
4274 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4275 __ Branch(&miss, ne, map, Operand(at));
Ben Murdoch257744e2011-11-30 15:57:28 +00004276 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4277 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4278
4279 __ bind(&miss);
4280 }
4281
4282 // Get the prototype of the function.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004283 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00004284
4285 // Check that the function prototype is a JS object.
4286 __ JumpIfSmi(prototype, &slow);
4287 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4288
4289 // Update the global instanceof or call site inlined cache with the current
4290 // map and function. The cached answer will be set when it is known below.
4291 if (!HasCallSiteInlineCheck()) {
4292 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4293 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4294 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004295 ASSERT(HasArgsInRegisters());
4296 // Patch the (relocated) inlined map check.
4297
4298 // The offset was stored in t0 safepoint slot.
4299 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
4300 __ LoadFromSafepointRegisterSlot(scratch, t0);
4301 __ Subu(inline_site, ra, scratch);
4302 // Get the map location in scratch and patch it.
4303 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
4304 __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00004305 }
4306
4307 // Register mapping: a3 is object map and t0 is function prototype.
4308 // Get prototype of object into a2.
4309 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4310
4311 // We don't need map any more. Use it as a scratch register.
4312 Register scratch2 = map;
4313 map = no_reg;
4314
4315 // Loop through the prototype chain looking for the function prototype.
4316 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4317 __ bind(&loop);
4318 __ Branch(&is_instance, eq, scratch, Operand(prototype));
4319 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4320 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4321 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4322 __ Branch(&loop);
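  // The loop above is roughly the JS-level check (obj and F are illustrative
  // names for the object and function operands):
  //   for (var p = Object.getPrototypeOf(obj); p !== null;
  //        p = Object.getPrototypeOf(p)) {
  //     if (p === F.prototype) return true;   // is_instance
  //   }
  //   return false;                           // is_not_instance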
4323
4324 __ bind(&is_instance);
4325 ASSERT(Smi::FromInt(0) == 0);
4326 if (!HasCallSiteInlineCheck()) {
4327 __ mov(v0, zero_reg);
4328 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4329 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004330 // Patch the call site to return true.
4331 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4332 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4333 // Get the boolean result location in scratch and patch it.
4334 __ PatchRelocatedValue(inline_site, scratch, v0);
4335
4336 if (!ReturnTrueFalseObject()) {
4337 ASSERT_EQ(Smi::FromInt(0), 0);
4338 __ mov(v0, zero_reg);
4339 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004340 }
4341 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4342
4343 __ bind(&is_not_instance);
4344 if (!HasCallSiteInlineCheck()) {
4345 __ li(v0, Operand(Smi::FromInt(1)));
4346 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4347 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004348 // Patch the call site to return false.
4349 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4350 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4351 // Get the boolean result location in scratch and patch it.
4352 __ PatchRelocatedValue(inline_site, scratch, v0);
4353
4354 if (!ReturnTrueFalseObject()) {
4355 __ li(v0, Operand(Smi::FromInt(1)));
4356 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004357 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004358
Ben Murdoch257744e2011-11-30 15:57:28 +00004359 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4360
4361 Label object_not_null, object_not_null_or_smi;
4362 __ bind(&not_js_object);
4363  // Before the null, smi and string value checks, check that the rhs is a
4364  // function, since a non-function rhs requires an exception to be thrown.
4365 __ JumpIfSmi(function, &slow);
4366 __ GetObjectType(function, scratch2, scratch);
4367 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4368
4369 // Null is not instance of anything.
4370 __ Branch(&object_not_null, ne, scratch,
4371 Operand(masm->isolate()->factory()->null_value()));
4372 __ li(v0, Operand(Smi::FromInt(1)));
4373 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4374
4375 __ bind(&object_not_null);
4376 // Smi values are not instances of anything.
4377 __ JumpIfNotSmi(object, &object_not_null_or_smi);
4378 __ li(v0, Operand(Smi::FromInt(1)));
4379 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4380
4381 __ bind(&object_not_null_or_smi);
4382 // String values are not instances of anything.
4383 __ IsObjectJSStringType(object, scratch, &slow);
4384 __ li(v0, Operand(Smi::FromInt(1)));
4385 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4386
4387 // Slow-case. Tail call builtin.
4388 __ bind(&slow);
4389 if (!ReturnTrueFalseObject()) {
4390 if (HasArgsInRegisters()) {
4391 __ Push(a0, a1);
4392 }
4393 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4394 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004395 {
4396 FrameScope scope(masm, StackFrame::INTERNAL);
4397 __ Push(a0, a1);
4398 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4399 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004400 __ mov(a0, v0);
4401 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4402 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4403 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4404 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4405 }
Steve Block44f0eee2011-05-26 01:26:41 +01004406}
4407
4408
Ben Murdoch257744e2011-11-30 15:57:28 +00004409Register InstanceofStub::left() { return a0; }
4410
4411
4412Register InstanceofStub::right() { return a1; }
4413
4414
Steve Block44f0eee2011-05-26 01:26:41 +01004415void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004416 // The displacement is the offset of the last parameter (if any)
4417 // relative to the frame pointer.
4418 static const int kDisplacement =
4419 StandardFrameConstants::kCallerSPOffset - kPointerSize;
4420
4421  // Check that the key is a smi.
4422 Label slow;
4423 __ JumpIfNotSmi(a1, &slow);
4424
4425 // Check if the calling frame is an arguments adaptor frame.
4426 Label adaptor;
4427 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4428 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4429 __ Branch(&adaptor,
4430 eq,
4431 a3,
4432 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4433
4434 // Check index (a1) against formal parameters count limit passed in
4435 // through register a0. Use unsigned comparison to get negative
4436 // check for free.
4437 __ Branch(&slow, hs, a1, Operand(a0));
4438
4439 // Read the argument from the stack and return it.
4440 __ subu(a3, a0, a1);
4441 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4442 __ Addu(a3, fp, Operand(t3));
4443 __ lw(v0, MemOperand(a3, kDisplacement));
4444 __ Ret();
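  // Both a0 and a1 hold smis (value << kSmiTagSize), so shifting their
  // difference left by kPointerSizeLog2 - kSmiTagSize turns the element count
  // directly into a byte offset without untagging first.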
4445
4446 // Arguments adaptor case: Check index (a1) against actual arguments
4447 // limit found in the arguments adaptor frame. Use unsigned
4448 // comparison to get negative check for free.
4449 __ bind(&adaptor);
4450 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4451 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4452
4453 // Read the argument from the adaptor frame and return it.
4454 __ subu(a3, a0, a1);
4455 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4456 __ Addu(a3, a2, Operand(t3));
4457 __ lw(v0, MemOperand(a3, kDisplacement));
4458 __ Ret();
4459
4460 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4461 // by calling the runtime system.
4462 __ bind(&slow);
4463 __ push(a1);
4464 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01004465}
4466
4467
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004468void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004469 // sp[0] : number of parameters
4470 // sp[4] : receiver displacement
4471 // sp[8] : function
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004472 // Check if the calling frame is an arguments adaptor frame.
4473 Label runtime;
4474 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4475 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4476 __ Branch(&runtime, ne,
4477 a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004478
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004479 // Patch the arguments.length and the parameters pointer in the current frame.
4480 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4481 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4482 __ sll(t3, a2, 1);
4483 __ Addu(a3, a3, Operand(t3));
4484 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4485 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4486
4487 __ bind(&runtime);
4488 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4489}
4490
4491
4492void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4493 // Stack layout:
4494 // sp[0] : number of parameters (tagged)
4495 // sp[4] : address of receiver argument
4496 // sp[8] : function
4497 // Registers used over whole function:
4498 // t2 : allocated object (tagged)
4499 // t5 : mapped parameter count (tagged)
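  // Rough sketch of what this fast path allocates (assumed layout, everything
  // tagged): a single new-space chunk holding
  //   [arguments object]  Heap::kArgumentsObjectSize bytes
  //   [parameter map]     only present when the mapped parameter count > 0
  //   [backing store]     FixedArray with argument_count elements
  // The arguments object's elements pointer refers to the parameter map when
  // one exists, otherwise directly to the backing store.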
4500
4501 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4502 // a1 = parameter count (tagged)
4503
4504 // Check if the calling frame is an arguments adaptor frame.
4505 Label runtime;
4506 Label adaptor_frame, try_allocate;
4507 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4508 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4509 __ Branch(&adaptor_frame, eq, a2,
4510 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4511
4512 // No adaptor, parameter count = argument count.
4513 __ mov(a2, a1);
4514 __ b(&try_allocate);
4515 __ nop(); // Branch delay slot nop.
4516
4517 // We have an adaptor frame. Patch the parameters pointer.
4518 __ bind(&adaptor_frame);
4519 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4520 __ sll(t6, a2, 1);
4521 __ Addu(a3, a3, Operand(t6));
4522 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4523 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4524
4525 // a1 = parameter count (tagged)
4526 // a2 = argument count (tagged)
4527 // Compute the mapped parameter count = min(a1, a2) in a1.
4528 Label skip_min;
4529 __ Branch(&skip_min, lt, a1, Operand(a2));
4530 __ mov(a1, a2);
4531 __ bind(&skip_min);
4532
4533 __ bind(&try_allocate);
4534
4535 // Compute the sizes of backing store, parameter map, and arguments object.
4536 // 1. Parameter map, has 2 extra words containing context and backing store.
4537 const int kParameterMapHeaderSize =
4538 FixedArray::kHeaderSize + 2 * kPointerSize;
4539 // If there are no mapped parameters, we do not need the parameter_map.
4540 Label param_map_size;
4541 ASSERT_EQ(0, Smi::FromInt(0));
4542 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4543 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
4544 __ sll(t5, a1, 1);
4545 __ addiu(t5, t5, kParameterMapHeaderSize);
4546 __ bind(&param_map_size);
4547
4548 // 2. Backing store.
4549 __ sll(t6, a2, 1);
4550 __ Addu(t5, t5, Operand(t6));
4551 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4552
4553 // 3. Arguments object.
4554 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4555
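  // In C-like terms, the byte count accumulated in t5 above is (sketch only;
  // mapped_count and arg_count are the untagged values of a1 and a2):
  //   size = (mapped_count == 0
  //               ? 0
  //               : kParameterMapHeaderSize + mapped_count * kPointerSize) +
  //          FixedArray::kHeaderSize + arg_count * kPointerSize +
  //          Heap::kArgumentsObjectSize;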
4556 // Do the allocation of all three objects in one go.
4557 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4558
4559 // v0 = address of new object(s) (tagged)
4560 // a2 = argument count (tagged)
4561 // Get the arguments boilerplate from the current (global) context into t0.
4562 const int kNormalOffset =
4563 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4564 const int kAliasedOffset =
4565 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4566
4567 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4568 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4569 Label skip2_ne, skip2_eq;
4570 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4571 __ lw(t0, MemOperand(t0, kNormalOffset));
4572 __ bind(&skip2_ne);
4573
4574 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4575 __ lw(t0, MemOperand(t0, kAliasedOffset));
4576 __ bind(&skip2_eq);
4577
4578 // v0 = address of new object (tagged)
4579 // a1 = mapped parameter count (tagged)
4580 // a2 = argument count (tagged)
4581 // t0 = address of boilerplate object (tagged)
4582 // Copy the JS object part.
4583 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4584 __ lw(a3, FieldMemOperand(t0, i));
4585 __ sw(a3, FieldMemOperand(v0, i));
4586 }
4587
Ben Murdochc7cc0282012-03-05 14:35:55 +00004588 // Set up the callee in-object property.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004589 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4590 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4591 const int kCalleeOffset = JSObject::kHeaderSize +
4592 Heap::kArgumentsCalleeIndex * kPointerSize;
4593 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4594
4595 // Use the length (smi tagged) and set that as an in-object property too.
4596 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4597 const int kLengthOffset = JSObject::kHeaderSize +
4598 Heap::kArgumentsLengthIndex * kPointerSize;
4599 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4600
Ben Murdochc7cc0282012-03-05 14:35:55 +00004601 // Set up the elements pointer in the allocated arguments object.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004602 // If we allocated a parameter map, t0 will point there, otherwise
4603 // it will point to the backing store.
4604 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4605 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4606
4607 // v0 = address of new object (tagged)
4608 // a1 = mapped parameter count (tagged)
4609 // a2 = argument count (tagged)
4610 // t0 = address of parameter map or backing store (tagged)
4611 // Initialize parameter map. If there are no mapped arguments, we're done.
4612 Label skip_parameter_map;
4613 Label skip3;
4614 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4615 // Move backing store address to a3, because it is
4616 // expected there when filling in the unmapped arguments.
4617 __ mov(a3, t0);
4618 __ bind(&skip3);
4619
4620 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4621
4622 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4623 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4624 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4625 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4626 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4627 __ sll(t6, a1, 1);
4628 __ Addu(t2, t0, Operand(t6));
4629 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4630 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4631
4632 // Copy the parameter slots and the holes in the arguments.
4633 // We need to fill in mapped_parameter_count slots. They index the context,
4634 // where parameters are stored in reverse order, at
4635 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4636  // The mapped parameters thus need to get indices
4637 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4638 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4639 // We loop from right to left.
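  // As a sketch, with untagged values and i running from mapped_count down
  // to 1 (illustration only; indices are FixedArray element indices, where
  // elements 0 and 1 of the parameter map hold the context and the backing
  // store pointer):
  //   for (int i = mapped_count; i > 0; --i) {
  //     parameter_map[i + 1] = Smi(MIN_CONTEXT_SLOTS + parameter_count - i);
  //     backing_store[i - 1] = the_hole;
  //   }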
4640 Label parameters_loop, parameters_test;
4641 __ mov(t2, a1);
4642 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4643 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4644 __ Subu(t5, t5, Operand(a1));
4645 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4646 __ sll(t6, t2, 1);
4647 __ Addu(a3, t0, Operand(t6));
4648 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4649
4650 // t2 = loop variable (tagged)
4651 // a1 = mapping index (tagged)
4652 // a3 = address of backing store (tagged)
4653 // t0 = address of parameter map (tagged)
4654  // t1 = temporary scratch (e.g., for address calculation)
4655 // t3 = the hole value
4656 __ jmp(&parameters_test);
4657
4658 __ bind(&parameters_loop);
4659 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4660 __ sll(t1, t2, 1);
4661 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4662 __ Addu(t6, t0, t1);
4663 __ sw(t5, MemOperand(t6));
4664 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4665 __ Addu(t6, a3, t1);
4666 __ sw(t3, MemOperand(t6));
4667 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4668 __ bind(&parameters_test);
4669 __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4670
4671 __ bind(&skip_parameter_map);
4672 // a2 = argument count (tagged)
4673 // a3 = address of backing store (tagged)
4674 // t1 = scratch
4675 // Copy arguments header and remaining slots (if there are any).
4676 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4677 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4678 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4679
4680 Label arguments_loop, arguments_test;
4681 __ mov(t5, a1);
4682 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4683 __ sll(t6, t5, 1);
4684 __ Subu(t0, t0, Operand(t6));
4685 __ jmp(&arguments_test);
4686
4687 __ bind(&arguments_loop);
4688 __ Subu(t0, t0, Operand(kPointerSize));
4689 __ lw(t2, MemOperand(t0, 0));
4690 __ sll(t6, t5, 1);
4691 __ Addu(t1, a3, Operand(t6));
4692 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4693 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4694
4695 __ bind(&arguments_test);
4696 __ Branch(&arguments_loop, lt, t5, Operand(a2));
4697
4698 // Return and remove the on-stack parameters.
4699 __ Addu(sp, sp, Operand(3 * kPointerSize));
4700 __ Ret();
4701
4702 // Do the runtime call to allocate the arguments object.
Ben Murdochc7cc0282012-03-05 14:35:55 +00004703 // a2 = argument count (tagged)
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004704 __ bind(&runtime);
4705 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4706 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4707}
4708
4709
4710void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4711 // sp[0] : number of parameters
4712 // sp[4] : receiver displacement
4713 // sp[8] : function
Ben Murdoch257744e2011-11-30 15:57:28 +00004714 // Check if the calling frame is an arguments adaptor frame.
4715 Label adaptor_frame, try_allocate, runtime;
4716 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4717 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4718 __ Branch(&adaptor_frame,
4719 eq,
4720 a3,
4721 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4722
4723 // Get the length from the frame.
4724 __ lw(a1, MemOperand(sp, 0));
4725 __ Branch(&try_allocate);
4726
4727 // Patch the arguments.length and the parameters pointer.
4728 __ bind(&adaptor_frame);
4729 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4730 __ sw(a1, MemOperand(sp, 0));
4731 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4732 __ Addu(a3, a2, Operand(at));
4733
4734 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4735 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4736
4737 // Try the new space allocation. Start out with computing the size
4738 // of the arguments object and the elements array in words.
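  // In rough C terms the word count computed below is (sketch only; length is
  // the untagged value loaded from sp[0]):
  //   words = Heap::kArgumentsObjectSizeStrict / kPointerSize +
  //           (length == 0 ? 0
  //                        : FixedArray::kHeaderSize / kPointerSize + length);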
4739 Label add_arguments_object;
4740 __ bind(&try_allocate);
4741 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4742 __ srl(a1, a1, kSmiTagSize);
4743
4744 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4745 __ bind(&add_arguments_object);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004746 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00004747
4748 // Do the allocation of both objects in one go.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004749 __ AllocateInNewSpace(a1,
4750 v0,
4751 a2,
4752 a3,
4753 &runtime,
4754 static_cast<AllocationFlags>(TAG_OBJECT |
4755 SIZE_IN_WORDS));
Ben Murdoch257744e2011-11-30 15:57:28 +00004756
4757 // Get the arguments boilerplate from the current (global) context.
4758 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4759 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004760 __ lw(t0, MemOperand(t0, Context::SlotOffset(
4761 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004762
4763 // Copy the JS object part.
4764 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4765
Ben Murdoch257744e2011-11-30 15:57:28 +00004766 // Get the length (smi tagged) and set that as an in-object property too.
4767 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4768 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4769 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004770 Heap::kArgumentsLengthIndex * kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00004771
4772 Label done;
4773 __ Branch(&done, eq, a1, Operand(zero_reg));
4774
4775 // Get the parameters pointer from the stack.
4776 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4777
Ben Murdochc7cc0282012-03-05 14:35:55 +00004778 // Set up the elements pointer in the allocated arguments object and
Ben Murdoch257744e2011-11-30 15:57:28 +00004779 // initialize the header in the elements fixed array.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004780 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
Ben Murdoch257744e2011-11-30 15:57:28 +00004781 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4782 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4783 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4784 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004785 // Untag the length for the loop.
4786 __ srl(a1, a1, kSmiTagSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00004787
4788 // Copy the fixed array slots.
4789 Label loop;
Ben Murdochc7cc0282012-03-05 14:35:55 +00004790 // Set up t0 to point to the first array slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00004791 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4792 __ bind(&loop);
4793 // Pre-decrement a2 with kPointerSize on each iteration.
4794 // Pre-decrement in order to skip receiver.
4795 __ Addu(a2, a2, Operand(-kPointerSize));
4796 __ lw(a3, MemOperand(a2));
4797 // Post-increment t0 with kPointerSize on each iteration.
4798 __ sw(a3, MemOperand(t0));
4799 __ Addu(t0, t0, Operand(kPointerSize));
4800 __ Subu(a1, a1, Operand(1));
4801 __ Branch(&loop, ne, a1, Operand(zero_reg));
4802
4803 // Return and remove the on-stack parameters.
4804 __ bind(&done);
4805 __ Addu(sp, sp, Operand(3 * kPointerSize));
4806 __ Ret();
4807
4808 // Do the runtime call to allocate the arguments object.
4809 __ bind(&runtime);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004810 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01004811}
4812
4813
4814void RegExpExecStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004815  // Just jump directly to the runtime if native RegExp is not selected at
4816  // compile time, or if the regexp entry in generated code is turned off by a
4817  // runtime switch or at compilation.
4818#ifdef V8_INTERPRETED_REGEXP
4819 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4820#else // V8_INTERPRETED_REGEXP
Ben Murdoch257744e2011-11-30 15:57:28 +00004821
4822 // Stack frame on entry.
4823 // sp[0]: last_match_info (expected JSArray)
4824 // sp[4]: previous index
4825 // sp[8]: subject string
4826 // sp[12]: JSRegExp object
4827
4828 static const int kLastMatchInfoOffset = 0 * kPointerSize;
4829 static const int kPreviousIndexOffset = 1 * kPointerSize;
4830 static const int kSubjectOffset = 2 * kPointerSize;
4831 static const int kJSRegExpOffset = 3 * kPointerSize;
4832
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004833 Isolate* isolate = masm->isolate();
4834
Ben Murdoch257744e2011-11-30 15:57:28 +00004835 Label runtime, invoke_regexp;
4836
4837 // Allocation of registers for this function. These are in callee save
4838 // registers and will be preserved by the call to the native RegExp code, as
4839 // this code is called using the normal C calling convention. When calling
4840 // directly from generated code the native RegExp code will not do a GC and
4841  // therefore the contents of these registers are safe to use after the call.
4842 // MIPS - using s0..s2, since we are not using CEntry Stub.
4843 Register subject = s0;
4844 Register regexp_data = s1;
4845 Register last_match_info_elements = s2;
4846
4847 // Ensure that a RegExp stack is allocated.
4848 ExternalReference address_of_regexp_stack_memory_address =
4849 ExternalReference::address_of_regexp_stack_memory_address(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004850 isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00004851 ExternalReference address_of_regexp_stack_memory_size =
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004852 ExternalReference::address_of_regexp_stack_memory_size(isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00004853 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4854 __ lw(a0, MemOperand(a0, 0));
4855 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4856
4857 // Check that the first argument is a JSRegExp object.
4858 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4859 STATIC_ASSERT(kSmiTag == 0);
4860 __ JumpIfSmi(a0, &runtime);
4861 __ GetObjectType(a0, a1, a1);
4862 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4863
4864 // Check that the RegExp has been compiled (data contains a fixed array).
4865 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4866 if (FLAG_debug_code) {
4867 __ And(t0, regexp_data, Operand(kSmiTagMask));
4868 __ Check(nz,
4869 "Unexpected type for RegExp data, FixedArray expected",
4870 t0,
4871 Operand(zero_reg));
4872 __ GetObjectType(regexp_data, a0, a0);
4873 __ Check(eq,
4874 "Unexpected type for RegExp data, FixedArray expected",
4875 a0,
4876 Operand(FIXED_ARRAY_TYPE));
4877 }
4878
4879 // regexp_data: RegExp data (FixedArray)
4880 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4881 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4882 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4883
4884 // regexp_data: RegExp data (FixedArray)
4885 // Check that the number of captures fit in the static offsets vector buffer.
4886 __ lw(a2,
4887 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4888 // Calculate number of capture registers (number_of_captures + 1) * 2. This
4889  // uses the assumption that smis are 2 * their untagged value.
4890 STATIC_ASSERT(kSmiTag == 0);
4891 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4892 __ Addu(a2, a2, Operand(2)); // a2 was a smi.
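  // Equivalently (illustration): a2 held Smi(captures), i.e. captures * 2, so
  // after the add a2 == (captures + 1) * 2, the number of capture registers.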
4893 // Check that the static offsets vector buffer is large enough.
4894 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4895
4896 // a2: Number of capture registers
4897 // regexp_data: RegExp data (FixedArray)
4898 // Check that the second argument is a string.
4899 __ lw(subject, MemOperand(sp, kSubjectOffset));
4900 __ JumpIfSmi(subject, &runtime);
4901 __ GetObjectType(subject, a0, a0);
4902 __ And(a0, a0, Operand(kIsNotStringMask));
4903 STATIC_ASSERT(kStringTag == 0);
4904 __ Branch(&runtime, ne, a0, Operand(zero_reg));
4905
4906  // Get the length of the subject string into a3.
4907 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4908
4909 // a2: Number of capture registers
4910 // a3: Length of subject string as a smi
4911 // subject: Subject string
4912 // regexp_data: RegExp data (FixedArray)
4913 // Check that the third argument is a positive smi less than the subject
4914 // string length. A negative value will be greater (unsigned comparison).
4915 __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004916 __ JumpIfNotSmi(a0, &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00004917 __ Branch(&runtime, ls, a3, Operand(a0));
4918
4919 // a2: Number of capture registers
4920 // subject: Subject string
4921 // regexp_data: RegExp data (FixedArray)
4922 // Check that the fourth object is a JSArray object.
4923 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4924 __ JumpIfSmi(a0, &runtime);
4925 __ GetObjectType(a0, a1, a1);
4926 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4927 // Check that the JSArray is in fast case.
4928 __ lw(last_match_info_elements,
4929 FieldMemOperand(a0, JSArray::kElementsOffset));
4930 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4931 __ Branch(&runtime, ne, a0, Operand(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004932 isolate->factory()->fixed_array_map()));
Ben Murdoch257744e2011-11-30 15:57:28 +00004933 // Check that the last match info has space for the capture registers and the
4934 // additional information.
4935 __ lw(a0,
4936 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4937 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4938 __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4939 __ Branch(&runtime, gt, a2, Operand(at));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004940
4941 // Reset offset for possibly sliced string.
4942 __ mov(t0, zero_reg);
Ben Murdoch257744e2011-11-30 15:57:28 +00004943 // subject: Subject string
4944 // regexp_data: RegExp data (FixedArray)
4945 // Check the representation and encoding of the subject string.
4946 Label seq_string;
4947 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4948 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004949 // First check for flat string. None of the following string type tests will
4950 // succeed if subject is not a string or a short external string.
4951 __ And(a1,
4952 a0,
4953 Operand(kIsNotStringMask |
4954 kStringRepresentationMask |
4955 kShortExternalStringMask));
Ben Murdoch257744e2011-11-30 15:57:28 +00004956 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004957 __ Branch(&seq_string, eq, a1, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00004958
4959 // subject: Subject string
4960  // a0: instance type of subject string
4961 // regexp_data: RegExp data (FixedArray)
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004962 // a1: whether subject is a string and if yes, its string representation
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004963 // Check for flat cons string or sliced string.
Ben Murdoch257744e2011-11-30 15:57:28 +00004964 // A flat cons string is a cons string where the second part is the empty
4965 // string. In that case the subject string is just the first part of the cons
4966 // string. Also in this case the first part of the cons string is known to be
4967 // a sequential string or an external string.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004968 // In the case of a sliced string its offset has to be taken into account.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004969 Label cons_string, external_string, check_encoding;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004970 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4971 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004972 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
4973 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004974 __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004975 __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
4976
4977 // Catch non-string subject or short external string.
4978  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
4979 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
4980 __ Branch(&runtime, ne, at, Operand(zero_reg));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004981
4982 // String is sliced.
4983 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4984 __ sra(t0, t0, kSmiTagSize);
4985 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4986  // t0: offset of sliced string, untagged.
4987 __ jmp(&check_encoding);
4988 // String is a cons string, check whether it is flat.
4989 __ bind(&cons_string);
Ben Murdoch257744e2011-11-30 15:57:28 +00004990 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4991 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4992 __ Branch(&runtime, ne, a0, Operand(a1));
4993 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00004994 // Is first part of cons or parent of slice a flat string?
4995 __ bind(&check_encoding);
Ben Murdoch257744e2011-11-30 15:57:28 +00004996 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4997 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00004998 STATIC_ASSERT(kSeqStringTag == 0);
4999 __ And(at, a0, Operand(kStringRepresentationMask));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005000 __ Branch(&external_string, ne, at, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00005001
5002 __ bind(&seq_string);
5003 // subject: Subject string
5004 // regexp_data: RegExp data (FixedArray)
5005 // a0: Instance type of subject string
5006 STATIC_ASSERT(kStringEncodingMask == 4);
5007 STATIC_ASSERT(kAsciiStringTag == 4);
5008 STATIC_ASSERT(kTwoByteStringTag == 0);
5009 // Find the code object based on the assumptions above.
Ben Murdochc7cc0282012-03-05 14:35:55 +00005010 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
Ben Murdoch257744e2011-11-30 15:57:28 +00005011 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
Ben Murdochc7cc0282012-03-05 14:35:55 +00005012 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005013 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
5014 __ movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
Ben Murdoch257744e2011-11-30 15:57:28 +00005015
5016 // Check that the irregexp code has been generated for the actual string
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005017  // encoding. If it has, the field contains a code object; otherwise it contains
5018 // a smi (code flushing support).
5019 __ JumpIfSmi(t9, &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00005020
5021 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5022 // t9: code
5023 // subject: Subject string
5024 // regexp_data: RegExp data (FixedArray)
5025 // Load used arguments before starting to push arguments for call to native
5026 // RegExp code to avoid handling changing stack height.
5027 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
5028 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
5029
5030 // a1: previous index
5031 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5032 // t9: code
5033 // subject: Subject string
5034 // regexp_data: RegExp data (FixedArray)
5035 // All checks done. Now push arguments for native regexp code.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005036 __ IncrementCounter(isolate->counters()->regexp_entry_native(),
Ben Murdoch257744e2011-11-30 15:57:28 +00005037 1, a0, a2);
5038
5039 // Isolates: note we add an additional parameter here (isolate pointer).
5040 static const int kRegExpExecuteArguments = 8;
5041 static const int kParameterRegisters = 4;
5042 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
5043
5044 // Stack pointer now points to cell where return address is to be written.
5045 // Arguments are before that on the stack or in registers, meaning we
5046 // treat the return address as argument 5. Thus every argument after that
5047 // needs to be shifted back by 1. Since DirectCEntryStub will handle
5048 // allocating space for the c argument slots, we don't need to calculate
5049  // allocating space for the C argument slots, we don't need to calculate
5050 // look (sp meaning the value of sp at this moment):
5051 // [sp + 4] - Argument 8
5052 // [sp + 3] - Argument 7
5053 // [sp + 2] - Argument 6
5054 // [sp + 1] - Argument 5
5055 // [sp + 0] - saved ra
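  // Conceptually the generated irregexp code is entered as if it were a C
  // function with roughly the following signature (hypothetical prototype,
  // names invented for illustration; see the argument setup below):
  //   int Match(String* subject,          // a0, argument 1
  //             int previous_index,       // a1, argument 2
  //             const byte* input_start,  // a2, argument 3
  //             const byte* input_end,    // a3, argument 4
  //             int* static_offsets,      // stack, argument 5
  //             byte* stack_area_top,     // stack, argument 6
  //             int direct_call,          // stack, argument 7
  //             Isolate* isolate);        // stack, argument 8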
5056
5057 // Argument 8: Pass current isolate address.
5058 // CFunctionArgumentOperand handles MIPS stack argument slots.
5059 __ li(a0, Operand(ExternalReference::isolate_address()));
5060 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
5061
5062 // Argument 7: Indicate that this is a direct call from JavaScript.
5063 __ li(a0, Operand(1));
5064 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
5065
5066 // Argument 6: Start (high end) of backtracking stack memory area.
5067 __ li(a0, Operand(address_of_regexp_stack_memory_address));
5068 __ lw(a0, MemOperand(a0, 0));
5069 __ li(a2, Operand(address_of_regexp_stack_memory_size));
5070 __ lw(a2, MemOperand(a2, 0));
5071 __ addu(a0, a0, a2);
5072 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
5073
5074 // Argument 5: static offsets vector buffer.
5075 __ li(a0, Operand(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005076 ExternalReference::address_of_static_offsets_vector(isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00005077 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
5078
5079 // For arguments 4 and 3 get string length, calculate start of string data
5080 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005081 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
Ben Murdoch257744e2011-11-30 15:57:28 +00005082 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005083 // Load the length from the original subject string from the previous stack
5084 // frame. Therefore we have to use fp, which points exactly to two pointer
5085 // sizes below the previous sp. (Because creating a new stack frame pushes
5086 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
Ben Murdoch589d6972011-11-30 16:04:58 +00005087 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005088 // If slice offset is not 0, load the length from the original sliced string.
5089 // Argument 4, a3: End of string data
5090 // Argument 3, a2: Start of string data
5091 // Prepare start and end index of the input.
5092 __ sllv(t1, t0, a3);
5093 __ addu(t0, t2, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00005094 __ sllv(t1, a1, a3);
5095 __ addu(a2, t0, t1);
Ben Murdoch257744e2011-11-30 15:57:28 +00005096
Ben Murdoch589d6972011-11-30 16:04:58 +00005097 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005098 __ sra(t2, t2, kSmiTagSize);
5099 __ sllv(t1, t2, a3);
5100 __ addu(a3, t0, t1);
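  // What was just computed, in C-like terms (sketch; size_log2 in a3 is 0 for
  // ASCII and 1 for two-byte strings):
  //   byte* data = subject + SeqString::kHeaderSize - kHeapObjectTag
  //                + (slice_offset << size_log2);
  //   a2 (argument 3) = data + (previous_index << size_log2);  // start
  //   a3 (argument 4) = data + (subject_length << size_log2);  // end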
Ben Murdoch257744e2011-11-30 15:57:28 +00005101 // Argument 2 (a1): Previous index.
5102 // Already there
5103
5104 // Argument 1 (a0): Subject string.
Ben Murdoch589d6972011-11-30 16:04:58 +00005105 __ mov(a0, subject);
Ben Murdoch257744e2011-11-30 15:57:28 +00005106
5107 // Locate the code entry and call it.
5108 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
5109 DirectCEntryStub stub;
5110 stub.GenerateCall(masm, t9);
5111
5112 __ LeaveExitFrame(false, no_reg);
5113
5114 // v0: result
5115 // subject: subject string (callee saved)
5116 // regexp_data: RegExp data (callee saved)
5117 // last_match_info_elements: Last match info elements (callee saved)
5118
5119 // Check the result.
5120
5121 Label success;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005122 __ Branch(&success, eq,
Ben Murdoch589d6972011-11-30 16:04:58 +00005123 v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
Ben Murdoch257744e2011-11-30 15:57:28 +00005124 Label failure;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005125 __ Branch(&failure, eq,
Ben Murdoch589d6972011-11-30 16:04:58 +00005126 v0, Operand(NativeRegExpMacroAssembler::FAILURE));
Ben Murdoch257744e2011-11-30 15:57:28 +00005127 // If not exception it can only be retry. Handle that in the runtime system.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005128 __ Branch(&runtime, ne,
Ben Murdoch589d6972011-11-30 16:04:58 +00005129 v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
Ben Murdoch257744e2011-11-30 15:57:28 +00005130  // Result must now be exception. If there is no pending exception already, a
5131  // stack overflow (on the backtrack stack) was detected in RegExp code, but
5132  // the exception has not been created yet. Handle that in the runtime system.
5133 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005134 __ li(a1, Operand(isolate->factory()->the_hole_value()));
Ben Murdoch589d6972011-11-30 16:04:58 +00005135 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005136 isolate)));
Ben Murdoch257744e2011-11-30 15:57:28 +00005137 __ lw(v0, MemOperand(a2, 0));
Ben Murdoch589d6972011-11-30 16:04:58 +00005138 __ Branch(&runtime, eq, v0, Operand(a1));
Ben Murdoch257744e2011-11-30 15:57:28 +00005139
5140 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
5141
5142 // Check if the exception is a termination. If so, throw as uncatchable.
5143 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5144 Label termination_exception;
Ben Murdoch589d6972011-11-30 16:04:58 +00005145 __ Branch(&termination_exception, eq, v0, Operand(a0));
Ben Murdoch257744e2011-11-30 15:57:28 +00005146
Ben Murdoch589d6972011-11-30 16:04:58 +00005147 __ Throw(v0); // Expects thrown value in v0.
Ben Murdoch257744e2011-11-30 15:57:28 +00005148
5149 __ bind(&termination_exception);
5150 __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
5151
5152 __ bind(&failure);
5153 // For failure and exception return null.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005154 __ li(v0, Operand(isolate->factory()->null_value()));
Ben Murdoch257744e2011-11-30 15:57:28 +00005155 __ Addu(sp, sp, Operand(4 * kPointerSize));
5156 __ Ret();
5157
5158 // Process the result from the native regexp code.
5159 __ bind(&success);
5160 __ lw(a1,
5161 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
5162 // Calculate number of capture registers (number_of_captures + 1) * 2.
5163 STATIC_ASSERT(kSmiTag == 0);
5164 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5165 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5166
5167 // a1: number of capture registers
5168 // subject: subject string
5169 // Store the capture count.
5170 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
5171 __ sw(a2, FieldMemOperand(last_match_info_elements,
5172 RegExpImpl::kLastCaptureCountOffset));
5173 // Store last subject and last input.
Ben Murdoch257744e2011-11-30 15:57:28 +00005174 __ sw(subject,
5175 FieldMemOperand(last_match_info_elements,
5176 RegExpImpl::kLastSubjectOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005177 __ mov(a2, subject);
5178 __ RecordWriteField(last_match_info_elements,
5179 RegExpImpl::kLastSubjectOffset,
5180 a2,
5181 t3,
5182 kRAHasNotBeenSaved,
5183 kDontSaveFPRegs);
Ben Murdoch257744e2011-11-30 15:57:28 +00005184 __ sw(subject,
5185 FieldMemOperand(last_match_info_elements,
5186 RegExpImpl::kLastInputOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005187 __ RecordWriteField(last_match_info_elements,
5188 RegExpImpl::kLastInputOffset,
5189 subject,
5190 t3,
5191 kRAHasNotBeenSaved,
5192 kDontSaveFPRegs);
Ben Murdoch257744e2011-11-30 15:57:28 +00005193
5194 // Get the static offsets vector filled by the native regexp code.
5195 ExternalReference address_of_static_offsets_vector =
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005196 ExternalReference::address_of_static_offsets_vector(isolate);
Ben Murdoch257744e2011-11-30 15:57:28 +00005197 __ li(a2, Operand(address_of_static_offsets_vector));
5198
5199 // a1: number of capture registers
5200 // a2: offsets vector
5201 Label next_capture, done;
5202 // Capture register counter starts from number of capture registers and
5203 // counts down until wrapping after zero.
5204 __ Addu(a0,
5205 last_match_info_elements,
5206 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
5207 __ bind(&next_capture);
5208 __ Subu(a1, a1, Operand(1));
5209 __ Branch(&done, lt, a1, Operand(zero_reg));
5210 // Read the value from the static offsets vector buffer.
5211 __ lw(a3, MemOperand(a2, 0));
5212 __ addiu(a2, a2, kPointerSize);
5213 // Store the smi value in the last match info.
5214 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5215 __ sw(a3, MemOperand(a0, 0));
5216 __ Branch(&next_capture, USE_DELAY_SLOT);
5217 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5218
5219 __ bind(&done);
5220
5221 // Return last match info.
5222 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5223 __ Addu(sp, sp, Operand(4 * kPointerSize));
5224 __ Ret();
5225
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005226 // External string. Short external strings have already been ruled out.
5227 // a0: scratch
5228 __ bind(&external_string);
5229 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5230 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5231 if (FLAG_debug_code) {
5232 // Assert that we do not have a cons or slice (indirect strings) here.
5233 // Sequential strings have already been ruled out.
5234 __ And(at, a0, Operand(kIsIndirectStringMask));
5235 __ Assert(eq,
5236 "external string expected, but not found",
5237 at,
5238 Operand(zero_reg));
5239 }
5240 __ lw(subject,
5241 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
5242 // Move the pointer so that offset-wise, it looks like a sequential string.
5243 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5244 __ Subu(subject,
5245 subject,
5246 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
5247 __ jmp(&seq_string);
5248
Ben Murdoch257744e2011-11-30 15:57:28 +00005249 // Do the runtime call to execute the regexp.
5250 __ bind(&runtime);
5251 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5252#endif // V8_INTERPRETED_REGEXP
Steve Block44f0eee2011-05-26 01:26:41 +01005253}
5254
5255
5256void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005257 const int kMaxInlineLength = 100;
5258 Label slowcase;
5259 Label done;
5260 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5261 STATIC_ASSERT(kSmiTag == 0);
5262 STATIC_ASSERT(kSmiTagSize == 1);
5263 __ JumpIfNotSmi(a1, &slowcase);
5264 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5265 // Smi-tagging is equivalent to multiplying by 2.
5266  // Allocate RegExpResult followed by FixedArray with size in a2.
5267 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5268 // Elements: [Map][Length][..elements..]
5269 // Size of JSArray with two in-object properties and the header of a
5270 // FixedArray.
5271 int objects_size =
5272 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
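  // I.e. the allocation request computed below is, in words (illustration):
  //   total = SmiValue(length) +
  //           (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;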
5273 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5274 __ Addu(a2, t1, Operand(objects_size));
5275 __ AllocateInNewSpace(
5276 a2, // In: Size, in words.
5277 v0, // Out: Start of allocation (tagged).
5278 a3, // Scratch register.
5279 t0, // Scratch register.
5280 &slowcase,
5281 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5282 // v0: Start of allocated area, object-tagged.
5283 // a1: Number of elements in array, as smi.
5284 // t1: Number of elements, untagged.
5285
5286 // Set JSArray map to global.regexp_result_map().
5287 // Set empty properties FixedArray.
5288 // Set elements to point to FixedArray allocated right after the JSArray.
5289 // Interleave operations for better latency.
5290 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
5291 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5292 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5293 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
5294 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
5295 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
5296 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
5297 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
5298
5299 // Set input, index and length fields from arguments.
5300 __ lw(a1, MemOperand(sp, kPointerSize * 0));
5301 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
5302 __ lw(a1, MemOperand(sp, kPointerSize * 1));
5303 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
5304 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5305 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
5306
5307 // Fill out the elements FixedArray.
5308 // v0: JSArray, tagged.
5309 // a3: FixedArray, tagged.
5310 // t1: Number of elements in array, untagged.
5311
5312 // Set map.
5313 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5314 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
5315 // Set FixedArray length.
5316 __ sll(t2, t1, kSmiTagSize);
5317 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
5318 // Fill contents of fixed-array with the-hole.
5319 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
5320 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5321 // Fill fixed array elements with hole.
5322 // v0: JSArray, tagged.
5323 // a2: the hole.
5324 // a3: Start of elements in FixedArray.
5325 // t1: Number of elements to fill.
5326 Label loop;
5327 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
5328 __ addu(t1, t1, a3); // Point past last element to store.
5329 __ bind(&loop);
5330 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
5331 __ sw(a2, MemOperand(a3));
5332 __ Branch(&loop, USE_DELAY_SLOT);
5333 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
5334
5335 __ bind(&done);
5336 __ Addu(sp, sp, Operand(3 * kPointerSize));
5337 __ Ret();
5338
5339 __ bind(&slowcase);
5340 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01005341}
5342
5343
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005344void CallFunctionStub::FinishCode(Handle<Code> code) {
5345 code->set_has_function_cache(false);
5346}
5347
5348
5349void CallFunctionStub::Clear(Heap* heap, Address address) {
5350 UNREACHABLE();
5351}
5352
5353
5354Object* CallFunctionStub::GetCachedValue(Address address) {
5355 UNREACHABLE();
5356 return NULL;
5357}
5358
5359
Steve Block44f0eee2011-05-26 01:26:41 +01005360void CallFunctionStub::Generate(MacroAssembler* masm) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005361 // a1 : the function to call
5362 Label slow, non_function;
Ben Murdoch257744e2011-11-30 15:57:28 +00005363
5364 // The receiver might implicitly be the global object. This is
5365 // indicated by passing the hole as the receiver to the call
5366 // function stub.
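  // For example, a plain non-strict call such as "f(x)" reaches this stub
  // with the hole in its receiver slot; the code below then substitutes the
  // global receiver object before invoking the function. (JS-level
  // illustration only, no additional code is emitted for it.)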
5367 if (ReceiverMightBeImplicit()) {
5368 Label call;
5369 // Get the receiver from the stack.
5370 // function, receiver [, arguments]
5371 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
5372 // Call as function is indicated with the hole.
5373 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5374 __ Branch(&call, ne, t0, Operand(at));
5375 // Patch the receiver on the stack with the global receiver object.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005376 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5377 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
5378 __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
Ben Murdoch257744e2011-11-30 15:57:28 +00005379 __ bind(&call);
5380 }
5381
Ben Murdoch257744e2011-11-30 15:57:28 +00005382 // Check that the function is really a JavaScript function.
5383 // a1: pushed function (to be verified)
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005384 __ JumpIfSmi(a1, &non_function);
Ben Murdoch257744e2011-11-30 15:57:28 +00005385 // Get the map of the function object.
5386 __ GetObjectType(a1, a2, a2);
5387 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
5388
5389 // Fast-case: Invoke the function now.
5390 // a1: pushed function
5391 ParameterCount actual(argc_);
5392
5393 if (ReceiverMightBeImplicit()) {
5394 Label call_as_function;
5395 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5396 __ Branch(&call_as_function, eq, t0, Operand(at));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005397 __ InvokeFunction(a1,
5398 actual,
5399 JUMP_FUNCTION,
5400 NullCallWrapper(),
5401 CALL_AS_METHOD);
Ben Murdoch257744e2011-11-30 15:57:28 +00005402 __ bind(&call_as_function);
5403 }
5404 __ InvokeFunction(a1,
5405 actual,
5406 JUMP_FUNCTION,
5407 NullCallWrapper(),
5408 CALL_AS_FUNCTION);
5409
5410 // Slow-case: Non-function called.
5411 __ bind(&slow);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005412 // Check for function proxy.
5413 __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
5414 __ push(a1); // Put proxy as additional argument.
5415 __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5416 __ li(a2, Operand(0, RelocInfo::NONE));
5417 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5418 __ SetCallKind(t1, CALL_AS_METHOD);
5419 {
5420 Handle<Code> adaptor =
5421 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5422 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5423 }
5424
Ben Murdoch257744e2011-11-30 15:57:28 +00005425 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5426 // of the original receiver from the call site).
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005427 __ bind(&non_function);
Ben Murdoch257744e2011-11-30 15:57:28 +00005428 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
Ben Murdochc7cc0282012-03-05 14:35:55 +00005429 __ li(a0, Operand(argc_)); // Set up the number of arguments.
Ben Murdoch257744e2011-11-30 15:57:28 +00005430 __ mov(a2, zero_reg);
5431 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005432 __ SetCallKind(t1, CALL_AS_METHOD);
Ben Murdoch257744e2011-11-30 15:57:28 +00005433 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5434 RelocInfo::CODE_TARGET);
Steve Block44f0eee2011-05-26 01:26:41 +01005435}
5436
5437
5438// Unfortunately you have to run without snapshots to see most of these
5439// names in the profile since most compare stubs end up in the snapshot.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005440void CompareStub::PrintName(StringStream* stream) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005441 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5442 (lhs_.is(a1) && rhs_.is(a0)));
Ben Murdoch257744e2011-11-30 15:57:28 +00005443 const char* cc_name;
5444 switch (cc_) {
5445 case lt: cc_name = "LT"; break;
5446 case gt: cc_name = "GT"; break;
5447 case le: cc_name = "LE"; break;
5448 case ge: cc_name = "GE"; break;
5449 case eq: cc_name = "EQ"; break;
5450 case ne: cc_name = "NE"; break;
5451 default: cc_name = "UnknownCondition"; break;
5452 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00005453 bool is_equality = cc_ == eq || cc_ == ne;
5454 stream->Add("CompareStub_%s", cc_name);
5455 stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5456 stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5457 if (strict_ && is_equality) stream->Add("_STRICT");
5458 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5459 if (!include_number_compare_) stream->Add("_NO_NUMBER");
5460 if (!include_smi_compare_) stream->Add("_NO_SMI");
Steve Block44f0eee2011-05-26 01:26:41 +01005461}
5462
5463
5464int CompareStub::MinorKey() {
Ben Murdoch257744e2011-11-30 15:57:28 +00005465 // Encode the two parameters in a unique 16 bit value.
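  // Sketch of the resulting key layout (field names as used below; the exact
  // bit widths come from the *Field helpers):
  //   key = ConditionField(cc_) | RegisterField(lhs_ is a0) |
  //         StrictField(strict_) | NeverNanNanField(...) |
  //         IncludeSmiCompareField(include_smi_compare_);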
5466 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5467 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5468 (lhs_.is(a1) && rhs_.is(a0)));
5469 return ConditionField::encode(static_cast<unsigned>(cc_))
5470 | RegisterField::encode(lhs_.is(a0))
5471 | StrictField::encode(strict_)
5472 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5473 | IncludeSmiCompareField::encode(include_smi_compare_);
Steve Block44f0eee2011-05-26 01:26:41 +01005474}
5475
5476
Ben Murdoch257744e2011-11-30 15:57:28 +00005477// StringCharCodeAtGenerator.
Steve Block44f0eee2011-05-26 01:26:41 +01005478void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005479 Label flat_string;
5480 Label ascii_string;
5481 Label got_char_code;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00005482 Label sliced_string;
Ben Murdoch257744e2011-11-30 15:57:28 +00005483
Ben Murdoch257744e2011-11-30 15:57:28 +00005484 ASSERT(!t0.is(index_));
5485 ASSERT(!t0.is(result_));
5486 ASSERT(!t0.is(object_));
5487
5488 // If the receiver is a smi trigger the non-string case.
5489 __ JumpIfSmi(object_, receiver_not_string_);
5490
5491 // Fetch the instance type of the receiver into result register.
5492 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5493 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5494 // If the receiver is not a string trigger the non-string case.
5495 __ And(t0, result_, Operand(kIsNotStringMask));
5496 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5497
5498 // If the index is non-smi trigger the non-smi case.
5499 __ JumpIfNotSmi(index_, &index_not_smi_);
5500
Ben Murdoch257744e2011-11-30 15:57:28 +00005501 __ bind(&got_smi_index_);
5502
5503 // Check for index out of range.
5504 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005505 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
Ben Murdoch257744e2011-11-30 15:57:28 +00005506
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005507 __ sra(index_, index_, kSmiTagSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00005508
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005509 StringCharLoadGenerator::Generate(masm,
5510 object_,
5511 index_,
5512 result_,
5513 &call_runtime_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005514
Ben Murdoch257744e2011-11-30 15:57:28 +00005515 __ sll(result_, result_, kSmiTagSize);
5516 __ bind(&exit_);
Steve Block44f0eee2011-05-26 01:26:41 +01005517}
5518
5519
5520void StringCharCodeAtGenerator::GenerateSlow(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005521 MacroAssembler* masm,
5522 const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005523 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5524
5525 // Index is not a smi.
5526 __ bind(&index_not_smi_);
5527 // If index is a heap number, try converting it to an integer.
5528 __ CheckMap(index_,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005529 result_,
Ben Murdoch257744e2011-11-30 15:57:28 +00005530 Heap::kHeapNumberMapRootIndex,
5531 index_not_number_,
5532 DONT_DO_SMI_CHECK);
5533 call_helper.BeforeCall(masm);
5534 // Consumed by runtime conversion function:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005535 __ Push(object_, index_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005536 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5537 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5538 } else {
5539 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5540 // NumberToSmi discards numbers that are not exact integers.
5541 __ CallRuntime(Runtime::kNumberToSmi, 1);
5542 }
5543
5544 // Save the conversion result before the pop instructions below
5545 // have a chance to overwrite it.
5546
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005547 __ Move(index_, v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00005548 __ pop(object_);
5549 // Reload the instance type.
5550 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5551 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5552 call_helper.AfterCall(masm);
5553 // If index is still not a smi, it must be out of range.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005554 __ JumpIfNotSmi(index_, index_out_of_range_);
Ben Murdoch257744e2011-11-30 15:57:28 +00005555 // Otherwise, return to the fast path.
5556 __ Branch(&got_smi_index_);
5557
5558 // Call runtime. We get here when the receiver is a string and the
5559 // index is a number, but the code of getting the actual character
5560 // is too complex (e.g., when the string needs to be flattened).
5561 __ bind(&call_runtime_);
5562 call_helper.BeforeCall(masm);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005563 __ sll(index_, index_, kSmiTagSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00005564 __ Push(object_, index_);
5565 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5566
5567 __ Move(result_, v0);
5568
5569 call_helper.AfterCall(masm);
5570 __ jmp(&exit_);
5571
5572 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
Steve Block44f0eee2011-05-26 01:26:41 +01005573}
5574
5575
5576// -------------------------------------------------------------------------
5577// StringCharFromCodeGenerator
5578
5579void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005580 // Fast case of Heap::LookupSingleCharacterStringFromCode.
5581
5582 ASSERT(!t0.is(result_));
5583 ASSERT(!t0.is(code_));
5584
5585 STATIC_ASSERT(kSmiTag == 0);
5586 STATIC_ASSERT(kSmiShiftSize == 0);
5587 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5588 __ And(t0,
5589 code_,
5590 Operand(kSmiTagMask |
5591 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5592 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
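  // The combined mask test above is roughly equivalent to (sketch):
  //   if (!IsSmi(code) || SmiValue(code) > String::kMaxAsciiCharCode)
  //     goto slow_case;
  // which works because kMaxAsciiCharCode + 1 is a power of two.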
5593
5594 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5595 // At this point code register contains smi tagged ASCII char code.
5596 STATIC_ASSERT(kSmiTag == 0);
5597 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5598 __ Addu(result_, result_, t0);
5599 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5600 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5601 __ Branch(&slow_case_, eq, result_, Operand(t0));
5602 __ bind(&exit_);
Steve Block44f0eee2011-05-26 01:26:41 +01005603}
5604
5605
5606void StringCharFromCodeGenerator::GenerateSlow(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005607 MacroAssembler* masm,
5608 const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005609 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5610
5611 __ bind(&slow_case_);
5612 call_helper.BeforeCall(masm);
5613 __ push(code_);
5614 __ CallRuntime(Runtime::kCharFromCode, 1);
5615 __ Move(result_, v0);
5616
5617 call_helper.AfterCall(masm);
5618 __ Branch(&exit_);
5619
5620 __ Abort("Unexpected fallthrough from CharFromCode slow case");
Steve Block44f0eee2011-05-26 01:26:41 +01005621}
5622
5623
5624// -------------------------------------------------------------------------
5625// StringCharAtGenerator
5626
5627void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005628 char_code_at_generator_.GenerateFast(masm);
5629 char_from_code_generator_.GenerateFast(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01005630}
5631
5632
5633void StringCharAtGenerator::GenerateSlow(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005634 MacroAssembler* masm,
5635 const RuntimeCallHelper& call_helper) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005636 char_code_at_generator_.GenerateSlow(masm, call_helper);
5637 char_from_code_generator_.GenerateSlow(masm, call_helper);
Steve Block44f0eee2011-05-26 01:26:41 +01005638}
5639
5640
Steve Block44f0eee2011-05-26 01:26:41 +01005641void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5642 Register dest,
5643 Register src,
5644 Register count,
5645 Register scratch,
5646 bool ascii) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005647 Label loop;
5648 Label done;
5649 // This loop just copies one character at a time, as it is only used for
5650 // very short strings.
5651 if (!ascii) {
5652 __ addu(count, count, count);
5653 }
5654 __ Branch(&done, eq, count, Operand(zero_reg));
5655 __ addu(count, dest, count); // Count now points to the last dest byte.
5656
5657 __ bind(&loop);
5658 __ lbu(scratch, MemOperand(src));
5659 __ addiu(src, src, 1);
5660 __ sb(scratch, MemOperand(dest));
5661 __ addiu(dest, dest, 1);
5662 __ Branch(&loop, lt, dest, Operand(count));
5663
5664 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01005665}
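
// The copy above is deliberately simple: convert the character count to a
// byte count (two-byte strings copy 2 * count bytes), then move one byte at
// a time until the destination reaches its limit. A plain C++ sketch of the
// same loop (illustrative only; the name is not a V8 helper):
void CopyCharsSketch(char* dest, const char* src, int count, bool ascii) {
  int bytes = ascii ? count : count * 2;
  char* limit = dest + bytes;  // Copy until dest reaches this address.
  while (dest < limit) {
    *dest++ = *src++;
  }
}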
5666
5667
5668enum CopyCharactersFlags {
5669 COPY_ASCII = 1,
5670 DEST_ALWAYS_ALIGNED = 2
5671};
5672
5673
5674void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5675 Register dest,
5676 Register src,
5677 Register count,
5678 Register scratch1,
5679 Register scratch2,
5680 Register scratch3,
5681 Register scratch4,
5682 Register scratch5,
5683 int flags) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005684 bool ascii = (flags & COPY_ASCII) != 0;
5685 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5686
5687 if (dest_always_aligned && FLAG_debug_code) {
5688 // Check that destination is actually word aligned if the flag says
5689 // that it is.
5690 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5691 __ Check(eq,
5692 "Destination of copy not aligned.",
5693 scratch4,
5694 Operand(zero_reg));
5695 }
5696
5697 const int kReadAlignment = 4;
5698 const int kReadAlignmentMask = kReadAlignment - 1;
5699 // Ensure that reading an entire aligned word containing the last character
5700 // of a string will not read outside the allocated area (because we pad up
5701 // to kObjectAlignment).
5702 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5703 // Assumes word reads and writes are little endian.
5704 // Nothing to do for zero characters.
5705 Label done;
5706
5707 if (!ascii) {
5708 __ addu(count, count, count);
5709 }
5710 __ Branch(&done, eq, count, Operand(zero_reg));
5711
5712 Label byte_loop;
5713 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5714 __ Subu(scratch1, count, Operand(8));
5715 __ Addu(count, dest, Operand(count));
5716 Register limit = count; // Read until src equals this.
5717 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5718
5719 if (!dest_always_aligned) {
5720 // Align dest by byte copying. Copies between zero and three bytes.
5721 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5722 Label dest_aligned;
5723 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5724 Label aligned_loop;
5725 __ bind(&aligned_loop);
5726 __ lbu(scratch1, MemOperand(src));
5727 __ addiu(src, src, 1);
5728 __ sb(scratch1, MemOperand(dest));
5729 __ addiu(dest, dest, 1);
5730 __ addiu(scratch4, scratch4, 1);
5731 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5732 __ bind(&dest_aligned);
5733 }
5734
5735 Label simple_loop;
5736
5737 __ And(scratch4, src, Operand(kReadAlignmentMask));
5738 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5739
5740 // Loop for src/dst that are not aligned the same way.
5741 // This loop uses lwl and lwr instructions. These instructions
5742 // depend on the endianness, and the implementation assumes little-endian.
5743 {
5744 Label loop;
5745 __ bind(&loop);
5746 __ lwr(scratch1, MemOperand(src));
5747 __ Addu(src, src, Operand(kReadAlignment));
5748 __ lwl(scratch1, MemOperand(src, -1));
5749 __ sw(scratch1, MemOperand(dest));
5750 __ Addu(dest, dest, Operand(kReadAlignment));
5751 __ Subu(scratch2, limit, dest);
5752 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5753 }
5754
5755 __ Branch(&byte_loop);
5756
5757 // Simple loop.
5758 // Copy words from src to dest, until less than four bytes left.
5759 // Both src and dest are word aligned.
5760 __ bind(&simple_loop);
5761 {
5762 Label loop;
5763 __ bind(&loop);
5764 __ lw(scratch1, MemOperand(src));
5765 __ Addu(src, src, Operand(kReadAlignment));
5766 __ sw(scratch1, MemOperand(dest));
5767 __ Addu(dest, dest, Operand(kReadAlignment));
5768 __ Subu(scratch2, limit, dest);
5769 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5770 }
5771
5772 // Copy bytes from src to dest until dest hits limit.
5773 __ bind(&byte_loop);
5774 // Test if dest has already reached the limit.
5775 __ Branch(&done, ge, dest, Operand(limit));
5776 __ lbu(scratch1, MemOperand(src));
5777 __ addiu(src, src, 1);
5778 __ sb(scratch1, MemOperand(dest));
5779 __ addiu(dest, dest, 1);
5780 __ Branch(&byte_loop);
5781
5782 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01005783}
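
// Structure of the long copy above, as a rough C++ sketch (illustrative
// only): short copies go byte by byte; otherwise the destination is aligned
// first, whole words are moved in the main loop, and a byte loop handles the
// tail. The sketch only word-copies when the source is aligned too, whereas
// the stub also covers a misaligned source by combining lwl/lwr reads.
void CopyBytesLongSketch(char* dest, const char* src, int bytes) {
  char* limit = dest + bytes;
  if (bytes >= 8) {
    while ((reinterpret_cast<uintptr_t>(dest) & 3) != 0) {
      *dest++ = *src++;            // Align the destination (0..3 bytes).
    }
    if ((reinterpret_cast<uintptr_t>(src) & 3) == 0) {
      while (limit - dest >= 4) {  // Word-at-a-time main loop.
        *reinterpret_cast<uint32_t*>(dest) =
            *reinterpret_cast<const uint32_t*>(src);
        dest += 4;
        src += 4;
      }
    }
  }
  while (dest < limit) {           // Byte tail (or the whole short copy).
    *dest++ = *src++;
  }
}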
5784
5785
5786void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5787 Register c1,
5788 Register c2,
5789 Register scratch1,
5790 Register scratch2,
5791 Register scratch3,
5792 Register scratch4,
5793 Register scratch5,
5794 Label* not_found) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005795 // Register scratch3 is the general scratch register in this function.
5796 Register scratch = scratch3;
5797
5798 // Make sure that both characters are not digits, because such strings have a
5799 // different hash algorithm. Don't try to look for these in the symbol table.
5800 Label not_array_index;
5801 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5802 __ Branch(&not_array_index,
5803 Ugreater,
5804 scratch,
5805 Operand(static_cast<int>('9' - '0')));
5806 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5807
5808 // If the check failed, combine both characters into a single halfword.
5809 // This is required by the contract of the method: code at the
5810 // not_found branch expects this combination in c1 register.
5811 Label tmp;
5812 __ sll(scratch1, c2, kBitsPerByte);
5813 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5814 __ Or(c1, c1, scratch1);
5815 __ bind(&tmp);
5816 __ Branch(not_found,
5817 Uless_equal,
5818 scratch,
5819 Operand(static_cast<int>('9' - '0')));
5820
5821 __ bind(&not_array_index);
5822 // Calculate the two character string hash.
5823 Register hash = scratch1;
5824 StringHelper::GenerateHashInit(masm, hash, c1);
5825 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5826 StringHelper::GenerateHashGetHash(masm, hash);
5827
5828 // Collect the two characters in a register.
5829 Register chars = c1;
5830 __ sll(scratch, c2, kBitsPerByte);
5831 __ Or(chars, chars, scratch);
5832
5833 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5834 // hash: hash of two character string.
5835
5836 // Load symbol table.
5837 // Load address of first element of the symbol table.
5838 Register symbol_table = c2;
5839 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5840
5841 Register undefined = scratch4;
5842 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5843
5844 // Calculate capacity mask from the symbol table capacity.
5845 Register mask = scratch2;
5846 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5847 __ sra(mask, mask, 1);
5848 __ Addu(mask, mask, -1);
5849
5850 // Calculate untagged address of the first element of the symbol table.
5851 Register first_symbol_table_element = symbol_table;
5852 __ Addu(first_symbol_table_element, symbol_table,
5853 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5854
5855 // Registers.
5856 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5857 // hash: hash of two character string
5858 // mask: capacity mask
5859 // first_symbol_table_element: address of the first element of
5860 // the symbol table
5861 // undefined: the undefined object
5862 // scratch: -
5863
5864 // Perform a number of probes in the symbol table.
5865 static const int kProbes = 4;
5866 Label found_in_symbol_table;
5867 Label next_probe[kProbes];
5868 Register candidate = scratch5; // Scratch register contains candidate.
5869 for (int i = 0; i < kProbes; i++) {
5870 // Calculate entry in symbol table.
5871 if (i > 0) {
5872 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5873 } else {
5874 __ mov(candidate, hash);
5875 }
5876
5877 __ And(candidate, candidate, Operand(mask));
5878
5879 // Load the entry from the symbol table.
5880 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5881 __ sll(scratch, candidate, kPointerSizeLog2);
5882 __ Addu(scratch, scratch, first_symbol_table_element);
5883 __ lw(candidate, MemOperand(scratch));
5884
5885 // If the entry is undefined, no string with this hash can be found.
5886 Label is_string;
5887 __ GetObjectType(candidate, scratch, scratch);
5888 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5889
5890 __ Branch(not_found, eq, undefined, Operand(candidate));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005891 // Must be the hole (deleted entry).
Ben Murdoch257744e2011-11-30 15:57:28 +00005892 if (FLAG_debug_code) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005893 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
5894 __ Assert(eq, "oddball in symbol table is not undefined or the hole",
Ben Murdoch257744e2011-11-30 15:57:28 +00005895 scratch, Operand(candidate));
5896 }
5897 __ jmp(&next_probe[i]);
5898
5899 __ bind(&is_string);
5900
5901 // Check that the candidate is a non-external ASCII string. The instance
5902 // type is still in the scratch register from the GetObjectType
5903 // operation.
5904 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5905
5906 // If length is not 2 the string is not a candidate.
5907 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5908 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5909
5910 // Check if the two characters match.
5911 // Assumes that word load is little endian.
5912 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5913 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5914 __ bind(&next_probe[i]);
5915 }
5916
5917 // No matching 2 character string found by probing.
5918 __ jmp(not_found);
5919
5920 // Scratch register contains result when we fall through to here.
5921 Register result = candidate;
5922 __ bind(&found_in_symbol_table);
5923 __ mov(v0, result);
Steve Block44f0eee2011-05-26 01:26:41 +01005924}
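
// Shape of the probe sequence above in plain C++ (an illustrative sketch,
// not a V8 helper): up to kProbes slots are inspected; an undefined slot
// proves the symbol is absent, and a candidate matches only if it is a
// length-2 sequential ASCII string with the right two characters. The probe
// offset below is simplified relative to SymbolTable::GetProbeOffset.
const char* FindTwoCharSymbolSketch(const char* const* entries, int capacity,
                                    uint32_t hash, char c1, char c2) {
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    int index = static_cast<int>(hash + i) & (capacity - 1);
    const char* candidate = entries[index];
    if (candidate == NULL) return NULL;  // Undefined slot: not in the table.
    if (candidate[0] == c1 && candidate[1] == c2 && candidate[2] == '\0') {
      return candidate;                  // Two-character match.
    }
  }
  return NULL;  // Give up after kProbes probes; the caller goes slow.
}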
5925
5926
5927void StringHelper::GenerateHashInit(MacroAssembler* masm,
Ben Murdochc7cc0282012-03-05 14:35:55 +00005928 Register hash,
5929 Register character) {
5930 // hash = seed + character + ((seed + character) << 10);
5931 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
5932 // Untag smi seed and add the character.
5933 __ SmiUntag(hash);
Ben Murdoch257744e2011-11-30 15:57:28 +00005934 __ addu(hash, hash, character);
Ben Murdochc7cc0282012-03-05 14:35:55 +00005935 __ sll(at, hash, 10);
5936 __ addu(hash, hash, at);
Ben Murdoch257744e2011-11-30 15:57:28 +00005937 // hash ^= hash >> 6;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005938 __ srl(at, hash, 6);
Ben Murdoch257744e2011-11-30 15:57:28 +00005939 __ xor_(hash, hash, at);
Steve Block44f0eee2011-05-26 01:26:41 +01005940}
5941
5942
5943void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Ben Murdochc7cc0282012-03-05 14:35:55 +00005944 Register hash,
5945 Register character) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005946 // hash += character;
5947 __ addu(hash, hash, character);
5948 // hash += hash << 10;
5949 __ sll(at, hash, 10);
5950 __ addu(hash, hash, at);
5951 // hash ^= hash >> 6;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005952 __ srl(at, hash, 6);
Ben Murdoch257744e2011-11-30 15:57:28 +00005953 __ xor_(hash, hash, at);
Steve Block44f0eee2011-05-26 01:26:41 +01005954}
5955
5956
5957void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Ben Murdochc7cc0282012-03-05 14:35:55 +00005958 Register hash) {
Ben Murdoch257744e2011-11-30 15:57:28 +00005959 // hash += hash << 3;
5960 __ sll(at, hash, 3);
5961 __ addu(hash, hash, at);
5962 // hash ^= hash >> 11;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005963 __ srl(at, hash, 11);
Ben Murdoch257744e2011-11-30 15:57:28 +00005964 __ xor_(hash, hash, at);
5965 // hash += hash << 15;
5966 __ sll(at, hash, 15);
5967 __ addu(hash, hash, at);
5968
Ben Murdochc7cc0282012-03-05 14:35:55 +00005969 __ li(at, Operand(String::kHashBitMask));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005970 __ and_(hash, hash, at);
5971
Ben Murdoch257744e2011-11-30 15:57:28 +00005972 // if (hash == 0) hash = 27;
Ben Murdochc7cc0282012-03-05 14:35:55 +00005973 __ ori(at, zero_reg, StringHasher::kZeroHash);
Ben Murdoch257744e2011-11-30 15:57:28 +00005974 __ movz(hash, at, hash);
Steve Block44f0eee2011-05-26 01:26:41 +01005975}
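
// The three helpers above compute V8's running string hash. The same
// computation in plain C++, as an illustrative sketch (the seed comes from
// the heap's hash seed, the mask is String::kHashBitMask, and 27 is
// StringHasher::kZeroHash, as noted in the code above):
uint32_t StringHashSketch(uint32_t seed, const unsigned char* chars,
                          int length, uint32_t hash_bit_mask) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];        // GenerateHashInit / GenerateHashAddCharacter.
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;         // GenerateHashGetHash.
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= hash_bit_mask;
  return hash == 0 ? 27 : hash;
}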
5976
5977
5978void SubStringStub::Generate(MacroAssembler* masm) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00005979 Label runtime;
Ben Murdoch257744e2011-11-30 15:57:28 +00005980 // Stack frame on entry.
5981 // ra: return address
5982 // sp[0]: to
5983 // sp[4]: from
5984 // sp[8]: string
5985
5986 // This stub is called from the native-call %_SubString(...), so
5987 // nothing can be assumed about the arguments. It is tested that:
5988 // "string" is a sequential string,
5989 // both "from" and "to" are smis, and
5990 // 0 <= from <= to <= string.length.
5991 // If any of these assumptions fail, we call the runtime system.
5992
5993 static const int kToOffset = 0 * kPointerSize;
5994 static const int kFromOffset = 1 * kPointerSize;
5995 static const int kStringOffset = 2 * kPointerSize;
5996
Ben Murdochc7cc0282012-03-05 14:35:55 +00005997 __ lw(a2, MemOperand(sp, kToOffset));
5998 __ lw(a3, MemOperand(sp, kFromOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00005999 STATIC_ASSERT(kFromOffset == kToOffset + 4);
6000 STATIC_ASSERT(kSmiTag == 0);
6001 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
6002
Ben Murdochc7cc0282012-03-05 14:35:55 +00006003 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
6004 // safe in this case.
6005 __ JumpIfNotSmi(a2, &runtime, at, USE_DELAY_SLOT);
6006 __ SmiUntag(a2);
6007 __ JumpIfNotSmi(a3, &runtime, at, USE_DELAY_SLOT);
6008 __ SmiUntag(a3);
Ben Murdoch257744e2011-11-30 15:57:28 +00006009
Ben Murdochc7cc0282012-03-05 14:35:55 +00006010 // Both a2 and a3 are untagged integers.
Ben Murdoch257744e2011-11-30 15:57:28 +00006011
Ben Murdochc7cc0282012-03-05 14:35:55 +00006012 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
Ben Murdoch257744e2011-11-30 15:57:28 +00006013
6014 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006015 __ Subu(a2, a2, a3); // a2 = result string length (to - from).
Ben Murdoch257744e2011-11-30 15:57:28 +00006016
Ben Murdochc7cc0282012-03-05 14:35:55 +00006017 // Make sure first argument is a string.
Ben Murdoch589d6972011-11-30 16:04:58 +00006018 __ lw(v0, MemOperand(sp, kStringOffset));
Ben Murdochc7cc0282012-03-05 14:35:55 +00006019 __ JumpIfSmi(v0, &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006020
Ben Murdoch589d6972011-11-30 16:04:58 +00006021 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00006022 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
Ben Murdoch589d6972011-11-30 16:04:58 +00006023 __ And(t4, a1, Operand(kIsNotStringMask));
Ben Murdoch257744e2011-11-30 15:57:28 +00006024
Ben Murdochc7cc0282012-03-05 14:35:55 +00006025 __ Branch(&runtime, ne, t4, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00006026
Ben Murdoch589d6972011-11-30 16:04:58 +00006027 // Short-cut for the case of trivial substring.
6028 Label return_v0;
6029 // v0: original string
6030 // a2: result string length
6031 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
6032 __ sra(t0, t0, 1);
6033 __ Branch(&return_v0, eq, a2, Operand(t0));
6034
Ben Murdoch257744e2011-11-30 15:57:28 +00006035
6036 Label result_longer_than_two;
Ben Murdochc7cc0282012-03-05 14:35:55 +00006037 // Check for special case of two character ASCII string, in which case
6038 // we do a lookup in the symbol table first.
6039 __ li(t0, 2);
6040 __ Branch(&result_longer_than_two, gt, a2, Operand(t0));
6041 __ Branch(&runtime, lt, a2, Operand(t0));
Ben Murdoch257744e2011-11-30 15:57:28 +00006042
Ben Murdochc7cc0282012-03-05 14:35:55 +00006043 __ JumpIfInstanceTypeIsNotSequentialAscii(a1, a1, &runtime);
6044
Ben Murdoch257744e2011-11-30 15:57:28 +00006045 // Get the two characters forming the sub string.
Ben Murdoch589d6972011-11-30 16:04:58 +00006046 __ Addu(v0, v0, Operand(a3));
6047 __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6048 __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
Ben Murdoch257744e2011-11-30 15:57:28 +00006049
6050 // Try to lookup two character string in symbol table.
6051 Label make_two_character_string;
6052 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6053 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
Ben Murdoch589d6972011-11-30 16:04:58 +00006054 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00006055
6056 // a2: result string length.
6057 // a3: two characters combined into halfword in little endian byte order.
6058 __ bind(&make_two_character_string);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006059 __ AllocateAsciiString(v0, a2, t0, t1, t4, &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006060 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
Ben Murdoch589d6972011-11-30 16:04:58 +00006061 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00006062
6063 __ bind(&result_longer_than_two);
6064
Ben Murdochc7cc0282012-03-05 14:35:55 +00006065 // Deal with different string types: update the index if necessary
6066 // and put the underlying string into t1.
6067 // v0: original string
6068 // a1: instance type
6069 // a2: length
6070 // a3: from index (untagged)
6071 Label underlying_unpacked, sliced_string, seq_or_external_string;
6072 // If the string is not indirect, it can only be sequential or external.
6073 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6074 STATIC_ASSERT(kIsIndirectStringMask != 0);
6075 __ And(t0, a1, Operand(kIsIndirectStringMask));
6076 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
Ben Murdoch589d6972011-11-30 16:04:58 +00006077
Ben Murdochc7cc0282012-03-05 14:35:55 +00006078 __ And(t0, a1, Operand(kSlicedNotConsMask));
6079 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
6080 // Cons string. Check whether it is flat, then fetch first part.
6081 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
6082 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6083 __ Branch(&runtime, ne, t1, Operand(t0));
6084 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
6085 // Update instance type.
6086 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6087 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6088 __ jmp(&underlying_unpacked);
Ben Murdoch257744e2011-11-30 15:57:28 +00006089
Ben Murdochc7cc0282012-03-05 14:35:55 +00006090 __ bind(&sliced_string);
6091 // Sliced string. Fetch parent and correct start index by offset.
6092 __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6093 __ sra(t1, t1, 1);
6094 __ Addu(a3, a3, t1);
6095 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6096 // Update instance type.
6097 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6098 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6099 __ jmp(&underlying_unpacked);
6100
6101 __ bind(&seq_or_external_string);
6102 // Sequential or external string. Just move string to the expected register.
6103 __ mov(t1, v0);
6104
6105 __ bind(&underlying_unpacked);
6106
6107 if (FLAG_string_slices) {
6108 Label copy_routine;
6109 // t1: underlying subject string
6110 // a1: instance type of underlying subject string
6111 // a2: length
6112 // a3: adjusted start index (untagged)
6113 // Short slice. Copy instead of slicing.
6114 __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
6115 // Allocate new sliced string. At this point we do not reload the instance
6116 // type including the string encoding because we simply rely on the info
6117 // provided by the original string. It does not matter if the original
6118 // string's encoding is wrong because we always have to recheck encoding of
6119 // the newly created string's parent anyway, due to externalized strings.
6120 Label two_byte_slice, set_slice_header;
6121 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6122 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6123 __ And(t0, a1, Operand(kStringEncodingMask));
6124 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
6125 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
6126 __ jmp(&set_slice_header);
6127 __ bind(&two_byte_slice);
6128 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
6129 __ bind(&set_slice_header);
6130 __ sll(a3, a3, 1);
6131 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6132 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6133 __ jmp(&return_v0);
6134
6135 __ bind(&copy_routine);
6136 }
6137
6138 // t1: underlying subject string
6139 // a1: instance type of underlying subject string
6140 // a2: length
6141 // a3: adjusted start index (untagged)
6142 Label two_byte_sequential, sequential_string, allocate_result;
6143 STATIC_ASSERT(kExternalStringTag != 0);
6144 STATIC_ASSERT(kSeqStringTag == 0);
6145 __ And(t0, a1, Operand(kExternalStringTag));
6146 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
6147
6148 // Handle external string.
6149 // Rule out short external strings.
6150 STATIC_CHECK(kShortExternalStringTag != 0);
6151 __ And(t0, a1, Operand(kShortExternalStringTag));
6152 __ Branch(&runtime, ne, t0, Operand(zero_reg));
6153 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
6154 // t1 already points to the first character of underlying string.
6155 __ jmp(&allocate_result);
6156
6157 __ bind(&sequential_string);
6158 // Locate first character of underlying subject string.
6159 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
6160 __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6161
6162 __ bind(&allocate_result);
6163 // Sequential ASCII string. Allocate the result.
6164 STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
6165 __ And(t0, a1, Operand(kStringEncodingMask));
6166 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
6167
6168 // Allocate and copy the resulting ASCII string.
6169 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
6170
6171 // Locate first character of substring to copy.
6172 __ Addu(t1, t1, a3);
6173
Ben Murdoch257744e2011-11-30 15:57:28 +00006174 // Locate first character of result.
6175 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
Ben Murdoch257744e2011-11-30 15:57:28 +00006176
Ben Murdoch589d6972011-11-30 16:04:58 +00006177 // v0: result string
6178 // a1: first character of result string
6179 // a2: result string length
6180 // t1: first character of substring to copy
Ben Murdoch257744e2011-11-30 15:57:28 +00006181 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
6182 StringHelper::GenerateCopyCharactersLong(
6183 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
Ben Murdoch589d6972011-11-30 16:04:58 +00006184 __ jmp(&return_v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00006185
Ben Murdochc7cc0282012-03-05 14:35:55 +00006186 // Allocate and copy the resulting two-byte string.
6187 __ bind(&two_byte_sequential);
6188 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006189
Ben Murdochc7cc0282012-03-05 14:35:55 +00006190 // Locate first character of substring to copy.
Ben Murdoch589d6972011-11-30 16:04:58 +00006191 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006192 __ sll(t0, a3, 1);
6193 __ Addu(t1, t1, t0);
Ben Murdoch257744e2011-11-30 15:57:28 +00006194 // Locate first character of result.
6195 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
Ben Murdoch589d6972011-11-30 16:04:58 +00006196
Ben Murdoch257744e2011-11-30 15:57:28 +00006197 // v0: result string.
6198 // a1: first character of result.
6199 // a2: result length.
Ben Murdoch589d6972011-11-30 16:04:58 +00006200 // t1: first character of substring to copy.
Ben Murdoch257744e2011-11-30 15:57:28 +00006201 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
6202 StringHelper::GenerateCopyCharactersLong(
6203 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
Ben Murdoch589d6972011-11-30 16:04:58 +00006204
6205 __ bind(&return_v0);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006206 Counters* counters = masm->isolate()->counters();
Ben Murdoch257744e2011-11-30 15:57:28 +00006207 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006208 __ DropAndRet(3);
Ben Murdoch257744e2011-11-30 15:57:28 +00006209
6210 // Just jump to runtime to create the sub string.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006211 __ bind(&runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006212 __ TailCallRuntime(Runtime::kSubString, 3, 1);
6213}
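
// Decision structure of the fast path above, condensed into a C++ sketch
// (illustrative only; the enum, the function and its parameters are not V8
// API, and min_slice_length stands for SlicedString::kMinLength):
enum SubStringStrategySketch {
  kSubStringRuntime,     // Bad arguments or short results: call the runtime.
  kSubStringOriginal,    // The substring is the whole string: return it.
  kSubStringTwoChar,     // Length 2: probe the symbol table first.
  kSubStringSlice,       // Long enough to pay for a SlicedString.
  kSubStringCopy         // Otherwise allocate and copy the characters.
};
SubStringStrategySketch ChooseSubStringStrategy(int string_length, int from,
                                                int to, bool slices_enabled,
                                                int min_slice_length) {
  if (from < 0 || to < from || to > string_length) return kSubStringRuntime;
  int result_length = to - from;
  if (result_length == string_length) return kSubStringOriginal;
  if (result_length < 2) return kSubStringRuntime;
  if (result_length == 2) return kSubStringTwoChar;
  if (slices_enabled && result_length >= min_slice_length) {
    return kSubStringSlice;
  }
  return kSubStringCopy;
}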
6214
6215
6216void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6217 Register left,
6218 Register right,
6219 Register scratch1,
6220 Register scratch2,
6221 Register scratch3) {
6222 Register length = scratch1;
6223
6224 // Compare lengths.
6225 Label strings_not_equal, check_zero_length;
6226 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6227 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6228 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6229 __ bind(&strings_not_equal);
6230 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6231 __ Ret();
6232
6233 // Check if the length is zero.
6234 Label compare_chars;
6235 __ bind(&check_zero_length);
6236 STATIC_ASSERT(kSmiTag == 0);
6237 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6238 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6239 __ Ret();
6240
6241 // Compare characters.
6242 __ bind(&compare_chars);
6243
6244 GenerateAsciiCharsCompareLoop(masm,
6245 left, right, length, scratch2, scratch3, v0,
6246 &strings_not_equal);
6247
6248 // Characters are equal.
6249 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6250 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01006251}
6252
6253
6254void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Steve Block44f0eee2011-05-26 01:26:41 +01006255 Register left,
Ben Murdoch257744e2011-11-30 15:57:28 +00006256 Register right,
Steve Block44f0eee2011-05-26 01:26:41 +01006257 Register scratch1,
6258 Register scratch2,
6259 Register scratch3,
6260 Register scratch4) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006261 Label result_not_equal, compare_lengths;
6262 // Find minimum length and length difference.
6263 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6264 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6265 __ Subu(scratch3, scratch1, Operand(scratch2));
6266 Register length_delta = scratch3;
6267 __ slt(scratch4, scratch2, scratch1);
6268 __ movn(scratch1, scratch2, scratch4);
6269 Register min_length = scratch1;
6270 STATIC_ASSERT(kSmiTag == 0);
6271 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6272
6273 // Compare loop.
6274 GenerateAsciiCharsCompareLoop(masm,
6275 left, right, min_length, scratch2, scratch4, v0,
6276 &result_not_equal);
6277
6278 // Compare lengths - strings up to min-length are equal.
6279 __ bind(&compare_lengths);
6280 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6281 // Use length_delta as result if it's zero.
6282 __ mov(scratch2, length_delta);
6283 __ mov(scratch4, zero_reg);
6284 __ mov(v0, zero_reg);
6285
6286 __ bind(&result_not_equal);
6287 // Conditionally update the result based either on length_delta or
6288 // the last comparison performed in the loop above.
6289 Label ret;
6290 __ Branch(&ret, eq, scratch2, Operand(scratch4));
6291 __ li(v0, Operand(Smi::FromInt(GREATER)));
6292 __ Branch(&ret, gt, scratch2, Operand(scratch4));
6293 __ li(v0, Operand(Smi::FromInt(LESS)));
6294 __ bind(&ret);
6295 __ Ret();
6296}
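
// Equivalent C++ for the flat ASCII comparison above (illustrative sketch):
// compare character-wise up to the shorter length, then fall back to the
// length difference, yielding LESS, EQUAL or GREATER as -1, 0 or 1.
int CompareFlatAsciiSketch(const unsigned char* left, int left_length,
                           const unsigned char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) {
      return left[i] < right[i] ? -1 : 1;  // LESS : GREATER.
    }
  }
  int delta = left_length - right_length;
  return delta == 0 ? 0 : (delta < 0 ? -1 : 1);
}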
6297
6298
6299void StringCompareStub::GenerateAsciiCharsCompareLoop(
6300 MacroAssembler* masm,
6301 Register left,
6302 Register right,
6303 Register length,
6304 Register scratch1,
6305 Register scratch2,
6306 Register scratch3,
6307 Label* chars_not_equal) {
6308 // Change index to run from -length to -1 by adding length to string
6309 // start. This means that loop ends when index reaches zero, which
6310 // doesn't need an additional compare.
6311 __ SmiUntag(length);
6312 __ Addu(scratch1, length,
6313 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6314 __ Addu(left, left, Operand(scratch1));
6315 __ Addu(right, right, Operand(scratch1));
6316 __ Subu(length, zero_reg, length);
6317 Register index = length; // index = -length;
6318
6319
6320 // Compare loop.
6321 Label loop;
6322 __ bind(&loop);
6323 __ Addu(scratch3, left, index);
6324 __ lbu(scratch1, MemOperand(scratch3));
6325 __ Addu(scratch3, right, index);
6326 __ lbu(scratch2, MemOperand(scratch3));
6327 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6328 __ Addu(index, index, 1);
6329 __ Branch(&loop, ne, index, Operand(zero_reg));
Steve Block44f0eee2011-05-26 01:26:41 +01006330}
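
// The loop above biases both string pointers past their ends and runs an
// index from -length up to zero, so the loop condition doubles as the end
// test. The same idea in C++ (illustrative sketch):
bool AsciiCharsEqualSketch(const char* left, const char* right, int length) {
  left += length;   // Point just past the last character.
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;
  }
  return true;
}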
6331
6332
6333void StringCompareStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006334 Label runtime;
6335
6336 Counters* counters = masm->isolate()->counters();
6337
6338 // Stack frame on entry.
6339 // sp[0]: right string
6340 // sp[4]: left string
6341 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6342 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6343
6344 Label not_same;
6345 __ Branch(&not_same, ne, a0, Operand(a1));
6346 STATIC_ASSERT(EQUAL == 0);
6347 STATIC_ASSERT(kSmiTag == 0);
6348 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6349 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6350 __ Addu(sp, sp, Operand(2 * kPointerSize));
6351 __ Ret();
6352
6353 __ bind(&not_same);
6354
6355 // Check that both objects are sequential ASCII strings.
6356 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6357
6358 // Compare flat ASCII strings natively. Remove arguments from stack first.
6359 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6360 __ Addu(sp, sp, Operand(2 * kPointerSize));
6361 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6362
6363 __ bind(&runtime);
6364 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01006365}
6366
6367
6368void StringAddStub::Generate(MacroAssembler* masm) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00006369 Label call_runtime, call_builtin;
Ben Murdoch257744e2011-11-30 15:57:28 +00006370 Builtins::JavaScript builtin_id = Builtins::ADD;
6371
6372 Counters* counters = masm->isolate()->counters();
6373
6374 // Stack on entry:
6375 // sp[0]: second argument (right).
6376 // sp[4]: first argument (left).
6377
6378 // Load the two arguments.
6379 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6380 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6381
6382 // Make sure that both arguments are strings if not known in advance.
6383 if (flags_ == NO_STRING_ADD_FLAGS) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00006384 __ JumpIfEitherSmi(a0, a1, &call_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006385 // Load instance types.
6386 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6387 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6388 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6389 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6390 STATIC_ASSERT(kStringTag == 0);
6391 // If either is not a string, go to runtime.
6392 __ Or(t4, t0, Operand(t1));
6393 __ And(t4, t4, Operand(kIsNotStringMask));
Ben Murdochc7cc0282012-03-05 14:35:55 +00006394 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00006395 } else {
6396 // Here at least one of the arguments is definitely a string.
6397 // We convert the one that is not known to be a string.
6398 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6399 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6400 GenerateConvertArgument(
6401 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6402 builtin_id = Builtins::STRING_ADD_RIGHT;
6403 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6404 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6405 GenerateConvertArgument(
6406 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6407 builtin_id = Builtins::STRING_ADD_LEFT;
6408 }
6409 }
6410
6411 // Both arguments are strings.
6412 // a0: first string
6413 // a1: second string
6414 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6415 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6416 {
6417 Label strings_not_empty;
6418 // Check if either of the strings are empty. In that case return the other.
6419 // These tests use a zero-length check on the string length, which is a Smi.
6420 // Assert that Smi::FromInt(0) is really 0.
6421 STATIC_ASSERT(kSmiTag == 0);
6422 ASSERT(Smi::FromInt(0) == 0);
6423 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6424 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6425 __ mov(v0, a0); // Assume we'll return first string (from a0).
6426 __ movz(v0, a1, a2); // If first is empty, return second (from a1).
6427 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6428 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6429 __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6430 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6431
6432 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006433 __ DropAndRet(2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006434
6435 __ bind(&strings_not_empty);
6436 }
6437
6438 // Untag both string-lengths.
6439 __ sra(a2, a2, kSmiTagSize);
6440 __ sra(a3, a3, kSmiTagSize);
6441
6442 // Both strings are non-empty.
6443 // a0: first string
6444 // a1: second string
6445 // a2: length of first string
6446 // a3: length of second string
6447 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6448 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6449 // Look at the length of the result of adding the two strings.
6450 Label string_add_flat_result, longer_than_two;
6451 // Adding two lengths can't overflow.
6452 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6453 __ Addu(t2, a2, Operand(a3));
6454 // Use the symbol table when adding two one character strings, as it
6455 // helps later optimizations to return a symbol here.
6456 __ Branch(&longer_than_two, ne, t2, Operand(2));
6457
6458 // Check that both strings are non-external ASCII strings.
6459 if (flags_ != NO_STRING_ADD_FLAGS) {
6460 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6461 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6462 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6463 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6464 }
6465 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
Ben Murdochc7cc0282012-03-05 14:35:55 +00006466 &call_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006467
6468 // Get the two characters forming the sub string.
6469 __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6470 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6471
6472 // Try to lookup two character string in symbol table. If it is not found
6473 // just allocate a new one.
6474 Label make_two_character_string;
6475 StringHelper::GenerateTwoCharacterSymbolTableProbe(
Ben Murdochc7cc0282012-03-05 14:35:55 +00006476 masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
Ben Murdoch257744e2011-11-30 15:57:28 +00006477 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006478 __ DropAndRet(2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006479
6480 __ bind(&make_two_character_string);
6481 // Resulting string has length 2 and first chars of two strings
6482 // are combined into single halfword in a2 register.
6483 // So we can fill resulting string without two loops by a single
6484 // halfword store instruction (which assumes that processor is
6485 // in a little endian mode).
6486 __ li(t2, Operand(2));
Ben Murdochc7cc0282012-03-05 14:35:55 +00006487 __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006488 __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6489 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006490 __ DropAndRet(2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006491
6492 __ bind(&longer_than_two);
6493 // Check if resulting string will be flat.
6494 __ Branch(&string_add_flat_result, lt, t2,
Ben Murdochc7cc0282012-03-05 14:35:55 +00006495 Operand(ConsString::kMinLength));
Ben Murdoch257744e2011-11-30 15:57:28 +00006496 // Handle exceptionally long strings in the runtime system.
6497 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6498 ASSERT(IsPowerOf2(String::kMaxLength + 1));
6499 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006500 __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
Ben Murdoch257744e2011-11-30 15:57:28 +00006501
6502 // If result is not supposed to be flat, allocate a cons string object.
6503 // If both strings are ASCII the result is an ASCII cons string.
6504 if (flags_ != NO_STRING_ADD_FLAGS) {
6505 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6506 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6507 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6508 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6509 }
6510 Label non_ascii, allocated, ascii_data;
6511 STATIC_ASSERT(kTwoByteStringTag == 0);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006512 // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
Ben Murdoch257744e2011-11-30 15:57:28 +00006513 __ And(t4, t0, Operand(t1));
6514 __ And(t4, t4, Operand(kStringEncodingMask));
6515 __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6516
6517 // Allocate an ASCII cons string.
6518 __ bind(&ascii_data);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006519 __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006520 __ bind(&allocated);
6521 // Fill the fields of the cons string.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006522 __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
6523 __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00006524 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006525 __ DropAndRet(2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006526
6527 __ bind(&non_ascii);
6528 // At least one of the strings is two-byte. Check whether it happens
6529 // to contain only ASCII characters.
6530 // t0: first instance type.
6531 // t1: second instance type.
6532 // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
6533 __ And(at, t0, Operand(kAsciiDataHintMask));
6534 __ and_(at, at, t1);
6535 __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6536
6537 __ xor_(t0, t0, t1);
6538 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6539 __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6540 __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6541
6542 // Allocate a two byte cons string.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006543 __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006544 __ Branch(&allocated);
6545
Ben Murdochc7cc0282012-03-05 14:35:55 +00006546 // We cannot encounter sliced strings or cons strings here since:
6547 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
6548 // Handle creating a flat result from either external or sequential strings.
6549 // Locate the first characters' locations.
Ben Murdoch257744e2011-11-30 15:57:28 +00006550 // a0: first string
6551 // a1: second string
6552 // a2: length of first string
6553 // a3: length of second string
6554 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6555 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6556 // t2: sum of lengths.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006557 Label first_prepared, second_prepared;
Ben Murdoch257744e2011-11-30 15:57:28 +00006558 __ bind(&string_add_flat_result);
6559 if (flags_ != NO_STRING_ADD_FLAGS) {
6560 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6561 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6562 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6563 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6564 }
Ben Murdochc7cc0282012-03-05 14:35:55 +00006565 // Check whether both strings have the same encoding.
6566 __ Xor(t3, t0, Operand(t1));
6567 __ And(t3, t3, Operand(kStringEncodingMask));
6568 __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00006569
Ben Murdochc7cc0282012-03-05 14:35:55 +00006570 STATIC_ASSERT(kSeqStringTag == 0);
6571 __ And(t4, t0, Operand(kStringRepresentationMask));
6572
6573 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6574 Label skip_first_add;
6575 __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
6576 __ Branch(USE_DELAY_SLOT, &first_prepared);
6577 __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
6578 __ bind(&skip_first_add);
6579 // External string: rule out short external string and load string resource.
6580 STATIC_ASSERT(kShortExternalStringTag != 0);
6581 __ And(t4, t0, Operand(kShortExternalStringMask));
6582 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6583 __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
6584 __ bind(&first_prepared);
6585
6586 STATIC_ASSERT(kSeqStringTag == 0);
6587 __ And(t4, t1, Operand(kStringRepresentationMask));
6588 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6589 Label skip_second_add;
6590 __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
6591 __ Branch(USE_DELAY_SLOT, &second_prepared);
6592 __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
6593 __ bind(&skip_second_add);
6594 // External string: rule out short external string and load string resource.
6595 STATIC_ASSERT(kShortExternalStringTag != 0);
6596 __ And(t4, t1, Operand(kShortExternalStringMask));
6597 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6598 __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
6599 __ bind(&second_prepared);
6600
6601 Label non_ascii_string_add_flat_result;
6602 // t3: first character of first string
6603 // a1: first character of second string
Ben Murdoch257744e2011-11-30 15:57:28 +00006604 // a2: length of first string
6605 // a3: length of second string
Ben Murdoch257744e2011-11-30 15:57:28 +00006606 // t2: sum of lengths.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006607 // Both strings have the same encoding.
6608 STATIC_ASSERT(kTwoByteStringTag == 0);
6609 __ And(t4, t1, Operand(kStringEncodingMask));
6610 __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00006611
Ben Murdochc7cc0282012-03-05 14:35:55 +00006612 __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6613 __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6614 // v0: result string.
6615 // t3: first character of first string.
6616 // a1: first character of second string
Ben Murdoch257744e2011-11-30 15:57:28 +00006617 // a2: length of first string.
6618 // a3: length of second string.
6619 // t2: first character of result.
Ben Murdoch257744e2011-11-30 15:57:28 +00006620
Ben Murdochc7cc0282012-03-05 14:35:55 +00006621 StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00006622 // t2: next character of result.
Ben Murdoch257744e2011-11-30 15:57:28 +00006623 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
Ben Murdoch257744e2011-11-30 15:57:28 +00006624 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006625 __ DropAndRet(2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006626
6627 __ bind(&non_ascii_string_add_flat_result);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006628 __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
6629 __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6630 // v0: result string.
6631 // t3: first character of first string.
6632 // a1: first character of second string.
Ben Murdoch257744e2011-11-30 15:57:28 +00006633 // a2: length of first string.
6634 // a3: length of second string.
6635 // t2: first character of result.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006636 StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
6637 // t2: next character of result.
Ben Murdoch257744e2011-11-30 15:57:28 +00006638 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6639
Ben Murdoch257744e2011-11-30 15:57:28 +00006640 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006641 __ DropAndRet(2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006642
6643 // Just jump to runtime to add the two strings.
Ben Murdochc7cc0282012-03-05 14:35:55 +00006644 __ bind(&call_runtime);
Ben Murdoch257744e2011-11-30 15:57:28 +00006645 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6646
6647 if (call_builtin.is_linked()) {
6648 __ bind(&call_builtin);
6649 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6650 }
6651}
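
// Condensed C++ sketch of the fast-path decisions above (illustrative only;
// the enum and function are not V8 API, min_cons_length stands for
// ConsString::kMinLength and max_length for String::kMaxLength):
enum StringAddStrategySketch {
  kAddReturnLeft,      // Right operand is empty.
  kAddReturnRight,     // Left operand is empty.
  kAddTwoCharLookup,   // Combined length 2: probe the symbol table.
  kAddCopyFlat,        // Short result: allocate a sequential string and copy.
  kAddMakeCons,        // Long result: allocate a ConsString.
  kAddRuntime          // Overlong result: defer to the runtime.
};
StringAddStrategySketch ChooseStringAddStrategy(int left_length,
                                                int right_length,
                                                int min_cons_length,
                                                int max_length) {
  if (left_length == 0) return kAddReturnRight;
  if (right_length == 0) return kAddReturnLeft;
  int sum = left_length + right_length;
  if (sum == 2) return kAddTwoCharLookup;
  if (sum < min_cons_length) return kAddCopyFlat;
  if (sum > max_length) return kAddRuntime;
  return kAddMakeCons;
}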
6652
6653
6654void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6655 int stack_offset,
6656 Register arg,
6657 Register scratch1,
6658 Register scratch2,
6659 Register scratch3,
6660 Register scratch4,
6661 Label* slow) {
6662 // First check if the argument is already a string.
6663 Label not_string, done;
6664 __ JumpIfSmi(arg, &not_string);
6665 __ GetObjectType(arg, scratch1, scratch1);
6666 __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6667
6668 // Check the number to string cache.
6669 Label not_cached;
6670 __ bind(&not_string);
6671 // Puts the cached result into scratch1.
6672 NumberToStringStub::GenerateLookupNumberStringCache(masm,
6673 arg,
6674 scratch1,
6675 scratch2,
6676 scratch3,
6677 scratch4,
6678 false,
6679 &not_cached);
6680 __ mov(arg, scratch1);
6681 __ sw(arg, MemOperand(sp, stack_offset));
6682 __ jmp(&done);
6683
6684 // Check if the argument is a safe string wrapper.
6685 __ bind(&not_cached);
6686 __ JumpIfSmi(arg, slow);
6687 __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6688 __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6689 __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6690 __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6691 __ And(scratch2, scratch2, scratch4);
6692 __ Branch(slow, ne, scratch2, Operand(scratch4));
6693 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6694 __ sw(arg, MemOperand(sp, stack_offset));
6695
6696 __ bind(&done);
Steve Block44f0eee2011-05-26 01:26:41 +01006697}
6698
6699
6700void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006701 ASSERT(state_ == CompareIC::SMIS);
6702 Label miss;
6703 __ Or(a2, a1, a0);
6704 __ JumpIfNotSmi(a2, &miss);
6705
6706 if (GetCondition() == eq) {
6707 // For equality we do not care about the sign of the result.
6708 __ Subu(v0, a0, a1);
6709 } else {
6710 // Untag before subtracting to avoid handling overflow.
6711 __ SmiUntag(a1);
6712 __ SmiUntag(a0);
6713 __ Subu(v0, a1, a0);
6714 }
6715 __ Ret();
6716
6717 __ bind(&miss);
6718 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006719}
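
// Note on the subtraction above, as a C++ sketch (illustrative): for pure
// equality the tagged values can be compared directly, since only the
// zero/nonzero distinction matters; for ordered comparisons both smis are
// untagged first so the difference of two 31-bit payloads cannot overflow
// and its sign is reliable (kSmiTagSize is 1 on this port).
int32_t CompareSmisSketch(int32_t left_tagged, int32_t right_tagged,
                          bool equality_only) {
  if (equality_only) {
    return left_tagged == right_tagged ? 0 : 1;  // Sign not meaningful.
  }
  int32_t left = left_tagged >> 1;    // SmiUntag.
  int32_t right = right_tagged >> 1;  // SmiUntag.
  return left - right;                // Negative, zero or positive.
}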
6720
6721
6722void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006723 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6724
6725 Label generic_stub;
6726 Label unordered;
6727 Label miss;
6728 __ And(a2, a1, Operand(a0));
6729 __ JumpIfSmi(a2, &generic_stub);
6730
6731 __ GetObjectType(a0, a2, a2);
6732 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6733 __ GetObjectType(a1, a2, a2);
6734 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6735
6736 // Inlining the double comparison and falling back to the general compare
6737 // stub if NaN is involved or FPU is unsupported.
6738 if (CpuFeatures::IsSupported(FPU)) {
6739 CpuFeatures::Scope scope(FPU);
6740
6741 // Load left and right operand.
6742 __ Subu(a2, a1, Operand(kHeapObjectTag));
6743 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6744 __ Subu(a2, a0, Operand(kHeapObjectTag));
6745 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6746
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006747 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6748 Label fpu_eq, fpu_lt;
6749 // Test if equal, and also handle the unordered/NaN case.
6750 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006751
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006752 // Test if less (unordered case is already handled).
6753 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
Ben Murdoch257744e2011-11-30 15:57:28 +00006754
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006755 // Otherwise it's greater, so just fall thru, and return.
6756 __ Ret(USE_DELAY_SLOT);
6757 __ li(v0, Operand(GREATER)); // In delay slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00006758
Ben Murdoch257744e2011-11-30 15:57:28 +00006759 __ bind(&fpu_eq);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006760 __ Ret(USE_DELAY_SLOT);
6761 __ li(v0, Operand(EQUAL)); // In delay slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00006762
6763 __ bind(&fpu_lt);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006764 __ Ret(USE_DELAY_SLOT);
6765 __ li(v0, Operand(LESS)); // In delay slot.
Ben Murdoch257744e2011-11-30 15:57:28 +00006766
6767 __ bind(&unordered);
6768 }
6769
6770 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6771 __ bind(&generic_stub);
6772 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6773
6774 __ bind(&miss);
6775 GenerateMiss(masm);
6776}
6777
6778
6779void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6780 ASSERT(state_ == CompareIC::SYMBOLS);
6781 Label miss;
6782
6783 // Registers containing left and right operands respectively.
6784 Register left = a1;
6785 Register right = a0;
6786 Register tmp1 = a2;
6787 Register tmp2 = a3;
6788
6789 // Check that both operands are heap objects.
6790 __ JumpIfEitherSmi(left, right, &miss);
6791
6792 // Check that both operands are symbols.
6793 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6794 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6795 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6796 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6797 STATIC_ASSERT(kSymbolTag != 0);
6798 __ And(tmp1, tmp1, Operand(tmp2));
6799 __ And(tmp1, tmp1, kIsSymbolMask);
6800 __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6801 // Make sure a0 is non-zero. At this point input operands are
6802 // guaranteed to be non-zero.
6803 ASSERT(right.is(a0));
6804 STATIC_ASSERT(EQUAL == 0);
6805 STATIC_ASSERT(kSmiTag == 0);
6806 __ mov(v0, right);
6807 // Symbols are compared by identity.
6808 __ Ret(ne, left, Operand(right));
6809 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6810 __ Ret();
6811
6812 __ bind(&miss);
6813 GenerateMiss(masm);
6814}
6815
6816
6817void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6818 ASSERT(state_ == CompareIC::STRINGS);
6819 Label miss;
6820
6821 // Registers containing left and right operands respectively.
6822 Register left = a1;
6823 Register right = a0;
6824 Register tmp1 = a2;
6825 Register tmp2 = a3;
6826 Register tmp3 = t0;
6827 Register tmp4 = t1;
6828 Register tmp5 = t2;
6829
6830 // Check that both operands are heap objects.
6831 __ JumpIfEitherSmi(left, right, &miss);
6832
6833 // Check that both operands are strings. This leaves the instance
6834 // types loaded in tmp1 and tmp2.
6835 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6836 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6837 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6838 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6839 STATIC_ASSERT(kNotStringTag != 0);
6840 __ Or(tmp3, tmp1, tmp2);
6841 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6842 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6843
6844 // Fast check for identical strings.
6845 Label left_ne_right;
6846 STATIC_ASSERT(EQUAL == 0);
6847 STATIC_ASSERT(kSmiTag == 0);
6848 __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
6849 __ mov(v0, zero_reg); // In the delay slot.
6850 __ Ret();
6851 __ bind(&left_ne_right);
6852
6853 // Handle not identical strings.
6854
6855 // Check that both strings are symbols. If they are, we're done
6856 // because we already know they are not identical.
6857 ASSERT(GetCondition() == eq);
6858 STATIC_ASSERT(kSymbolTag != 0);
6859 __ And(tmp3, tmp1, Operand(tmp2));
6860 __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6861 Label is_symbol;
6862 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
6863 __ mov(v0, a0); // In the delay slot.
6864 // Make sure a0 is non-zero. At this point input operands are
6865 // guaranteed to be non-zero.
6866 ASSERT(right.is(a0));
6867 __ Ret();
6868 __ bind(&is_symbol);
6869
6870 // Check that both strings are sequential ASCII.
6871 Label runtime;
6872 __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
6873 &runtime);
6874
6875 // Compare flat ASCII strings. Returns when done.
6876 StringCompareStub::GenerateFlatAsciiStringEquals(
6877 masm, left, right, tmp1, tmp2, tmp3);
6878
6879 // Handle more complex cases in runtime.
6880 __ bind(&runtime);
6881 __ Push(left, right);
6882 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6883
6884 __ bind(&miss);
6885 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006886}
6887
6888
6889void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006890 ASSERT(state_ == CompareIC::OBJECTS);
6891 Label miss;
6892 __ And(a2, a1, Operand(a0));
6893 __ JumpIfSmi(a2, &miss);
6894
6895 __ GetObjectType(a0, a2, a2);
6896 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6897 __ GetObjectType(a1, a2, a2);
6898 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6899
6900 ASSERT(GetCondition() == eq);
6901 __ Subu(v0, a0, Operand(a1));
6902 __ Ret();
6903
6904 __ bind(&miss);
6905 GenerateMiss(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01006906}
6907
6908
Ben Murdochc7cc0282012-03-05 14:35:55 +00006909void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
6910 Label miss;
6911 __ And(a2, a1, a0);
6912 __ JumpIfSmi(a2, &miss);
6913 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
6914 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
6915 __ Branch(&miss, ne, a2, Operand(known_map_));
6916 __ Branch(&miss, ne, a3, Operand(known_map_));
Ben Murdoch257744e2011-11-30 15:57:28 +00006917
Ben Murdochc7cc0282012-03-05 14:35:55 +00006918 __ Ret(USE_DELAY_SLOT);
6919 __ subu(v0, a0, a1);
6920
6921 __ bind(&miss);
6922 GenerateMiss(masm);
6923}
6924
6925void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006926 {
Ben Murdochc7cc0282012-03-05 14:35:55 +00006927 // Call the runtime system in a fresh internal frame.
6928 ExternalReference miss =
6929 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006930 FrameScope scope(masm, StackFrame::INTERNAL);
6931 __ Push(a1, a0);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006932 __ push(ra);
6933 __ Push(a1, a0);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006934 __ li(t0, Operand(Smi::FromInt(op_)));
6935 __ push(t0);
6936 __ CallExternalReference(miss, 3);
Ben Murdochc7cc0282012-03-05 14:35:55 +00006937 // Compute the entry point of the rewritten stub.
6938 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
6939 // Restore registers.
6940 __ Pop(a1, a0, ra);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006941 }
Ben Murdoch257744e2011-11-30 15:57:28 +00006942 __ Jump(a2);
6943}
6944
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00006945
Ben Murdoch257744e2011-11-30 15:57:28 +00006946void DirectCEntryStub::Generate(MacroAssembler* masm) {
6947 // No need to pop or drop anything, LeaveExitFrame will restore the old
6948 // stack, thus dropping the allocated space for the return value.
6949 // The saved ra is after the reserved stack space for the 4 args.
6950 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
6951
Ben Murdoch592a9fc2012-03-05 11:04:45 +00006952 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
Ben Murdoch257744e2011-11-30 15:57:28 +00006953 // In case of an error the return address may point to a memory area
6954 // filled with kZapValue by the GC.
6955 // Dereference the address and check for this.
6956 __ lw(t0, MemOperand(t9));
6957 __ Assert(ne, "Received invalid return address.", t0,
6958 Operand(reinterpret_cast<uint32_t>(kZapValue)));
6959 }
6960 __ Jump(t9);
Steve Block44f0eee2011-05-26 01:26:41 +01006961}
6962
6963
Ben Murdoch257744e2011-11-30 15:57:28 +00006964void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6965 ExternalReference function) {
6966 __ li(t9, Operand(function));
6967 this->GenerateCall(masm, t9);
6968}
6969
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00006970
Ben Murdoch257744e2011-11-30 15:57:28 +00006971void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6972 Register target) {
6973 __ Move(t9, target);
6974 __ AssertStackIsAligned();
6975 // Allocate space for arg slots.
6976 __ Subu(sp, sp, kCArgsSlotsSize);
6977
6978 // Block the trampoline pool through the whole function to make sure the
6979 // number of generated instructions is constant.
6980 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
6981
6982 // We need to get the current 'pc' value, which is not available on MIPS.
6983 Label find_ra;
6984 masm->bal(&find_ra); // ra = pc + 8.
6985 masm->nop(); // Branch delay slot nop.
6986 masm->bind(&find_ra);
6987
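  // ra now holds the address of find_ra (pc + 8 from the bal above). The six
  // instructions counted below are the addiu, the sw, the two-instruction li
  // forced by its final 'true' argument, and the two-instruction register
  // jump, so the stored ra points just past this call sequence; the trailing
  // ASSERT_EQ verifies that count.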
6988 const int kNumInstructionsToJump = 6;
6989 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
6990 // Push return address (accessible to GC through exit frame pc).
6991 // This spot for ra was reserved in EnterExitFrame.
6992 masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
6993 masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6994 RelocInfo::CODE_TARGET), true);
6995 // Call the function.
6996 masm->Jump(t9);
6997 // Make sure the stored 'ra' points to this position.
6998 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
6999}
7000
7001
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007002void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
7003 Label* miss,
7004 Label* done,
7005 Register receiver,
7006 Register properties,
7007 Handle<String> name,
7008 Register scratch0) {
7009 // If names of slots in range from 1 to kProbes - 1 for the hash value are
Ben Murdoch257744e2011-11-30 15:57:28 +00007010 // not equal to the name and kProbes-th slot is not used (its name is the
7011 // undefined value), it guarantees the hash table doesn't contain the
7012 // property. It's true even if some slots represent deleted properties
7013 // (their names are the null value).
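  // For example, the first inlined probe checks the slot at
  // (hash + GetProbeOffset(0)) & mask, the second the slot at
  // (hash + GetProbeOffset(1)) & mask, and so on, with mask = capacity - 1;
  // anything not decided here falls through to the out-of-line stub called
  // below.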
7014 for (int i = 0; i < kInlinedProbes; i++) {
7015 // scratch0 points to properties hash.
7016 // Compute the masked index: (hash + i + i * i) & mask.
7017 Register index = scratch0;
7018 // Capacity is smi 2^n.
7019 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
7020 __ Subu(index, index, Operand(1));
7021 __ And(index, index, Operand(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007022 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
Ben Murdoch257744e2011-11-30 15:57:28 +00007023
7024 // Scale the index by multiplying by the entry size.
7025 ASSERT(StringDictionary::kEntrySize == 3);
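    // index *= 3 (the entry size): index * 2 via the shift, plus the
    // original index.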
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007026 __ sll(at, index, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00007027 __ Addu(index, index, at);
7028
7029 Register entity_name = scratch0;
7030 // Having undefined at this place means the name is not contained.
7031 ASSERT_EQ(kSmiTagSize, 1);
7032 Register tmp = properties;
Ben Murdoch257744e2011-11-30 15:57:28 +00007033 __ sll(scratch0, index, 1);
7034 __ Addu(tmp, properties, scratch0);
7035 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
7036
7037 ASSERT(!tmp.is(entity_name));
7038 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
7039 __ Branch(done, eq, entity_name, Operand(tmp));
7040
7041 if (i != kInlinedProbes - 1) {
7042 // Stop if found the property.
7043 __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
7044
7045 // Check if the entry name is not a symbol.
7046 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
7047 __ lbu(entity_name,
7048 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
7049 __ And(scratch0, entity_name, Operand(kIsSymbolMask));
7050 __ Branch(miss, eq, scratch0, Operand(zero_reg));
7051
7052 // Restore the properties.
7053 __ lw(properties,
7054 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7055 }
7056 }
7057
7058 const int spill_mask =
7059 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007060 a2.bit() | a1.bit() | a0.bit() | v0.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00007061
7062 __ MultiPush(spill_mask);
7063 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7064 __ li(a1, Operand(Handle<String>(name)));
7065 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007066 __ CallStub(&stub);
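  // v0 is part of the spill mask and is about to be restored, so the stub's
  // result is staged in at, which MultiPush/MultiPop do not touch.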
7067 __ mov(at, v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00007068 __ MultiPop(spill_mask);
7069
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007070 __ Branch(done, eq, at, Operand(zero_reg));
7071 __ Branch(miss, ne, at, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00007072}
7073
7074
7075// Probe the string dictionary in the |elements| register. Jump to the
7076// |done| label if a property with the given name is found. Jump to
7077// the |miss| label otherwise.
7078// If lookup was successful |scratch2| will be equal to elements + 4 * index.
7079void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7080 Label* miss,
7081 Label* done,
7082 Register elements,
7083 Register name,
7084 Register scratch1,
7085 Register scratch2) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007086 ASSERT(!elements.is(scratch1));
7087 ASSERT(!elements.is(scratch2));
7088 ASSERT(!name.is(scratch1));
7089 ASSERT(!name.is(scratch2));
7090
Ben Murdoch257744e2011-11-30 15:57:28 +00007091 // Assert that name contains a string.
7092 if (FLAG_debug_code) __ AbortIfNotString(name);
7093
7094 // Compute the capacity mask.
7095 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
7096 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
7097 __ Subu(scratch1, scratch1, Operand(1));
7098
7099 // Generate an unrolled loop that performs a few probes before
7100 // giving up. Measurements done on Gmail indicate that 2 probes
7101 // cover ~93% of loads from dictionaries.
7102 for (int i = 0; i < kInlinedProbes; i++) {
7103 // Compute the masked index: (hash + i + i * i) & mask.
7104 __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7105 if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted and masked by the srl and And instructions that follow.
7109 ASSERT(StringDictionary::GetProbeOffset(i) <
7110 1 << (32 - String::kHashFieldOffset));
7111 __ Addu(scratch2, scratch2, Operand(
7112 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7113 }
7114 __ srl(scratch2, scratch2, String::kHashShift);
7115 __ And(scratch2, scratch1, scratch2);
7116
7117 // Scale the index by multiplying by the element size.
7118 ASSERT(StringDictionary::kEntrySize == 3);
7119 // scratch2 = scratch2 * 3.
7120
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007121 __ sll(at, scratch2, 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00007122 __ Addu(scratch2, scratch2, at);
7123
7124 // Check if the key is identical to the name.
7125 __ sll(at, scratch2, 2);
7126 __ Addu(scratch2, elements, at);
7127 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
7128 __ Branch(done, eq, name, Operand(at));
7129 }
7130
7131 const int spill_mask =
7132 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007133 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
Ben Murdoch257744e2011-11-30 15:57:28 +00007134 ~(scratch1.bit() | scratch2.bit());
7135
7136 __ MultiPush(spill_mask);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007137 if (name.is(a0)) {
7138 ASSERT(!elements.is(a1));
7139 __ Move(a1, name);
7140 __ Move(a0, elements);
7141 } else {
7142 __ Move(a0, elements);
7143 __ Move(a1, name);
7144 }
Ben Murdoch257744e2011-11-30 15:57:28 +00007145 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
7146 __ CallStub(&stub);
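  // The stub reports success in v0 and, on success, leaves
  // elements + 4 * index in a2; both may be in the spill mask, so they are
  // copied to scratch2 and at before MultiPop restores the saved registers.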
7147 __ mov(scratch2, a2);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007148 __ mov(at, v0);
Ben Murdoch257744e2011-11-30 15:57:28 +00007149 __ MultiPop(spill_mask);
7150
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007151 __ Branch(done, ne, at, Operand(zero_reg));
7152 __ Branch(miss, eq, at, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00007153}
7154
7155
7156void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007157 // This stub overrides SometimesSetsUpAFrame() to return false. That means
7158 // we cannot call anything that could cause a GC from this stub.
Ben Murdoch257744e2011-11-30 15:57:28 +00007159 // Registers:
  //  dictionary (a0): StringDictionary to probe.
  //  key (a1): the name to look up.
  //  index (a2): will hold the index of the entry if the lookup is
  //              successful; might alias with result.
  // Returns:
  //  result (v0): zero if the lookup failed, non-zero otherwise.
7167
7168 Register result = v0;
7169 Register dictionary = a0;
7170 Register key = a1;
7171 Register index = a2;
7172 Register mask = a3;
7173 Register hash = t0;
7174 Register undefined = t1;
7175 Register entry_key = t2;
7176
7177 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7178
7179 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
7180 __ sra(mask, mask, kSmiTagSize);
7181 __ Subu(mask, mask, Operand(1));
7182
7183 __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
7184
7185 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7186
7187 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7188 // Compute the masked index: (hash + i + i * i) & mask.
7189 // Capacity is smi 2^n.
7190 if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted and masked by the srl and And instructions that follow.
7194 ASSERT(StringDictionary::GetProbeOffset(i) <
7195 1 << (32 - String::kHashFieldOffset));
7196 __ Addu(index, hash, Operand(
7197 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7198 } else {
7199 __ mov(index, hash);
7200 }
7201 __ srl(index, index, String::kHashShift);
7202 __ And(index, mask, index);
7203
7204 // Scale the index by multiplying by the entry size.
7205 ASSERT(StringDictionary::kEntrySize == 3);
7206 // index *= 3.
7207 __ mov(at, index);
7208 __ sll(index, index, 1);
7209 __ Addu(index, index, at);
7210
7211
7212 ASSERT_EQ(kSmiTagSize, 1);
7213 __ sll(index, index, 2);
7214 __ Addu(index, index, dictionary);
7215 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
7216
7217 // Having undefined at this place means the name is not contained.
7218 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
7219
7220 // Stop if found the property.
7221 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
7222
7223 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7224 // Check if the entry name is not a symbol.
7225 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7226 __ lbu(entry_key,
7227 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
7228 __ And(result, entry_key, Operand(kIsSymbolMask));
7229 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
7230 }
7231 }
7232
7233 __ bind(&maybe_in_dictionary);
7234 // If we are doing negative lookup then probing failure should be
7235 // treated as a lookup success. For positive lookup probing failure
7236 // should be treated as lookup failure.
7237 if (mode_ == POSITIVE_LOOKUP) {
7238 __ mov(result, zero_reg);
7239 __ Ret();
7240 }
7241
7242 __ bind(&in_dictionary);
7243 __ li(result, 1);
7244 __ Ret();
7245
7246 __ bind(&not_in_dictionary);
7247 __ mov(result, zero_reg);
7248 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01007249}
7250
7251
Ben Murdoch592a9fc2012-03-05 11:04:45 +00007252struct AheadOfTimeWriteBarrierStubList {
7253 Register object, value, address;
7254 RememberedSetAction action;
7255};
7256
7257
7258struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7259 // Used in RegExpExecStub.
7260 { s2, s0, t3, EMIT_REMEMBERED_SET },
7261 { s2, a2, t3, EMIT_REMEMBERED_SET },
7262 // Used in CompileArrayPushCall.
7263 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7264 // Also used in KeyedStoreIC::GenerateGeneric.
7265 { a3, t0, t1, EMIT_REMEMBERED_SET },
7266 // Used in CompileStoreGlobal.
7267 { t0, a1, a2, OMIT_REMEMBERED_SET },
7268 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7269 { a1, a2, a3, EMIT_REMEMBERED_SET },
7270 { a3, a2, a1, EMIT_REMEMBERED_SET },
7271 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7272 { a2, a1, a3, EMIT_REMEMBERED_SET },
7273 { a3, a1, a2, EMIT_REMEMBERED_SET },
7274 // KeyedStoreStubCompiler::GenerateStoreFastElement.
7275 { t0, a2, a3, EMIT_REMEMBERED_SET },
7276 // ElementsTransitionGenerator::GenerateSmiOnlyToObject
7277 // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7278 // and ElementsTransitionGenerator::GenerateDoubleToObject
7279 { a2, a3, t5, EMIT_REMEMBERED_SET },
7280 // ElementsTransitionGenerator::GenerateDoubleToObject
7281 { t2, a2, a0, EMIT_REMEMBERED_SET },
7282 { a2, t2, t5, EMIT_REMEMBERED_SET },
7283 // StoreArrayLiteralElementStub::Generate
7284 { t1, a0, t2, EMIT_REMEMBERED_SET },
7285 // Null termination.
7286 { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
7287};
7288
7289
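// A RecordWriteStub counts as pregenerated when its (object, value, address)
// registers and remembered-set action match one of the entries above and it
// does not save FP registers; GenerateFixedRegStubsAheadOfTime() below creates
// exactly those stubs ahead of time.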
7290bool RecordWriteStub::IsPregenerated() {
7291 for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7292 !entry->object.is(no_reg);
7293 entry++) {
7294 if (object_.is(entry->object) &&
7295 value_.is(entry->value) &&
7296 address_.is(entry->address) &&
7297 remembered_set_action_ == entry->action &&
7298 save_fp_regs_mode_ == kDontSaveFPRegs) {
7299 return true;
7300 }
7301 }
7302 return false;
7303}
7304
7305
7306bool StoreBufferOverflowStub::IsPregenerated() {
7307 return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7308}
7309
7310
7311void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7312 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7313 stub1.GetCode()->set_is_pregenerated(true);
7314}
7315
7316
7317void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7318 for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7319 !entry->object.is(no_reg);
7320 entry++) {
7321 RecordWriteStub stub(entry->object,
7322 entry->value,
7323 entry->address,
7324 entry->action,
7325 kDontSaveFPRegs);
7326 stub.GetCode()->set_is_pregenerated(true);
7327 }
7328}
7329
7330
// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object; this stub now makes sure
// the GC is kept informed. The word in the object where the value has been
// written is held in the address_ register.
7335void RecordWriteStub::Generate(MacroAssembler* masm) {
7336 Label skip_to_incremental_noncompacting;
7337 Label skip_to_incremental_compacting;
7338
7339 // The first two branch+nop instructions are generated with labels so as to
7340 // get the offset fixed up correctly by the bind(Label*) call. We patch it
7341 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
7342 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
7343 // incremental heap marking.
7344 // See RecordWriteStub::Patch for details.
7345 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
7346 __ nop();
7347 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
7348 __ nop();
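  // Initially both branches above are patched into their never-taken form by
  // the PatchBranchIntoNop calls at the end of this function, so execution
  // falls through to the store-buffer-only code below; activating incremental
  // marking patches the appropriate branch back into an always-taken beq.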
7349
7350 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7351 __ RememberedSetHelper(object_,
7352 address_,
7353 value_,
7354 save_fp_regs_mode_,
7355 MacroAssembler::kReturnAtEnd);
7356 }
7357 __ Ret();
7358
7359 __ bind(&skip_to_incremental_noncompacting);
7360 GenerateIncremental(masm, INCREMENTAL);
7361
7362 __ bind(&skip_to_incremental_compacting);
7363 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7364
7365 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7366 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7367
7368 PatchBranchIntoNop(masm, 0);
7369 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
7370}
7371
7372
7373void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7374 regs_.Save(masm);
7375
7376 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7377 Label dont_need_remembered_set;
7378
7379 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7380 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7381 regs_.scratch0(),
7382 &dont_need_remembered_set);
7383
7384 __ CheckPageFlag(regs_.object(),
7385 regs_.scratch0(),
7386 1 << MemoryChunk::SCAN_ON_SCAVENGE,
7387 ne,
7388 &dont_need_remembered_set);
7389
7390 // First notify the incremental marker if necessary, then update the
7391 // remembered set.
7392 CheckNeedsToInformIncrementalMarker(
7393 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7394 InformIncrementalMarker(masm, mode);
7395 regs_.Restore(masm);
7396 __ RememberedSetHelper(object_,
7397 address_,
7398 value_,
7399 save_fp_regs_mode_,
7400 MacroAssembler::kReturnAtEnd);
7401
7402 __ bind(&dont_need_remembered_set);
7403 }
7404
7405 CheckNeedsToInformIncrementalMarker(
7406 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7407 InformIncrementalMarker(masm, mode);
7408 regs_.Restore(masm);
7409 __ Ret();
7410}
7411
7412
7413void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7414 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7415 int argument_count = 3;
7416 __ PrepareCallCFunction(argument_count, regs_.scratch0());
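  // a0 must end up holding the object. If the slot address currently lives in
  // a0, it is staged in scratch0 first so the Move into a0 does not clobber
  // it.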
7417 Register address =
7418 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7419 ASSERT(!address.is(regs_.object()));
7420 ASSERT(!address.is(a0));
7421 __ Move(address, regs_.address());
7422 __ Move(a0, regs_.object());
7423 if (mode == INCREMENTAL_COMPACTION) {
7424 __ Move(a1, address);
7425 } else {
7426 ASSERT(mode == INCREMENTAL);
7427 __ lw(a1, MemOperand(address, 0));
7428 }
7429 __ li(a2, Operand(ExternalReference::isolate_address()));
7430
7431 AllowExternalCallThatCantCauseGC scope(masm);
7432 if (mode == INCREMENTAL_COMPACTION) {
7433 __ CallCFunction(
7434 ExternalReference::incremental_evacuation_record_write_function(
7435 masm->isolate()),
7436 argument_count);
7437 } else {
7438 ASSERT(mode == INCREMENTAL);
7439 __ CallCFunction(
7440 ExternalReference::incremental_marking_record_write_function(
7441 masm->isolate()),
7442 argument_count);
7443 }
7444 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7445}
7446
7447
7448void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7449 MacroAssembler* masm,
7450 OnNoNeedToInformIncrementalMarker on_no_need,
7451 Mode mode) {
7452 Label on_black;
7453 Label need_incremental;
7454 Label need_incremental_pop_scratch;
7455
  // Examine the color of the object: if it is not black, we don't have to
  // inform the incremental marker.
7458 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7459
7460 regs_.Restore(masm);
7461 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7462 __ RememberedSetHelper(object_,
7463 address_,
7464 value_,
7465 save_fp_regs_mode_,
7466 MacroAssembler::kReturnAtEnd);
7467 } else {
7468 __ Ret();
7469 }
7470
7471 __ bind(&on_black);
7472
7473 // Get the value from the slot.
7474 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7475
7476 if (mode == INCREMENTAL_COMPACTION) {
7477 Label ensure_not_white;
7478
7479 __ CheckPageFlag(regs_.scratch0(), // Contains value.
7480 regs_.scratch1(), // Scratch.
7481 MemoryChunk::kEvacuationCandidateMask,
7482 eq,
7483 &ensure_not_white);
7484
7485 __ CheckPageFlag(regs_.object(),
7486 regs_.scratch1(), // Scratch.
7487 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
7488 eq,
7489 &need_incremental);
7490
7491 __ bind(&ensure_not_white);
7492 }
7493
7494 // We need extra registers for this, so we push the object and the address
7495 // register temporarily.
7496 __ Push(regs_.object(), regs_.address());
7497 __ EnsureNotWhite(regs_.scratch0(), // The value.
7498 regs_.scratch1(), // Scratch.
7499 regs_.object(), // Scratch.
7500 regs_.address(), // Scratch.
7501 &need_incremental_pop_scratch);
7502 __ Pop(regs_.object(), regs_.address());
7503
7504 regs_.Restore(masm);
7505 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7506 __ RememberedSetHelper(object_,
7507 address_,
7508 value_,
7509 save_fp_regs_mode_,
7510 MacroAssembler::kReturnAtEnd);
7511 } else {
7512 __ Ret();
7513 }
7514
7515 __ bind(&need_incremental_pop_scratch);
7516 __ Pop(regs_.object(), regs_.address());
7517
7518 __ bind(&need_incremental);
7519
7520 // Fall through when we need to inform the incremental marker.
7521}
7522
7523
7524void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7525 // ----------- S t a t e -------------
7526 // -- a0 : element value to store
7527 // -- a1 : array literal
7528 // -- a2 : map of array literal
7529 // -- a3 : element index as smi
7530 // -- t0 : array literal index in function as smi
7531 // -----------------------------------
7532
7533 Label element_done;
7534 Label double_elements;
7535 Label smi_element;
7536 Label slow_elements;
7537 Label fast_elements;
7538
7539 __ CheckFastElements(a2, t1, &double_elements);
7540 // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
7541 __ JumpIfSmi(a0, &smi_element);
7542 __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
7543
  // Storing into the array literal requires an elements transition. Call into
  // the runtime.
  __ bind(&slow_elements);
7548 __ Push(a1, a3, a0);
7549 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
7550 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
7551 __ Push(t1, t0);
7552 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7553
7554 // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
7555 __ bind(&fast_elements);
7556 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
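  // The index in a3 is a smi (value << kSmiTagSize); shifting it left by
  // kPointerSizeLog2 - kSmiTagSize turns it into a byte offset of
  // value * kPointerSize.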
7557 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7558 __ Addu(t2, t1, t2);
7559 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7560 __ sw(a0, MemOperand(t2, 0));
7561 // Update the write barrier for the array store.
7562 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
7563 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
7564 __ Ret(USE_DELAY_SLOT);
7565 __ mov(v0, a0);
7566
7567 // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
7568 // FAST_ELEMENTS, and value is Smi.
7569 __ bind(&smi_element);
7570 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7571 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7572 __ Addu(t2, t1, t2);
7573 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
7574 __ Ret(USE_DELAY_SLOT);
7575 __ mov(v0, a0);
7576
7577 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
7578 __ bind(&double_elements);
7579 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7580 __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
7581 &slow_elements);
7582 __ Ret(USE_DELAY_SLOT);
7583 __ mov(v0, a0);
7584}
7585
7586
Steve Block44f0eee2011-05-26 01:26:41 +01007587#undef __
7588
7589} } // namespace v8::internal
7590
7591#endif // V8_TARGET_ARCH_MIPS