1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_MIPS)
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "codegen.h"
35#include "regexp-macro-assembler.h"
36
37namespace v8 {
38namespace internal {
39
40
41#define __ ACCESS_MASM(masm)
42
43static void EmitIdenticalObjectComparison(MacroAssembler* masm,
44 Label* slow,
45 Condition cc,
46 bool never_nan_nan);
47static void EmitSmiNonsmiComparison(MacroAssembler* masm,
48 Register lhs,
49 Register rhs,
50 Label* rhs_not_nan,
51 Label* slow,
52 bool strict);
53static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
54static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
55 Register lhs,
56 Register rhs);
57
58
59// Check if the operand is a heap number.
60static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
61 Register scratch1, Register scratch2,
62 Label* not_a_heap_number) {
63 __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
64 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
65 __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
66}
67
68
69void ToNumberStub::Generate(MacroAssembler* masm) {
70 // The ToNumber stub takes one argument in a0.
71 Label check_heap_number, call_builtin;
72 __ JumpIfNotSmi(a0, &check_heap_number);
73 __ mov(v0, a0);
74 __ Ret();
75
76 __ bind(&check_heap_number);
77 EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
78 __ mov(v0, a0);
79 __ Ret();
80
81 __ bind(&call_builtin);
82 __ push(a0);
83 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
84}
85
86
87void FastNewClosureStub::Generate(MacroAssembler* masm) {
88 // Create a new closure from the given function info in new
89 // space. Set the context to the current context in cp.
90 Label gc;
91
92 // Pop the function info from the stack.
93 __ pop(a3);
94
95 // Attempt to allocate new JSFunction in new space.
96 __ AllocateInNewSpace(JSFunction::kSize,
97 v0,
98 a1,
99 a2,
100 &gc,
101 TAG_OBJECT);
102
103 int map_index = strict_mode_ == kStrictMode
104 ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
105 : Context::FUNCTION_MAP_INDEX;
106
107 // Compute the function map in the current global context and set that
108 // as the map of the allocated object.
109 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
110 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
111 __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
112 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
113
114 // Initialize the rest of the function. We don't have to update the
115 // write barrier because the allocated object is in new space.
116 __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
117 __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
118 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
119 __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
120 __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
121 __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
122 __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
123 __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
124 __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
125 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
126
127 // Initialize the code pointer in the function to be the one
128 // found in the shared function info object.
129 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
130 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
131 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
132
133 // Return result. The argument function info has been popped already.
134 __ Ret();
135
136 // Create a new closure through the slower runtime call.
137 __ bind(&gc);
138 __ LoadRoot(t0, Heap::kFalseValueRootIndex);
139 __ Push(cp, a3, t0);
140 __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
141}
142
143
144void FastNewContextStub::Generate(MacroAssembler* masm) {
145 // Try to allocate the context in new space.
146 Label gc;
147 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
148
149 // Attempt to allocate the context in new space.
150 __ AllocateInNewSpace(FixedArray::SizeFor(length),
151 v0,
152 a1,
153 a2,
154 &gc,
155 TAG_OBJECT);
156
157 // Load the function from the stack.
158 __ lw(a3, MemOperand(sp, 0));
159
160 // Setup the object header.
161 __ LoadRoot(a2, Heap::kContextMapRootIndex);
162 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
163 __ li(a2, Operand(Smi::FromInt(length)));
164 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
165
166 // Setup the fixed slots.
167 __ li(a1, Operand(Smi::FromInt(0)));
168 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
169 __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
170 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
171 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
172
173 // Copy the global object from the surrounding context.
174 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
175 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
176
177 // Initialize the rest of the slots to undefined.
178 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
179 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
180 __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
181 }
182
183 // Remove the on-stack argument and return.
184 __ mov(cp, v0);
185 __ Pop();
186 __ Ret();
187
188 // Need to collect. Call into runtime system.
189 __ bind(&gc);
190 __ TailCallRuntime(Runtime::kNewContext, 1, 1);
191}
192
193
194void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
195 // Stack layout on entry:
196 // [sp]: constant elements.
197 // [sp + kPointerSize]: literal index.
198 // [sp + (2 * kPointerSize)]: literals array.
199
200 // All sizes here are multiples of kPointerSize.
201 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
202 int size = JSArray::kSize + elements_size;
203
204 // Load boilerplate object into r3 and check if we need to create a
205 // boilerplate.
206 Label slow_case;
207 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
208 __ lw(a0, MemOperand(sp, 1 * kPointerSize));
209 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
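  // The literal index in a0 is a smi; the shift below turns it into a byte
  // offset into the literals array pointed to by a3.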
210 __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
211 __ Addu(t0, a3, t0);
212 __ lw(a3, MemOperand(t0));
213 __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
214 __ Branch(&slow_case, eq, a3, Operand(t1));
215
216 if (FLAG_debug_code) {
217 const char* message;
218 Heap::RootListIndex expected_map_index;
219 if (mode_ == CLONE_ELEMENTS) {
220 message = "Expected (writable) fixed array";
221 expected_map_index = Heap::kFixedArrayMapRootIndex;
222 } else {
223 ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
224 message = "Expected copy-on-write fixed array";
225 expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
226 }
227 __ push(a3);
228 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
229 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
230 __ LoadRoot(at, expected_map_index);
231 __ Assert(eq, message, a3, Operand(at));
232 __ pop(a3);
233 }
234
235 // Allocate both the JS array and the elements array in one big
236 // allocation. This avoids multiple limit checks.
237 // Return new object in v0.
238 __ AllocateInNewSpace(size,
239 v0,
240 a1,
241 a2,
242 &slow_case,
243 TAG_OBJECT);
244
245 // Copy the JS array part.
246 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
247 if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
248 __ lw(a1, FieldMemOperand(a3, i));
249 __ sw(a1, FieldMemOperand(v0, i));
250 }
251 }
252
253 if (length_ > 0) {
254 // Get hold of the elements array of the boilerplate and setup the
255 // elements pointer in the resulting object.
256 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
257 __ Addu(a2, v0, Operand(JSArray::kSize));
258 __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
259
260 // Copy the elements array.
261 __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
262 }
263
264 // Return and remove the on-stack parameters.
265 __ Addu(sp, sp, Operand(3 * kPointerSize));
266 __ Ret();
267
268 __ bind(&slow_case);
269 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
270}
271
272
273// Takes a Smi and converts to an IEEE 64 bit floating point value in two
274// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
275// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
276// scratch register. Destroys the source register. No GC occurs during this
277// stub so you don't have to set up the frame.
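// For example, the Smi 5 becomes the double 5.0: the exponent word is
// 0x40140000 and the mantissa word is 0x00000000.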
278class ConvertToDoubleStub : public CodeStub {
279 public:
280 ConvertToDoubleStub(Register result_reg_1,
281 Register result_reg_2,
282 Register source_reg,
283 Register scratch_reg)
284 : result1_(result_reg_1),
285 result2_(result_reg_2),
286 source_(source_reg),
287 zeros_(scratch_reg) { }
288
289 private:
290 Register result1_;
291 Register result2_;
292 Register source_;
293 Register zeros_;
294
295 // Minor key encoding in 16 bits.
296 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
297 class OpBits: public BitField<Token::Value, 2, 14> {};
298
299 Major MajorKey() { return ConvertToDouble; }
300 int MinorKey() {
301 // Encode the parameters in a unique 16 bit value.
302 return result1_.code() +
303 (result2_.code() << 4) +
304 (source_.code() << 8) +
305 (zeros_.code() << 12);
306 }
307
308 void Generate(MacroAssembler* masm);
309
310 const char* GetName() { return "ConvertToDoubleStub"; }
311
312#ifdef DEBUG
313 void Print() { PrintF("ConvertToDoubleStub\n"); }
314#endif
315};
316
317
318void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
319#ifndef BIG_ENDIAN_FLOATING_POINT
320 Register exponent = result1_;
321 Register mantissa = result2_;
322#else
323 Register exponent = result2_;
324 Register mantissa = result1_;
325#endif
326 Label not_special;
327 // Convert from Smi to integer.
328 __ sra(source_, source_, kSmiTagSize);
329 // Move sign bit from source to destination. This works because the sign bit
330 // in the exponent word of the double has the same position and polarity as
331 // the 2's complement sign bit in a Smi.
332 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
333 __ And(exponent, source_, Operand(HeapNumber::kSignMask));
334 // Subtract from 0 if source was negative.
335 __ subu(at, zero_reg, source_);
336 __ movn(source_, at, exponent);
337
338 // We have -1, 0 or 1, which we treat specially. Register source_ contains
339 // absolute value: it is either equal to 1 (special case of -1 and 1),
340 // greater than 1 (not a special case) or less than 1 (special case of 0).
341 __ Branch(&not_special, gt, source_, Operand(1));
342
343 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
344 static const uint32_t exponent_word_for_1 =
345 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
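  // (1023 << 20 == 0x3ff00000, which is the upper word of the double 1.0.)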
346 // Safe to use 'at' as dest reg here.
347 __ Or(at, exponent, Operand(exponent_word_for_1));
348 __ movn(exponent, at, source_); // Write exp when source not 0.
349 // 1, 0 and -1 all have 0 for the second word.
350 __ mov(mantissa, zero_reg);
351 __ Ret();
352
353 __ bind(&not_special);
354 // Count leading zeros.
355 // Gets the wrong answer for 0, but we already checked for that case above.
356 __ clz(zeros_, source_);
357 // Compute exponent and or it into the exponent register.
358 // We use mantissa as a scratch register here.
359 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
360 __ subu(mantissa, mantissa, zeros_);
361 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
362 __ Or(exponent, exponent, mantissa);
363
364 // Shift up the source chopping the top bit off.
365 __ Addu(zeros_, zeros_, Operand(1));
366 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
367 __ sllv(source_, source_, zeros_);
368 // Compute lower part of fraction (last 12 bits).
369 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
370 // And the top (top 20 bits).
371 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
372 __ or_(exponent, exponent, source_);
373
374 __ Ret();
375}
376
377
378void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
379 FloatingPointHelper::Destination destination,
380 Register scratch1,
381 Register scratch2) {
382 if (CpuFeatures::IsSupported(FPU)) {
383 CpuFeatures::Scope scope(FPU);
384 __ sra(scratch1, a0, kSmiTagSize);
385 __ mtc1(scratch1, f14);
386 __ cvt_d_w(f14, f14);
387 __ sra(scratch1, a1, kSmiTagSize);
388 __ mtc1(scratch1, f12);
389 __ cvt_d_w(f12, f12);
390 if (destination == kCoreRegisters) {
391 __ Move(a2, a3, f14);
392 __ Move(a0, a1, f12);
393 }
394 } else {
395 ASSERT(destination == kCoreRegisters);
396 // Write Smi from a0 to a3 and a2 in double format.
397 __ mov(scratch1, a0);
398 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
399 __ push(ra);
400 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
401 // Write Smi from a1 to a1 and a0 in double format.
402 __ mov(scratch1, a1);
403 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
404 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
405 __ pop(ra);
406 }
407}
408
409
410void FloatingPointHelper::LoadOperands(
411 MacroAssembler* masm,
412 FloatingPointHelper::Destination destination,
413 Register heap_number_map,
414 Register scratch1,
415 Register scratch2,
416 Label* slow) {
417
418 // Load right operand (a0) to f12 or a2/a3.
419 LoadNumber(masm, destination,
420 a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
421
422 // Load left operand (a1) to f14 or a0/a1.
423 LoadNumber(masm, destination,
424 a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
425}
426
427
428void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
429 Destination destination,
430 Register object,
431 FPURegister dst,
432 Register dst1,
433 Register dst2,
434 Register heap_number_map,
435 Register scratch1,
436 Register scratch2,
437 Label* not_number) {
438 if (FLAG_debug_code) {
439 __ AbortIfNotRootValue(heap_number_map,
440 Heap::kHeapNumberMapRootIndex,
441 "HeapNumberMap register clobbered.");
442 }
443
444 Label is_smi, done;
445
446 __ JumpIfSmi(object, &is_smi);
447 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
448
449 // Handle loading a double from a heap number.
450 if (CpuFeatures::IsSupported(FPU) &&
451 destination == kFPURegisters) {
452 CpuFeatures::Scope scope(FPU);
453 // Load the double from tagged HeapNumber to double register.
454
455 // ARM uses a workaround here because of the unaligned HeapNumber
456 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
457 // point in generating even more instructions.
458 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
459 } else {
460 ASSERT(destination == kCoreRegisters);
461 // Load the double from heap number to dst1 and dst2 in double format.
462 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
463 __ lw(dst2, FieldMemOperand(object,
464 HeapNumber::kValueOffset + kPointerSize));
465 }
466 __ Branch(&done);
467
468 // Handle loading a double from a smi.
469 __ bind(&is_smi);
470 if (CpuFeatures::IsSupported(FPU)) {
471 CpuFeatures::Scope scope(FPU);
472 // Convert smi to double using FPU instructions.
473 __ SmiUntag(scratch1, object);
474 __ mtc1(scratch1, dst);
475 __ cvt_d_w(dst, dst);
476 if (destination == kCoreRegisters) {
477 // Load the converted smi to dst1 and dst2 in double format.
478 __ Move(dst1, dst2, dst);
479 }
480 } else {
481 ASSERT(destination == kCoreRegisters);
482 // Write smi to dst1 and dst2 double format.
483 __ mov(scratch1, object);
484 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
485 __ push(ra);
486 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
487 __ pop(ra);
488 }
489
490 __ bind(&done);
491}
492
493
494void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
495 Register object,
496 Register dst,
497 Register heap_number_map,
498 Register scratch1,
499 Register scratch2,
500 Register scratch3,
501 FPURegister double_scratch,
502 Label* not_number) {
503 if (FLAG_debug_code) {
504 __ AbortIfNotRootValue(heap_number_map,
505 Heap::kHeapNumberMapRootIndex,
506 "HeapNumberMap register clobbered.");
507 }
508 Label is_smi;
509 Label done;
510 Label not_in_int32_range;
511
512 __ JumpIfSmi(object, &is_smi);
513 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
514 __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
515 __ ConvertToInt32(object,
516 dst,
517 scratch1,
518 scratch2,
519 double_scratch,
520 &not_in_int32_range);
521 __ jmp(&done);
522
523 __ bind(&not_in_int32_range);
524 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
525 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
526
527 __ EmitOutOfInt32RangeTruncate(dst,
528 scratch1,
529 scratch2,
530 scratch3);
531
532 __ jmp(&done);
533
534 __ bind(&is_smi);
535 __ SmiUntag(dst, object);
536 __ bind(&done);
537}
538
539
540void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
541 Register int_scratch,
542 Destination destination,
543 FPURegister double_dst,
544 Register dst1,
545 Register dst2,
546 Register scratch2,
547 FPURegister single_scratch) {
548 ASSERT(!int_scratch.is(scratch2));
549 ASSERT(!int_scratch.is(dst1));
550 ASSERT(!int_scratch.is(dst2));
551
552 Label done;
553
554 if (CpuFeatures::IsSupported(FPU)) {
555 CpuFeatures::Scope scope(FPU);
556 __ mtc1(int_scratch, single_scratch);
557 __ cvt_d_w(double_dst, single_scratch);
558 if (destination == kCoreRegisters) {
559 __ Move(dst1, dst2, double_dst);
560 }
561 } else {
562 Label fewer_than_20_useful_bits;
563 // Expected output:
564 // | dst2 | dst1 |
565 // | s | exp | mantissa |
566
567 // Check for zero.
568 __ mov(dst2, int_scratch);
569 __ mov(dst1, int_scratch);
570 __ Branch(&done, eq, int_scratch, Operand(zero_reg));
571
572 // Preload the sign of the value.
573 __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
574 // Get the absolute value of the object (as an unsigned integer).
575 Label skip_sub;
576 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
577 __ Subu(int_scratch, zero_reg, int_scratch);
578 __ bind(&skip_sub);
579
580 // Get mantissa[51:20].
581
582 // Get the position of the first set bit.
583 __ clz(dst1, int_scratch);
584 __ li(scratch2, 31);
585 __ Subu(dst1, scratch2, dst1);
586
587 // Set the exponent.
588 __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
589 __ Ins(dst2, scratch2,
590 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
591
592 // Clear the first non null bit.
593 __ li(scratch2, Operand(1));
594 __ sllv(scratch2, scratch2, dst1);
595 __ li(at, -1);
596 __ Xor(scratch2, scratch2, at);
597 __ And(int_scratch, int_scratch, scratch2);
598
599 // Get the number of bits to set in the lower part of the mantissa.
600 __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
601 __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
602 // Set the higher 20 bits of the mantissa.
603 __ srlv(at, int_scratch, scratch2);
604 __ or_(dst2, dst2, at);
605 __ li(at, 32);
606 __ subu(scratch2, at, scratch2);
607 __ sllv(dst1, int_scratch, scratch2);
608 __ Branch(&done);
609
610 __ bind(&fewer_than_20_useful_bits);
611 __ li(at, HeapNumber::kMantissaBitsInTopWord);
612 __ subu(scratch2, at, dst1);
613 __ sllv(scratch2, int_scratch, scratch2);
614 __ Or(dst2, dst2, scratch2);
615 // Set dst1 to 0.
616 __ mov(dst1, zero_reg);
617 }
618 __ bind(&done);
619}
620
621
622void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
623 Register object,
624 Destination destination,
625 FPURegister double_dst,
626 Register dst1,
627 Register dst2,
628 Register heap_number_map,
629 Register scratch1,
630 Register scratch2,
631 FPURegister single_scratch,
632 Label* not_int32) {
633 ASSERT(!scratch1.is(object) && !scratch2.is(object));
634 ASSERT(!scratch1.is(scratch2));
635 ASSERT(!heap_number_map.is(object) &&
636 !heap_number_map.is(scratch1) &&
637 !heap_number_map.is(scratch2));
638
639 Label done, obj_is_not_smi;
640
641 __ JumpIfNotSmi(object, &obj_is_not_smi);
642 __ SmiUntag(scratch1, object);
643 ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
644 scratch2, single_scratch);
645 __ Branch(&done);
646
647 __ bind(&obj_is_not_smi);
648 if (FLAG_debug_code) {
649 __ AbortIfNotRootValue(heap_number_map,
650 Heap::kHeapNumberMapRootIndex,
651 "HeapNumberMap register clobbered.");
652 }
653 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
654
655 // Load the number.
656 if (CpuFeatures::IsSupported(FPU)) {
657 CpuFeatures::Scope scope(FPU);
658 // Load the double value.
659 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
660
661 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
662 // On MIPS a lot of things cannot be implemented the same way so right
663 // now it makes a lot more sense to just do things manually.
664
665 // Save FCSR.
666 __ cfc1(scratch1, FCSR);
667 // Disable FPU exceptions.
668 __ ctc1(zero_reg, FCSR);
669 __ trunc_w_d(single_scratch, double_dst);
670 // Retrieve FCSR.
671 __ cfc1(scratch2, FCSR);
672 // Restore FCSR.
673 __ ctc1(scratch1, FCSR);
674
675 // Check for inexact conversion.
676 __ srl(scratch2, scratch2, kFCSRFlagShift);
677 __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
678
679 // Jump to not_int32 if the operation did not succeed.
680 __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
681
682 if (destination == kCoreRegisters) {
683 __ Move(dst1, dst2, double_dst);
684 }
685
686 } else {
687 ASSERT(!scratch1.is(object) && !scratch2.is(object));
688 // Load the double value in the destination registers.
689 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
690 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
691
692 // Check for 0 and -0.
693 __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
694 __ Or(scratch1, scratch1, Operand(dst2));
695 __ Branch(&done, eq, scratch1, Operand(zero_reg));
696
697 // Check that the value can be exactly represented by a 32-bit integer.
698 // Jump to not_int32 if that's not the case.
699 DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
700
701 // dst1 and dst2 were trashed. Reload the double value.
702 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
703 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
704 }
705
706 __ bind(&done);
707}
708
709
710void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
711 Register object,
712 Register dst,
713 Register heap_number_map,
714 Register scratch1,
715 Register scratch2,
716 Register scratch3,
717 FPURegister double_scratch,
718 Label* not_int32) {
719 ASSERT(!dst.is(object));
720 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
721 ASSERT(!scratch1.is(scratch2) &&
722 !scratch1.is(scratch3) &&
723 !scratch2.is(scratch3));
724
725 Label done;
726
727 // Untag the object into the destination register.
728 __ SmiUntag(dst, object);
729 // Just return if the object is a smi.
730 __ JumpIfSmi(object, &done);
731
732 if (FLAG_debug_code) {
733 __ AbortIfNotRootValue(heap_number_map,
734 Heap::kHeapNumberMapRootIndex,
735 "HeapNumberMap register clobbered.");
736 }
737 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
738
739 // Object is a heap number.
740 // Convert the floating point value to a 32-bit integer.
741 if (CpuFeatures::IsSupported(FPU)) {
742 CpuFeatures::Scope scope(FPU);
743 // Load the double value.
744 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
745
746 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
747 // On MIPS a lot of things cannot be implemented the same way so right
748 // now it makes a lot more sense to just do things manually.
749
750 // Save FCSR.
751 __ cfc1(scratch1, FCSR);
752 // Disable FPU exceptions.
753 __ ctc1(zero_reg, FCSR);
754 __ trunc_w_d(double_scratch, double_scratch);
755 // Retrieve FCSR.
756 __ cfc1(scratch2, FCSR);
757 // Restore FCSR.
758 __ ctc1(scratch1, FCSR);
759
760 // Check for inexact conversion.
761 __ srl(scratch2, scratch2, kFCSRFlagShift);
762 __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
763
764 // Jump to not_int32 if the operation did not succeed.
765 __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
766 // Get the result in the destination register.
767 __ mfc1(dst, double_scratch);
768
769 } else {
770 // Load the double value in the destination registers.
771 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
772 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
773
774 // Check for 0 and -0.
775 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
776 __ Or(dst, scratch2, Operand(dst));
777 __ Branch(&done, eq, dst, Operand(zero_reg));
778
779 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
780
781 // Registers state after DoubleIs32BitInteger.
782 // dst: mantissa[51:20].
783 // scratch2: 1
784
785 // Shift back the higher bits of the mantissa.
786 __ srlv(dst, dst, scratch3);
787 // Set the implicit first bit.
788 __ li(at, 32);
789 __ subu(scratch3, at, scratch3);
790 __ sllv(scratch2, scratch2, scratch3);
791 __ Or(dst, dst, scratch2);
792 // Set the sign.
793 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
794 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
795 Label skip_sub;
796 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
797 __ Subu(dst, zero_reg, dst);
798 __ bind(&skip_sub);
799 }
800
801 __ bind(&done);
802}
803
804
805void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
806 Register src1,
807 Register src2,
808 Register dst,
809 Register scratch,
810 Label* not_int32) {
811 // Get exponent alone in scratch.
812 __ Ext(scratch,
813 src1,
814 HeapNumber::kExponentShift,
815 HeapNumber::kExponentBits);
816
817 // Subtract the bias from the exponent.
818 __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
819
820 // src1: higher (exponent) part of the double value.
821 // src2: lower (mantissa) part of the double value.
822 // scratch: unbiased exponent.
823
824 // Fast cases. Check for obvious non 32-bit integer values.
825 // Negative exponent cannot yield 32-bit integers.
826 __ Branch(not_int32, lt, scratch, Operand(zero_reg));
827 // Exponent greater than 31 cannot yield 32-bit integers.
828 // Also, a positive value with an exponent equal to 31 is outside of the
829 // signed 32-bit integer range.
830 // Another way to put it is that if (exponent - signbit) > 30 then the
831 // number cannot be represented as an int32.
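  // For example, 2^31 has exponent 31 and sign bit 0, so 31 - 0 > 30 and it
  // is rejected, while kMinInt (-2^31) gives 31 - 1 == 30 and is accepted.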
832 Register tmp = dst;
833 __ srl(at, src1, 31);
834 __ subu(tmp, scratch, at);
835 __ Branch(not_int32, gt, tmp, Operand(30));
836 // - Bits [21:0] in the mantissa are not null.
837 __ And(tmp, src2, 0x3fffff);
838 __ Branch(not_int32, ne, tmp, Operand(zero_reg));
839
840 // Otherwise the exponent needs to be big enough to shift all the
841 // non-zero bits left. So we need the (30 - exponent) last bits of the
842 // 31 higher bits of the mantissa to be null.
843 // Because bits [21:0] are null, we can check instead that the
844 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
845
846 // Get the 32 higher bits of the mantissa in dst.
847 __ Ext(dst,
848 src2,
849 HeapNumber::kMantissaBitsInTopWord,
850 32 - HeapNumber::kMantissaBitsInTopWord);
851 __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
852 __ or_(dst, dst, at);
853
854 // Create the mask and test the lower bits (of the higher bits).
855 __ li(at, 32);
856 __ subu(scratch, at, scratch);
857 __ li(src2, 1);
858 __ sllv(src1, src2, scratch);
859 __ Subu(src1, src1, Operand(1));
860 __ And(src1, dst, src1);
861 __ Branch(not_int32, ne, src1, Operand(zero_reg));
862}
863
864
865void FloatingPointHelper::CallCCodeForDoubleOperation(
866 MacroAssembler* masm,
867 Token::Value op,
868 Register heap_number_result,
869 Register scratch) {
870 // Using core registers:
871 // a0: Left value (least significant part of mantissa).
872 // a1: Left value (sign, exponent, top of mantissa).
873 // a2: Right value (least significant part of mantissa).
874 // a3: Right value (sign, exponent, top of mantissa).
875
876 // Assert that heap_number_result is saved.
877 // We currently always use s0 to pass it.
878 ASSERT(heap_number_result.is(s0));
879
880 // Push the current return address before the C call.
881 __ push(ra);
882 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
883 if (!IsMipsSoftFloatABI) {
884 CpuFeatures::Scope scope(FPU);
885 // We are not using MIPS FPU instructions, and parameters for the runtime
886 // function call are prepared in a0-a3 registers, but the function we are
887 // calling is compiled with the hard-float flag and expects the hard-float ABI
888 // (parameters in f12/f14 registers). We need to copy parameters from
889 // a0-a3 registers to f12/f14 register pairs.
890 __ Move(f12, a0, a1);
891 __ Move(f14, a2, a3);
892 }
893 // Call C routine that may not cause GC or other trouble.
894 __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
895 4);
896 // Store answer in the overwritable heap number.
897 if (!IsMipsSoftFloatABI) {
898 CpuFeatures::Scope scope(FPU);
899 // Double returned in register f0.
900 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
901 } else {
902 // Double returned in registers v0 and v1.
903 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
904 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
905 }
906 // Place heap_number_result in v0 and return to the pushed return address.
907 __ mov(v0, heap_number_result);
908 __ pop(ra);
909 __ Ret();
910}
911
912
913// See comment for class, this does NOT work for int32's that are in Smi range.
914void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
915 Label max_negative_int;
916 // the_int_ has the answer which is a signed int32 but not a Smi.
917 // We test for the special value that has a different exponent.
918 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
919 // Test sign, and save for later conditionals.
920 __ And(sign_, the_int_, Operand(0x80000000u));
921 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
922
923 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
924 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
925 uint32_t non_smi_exponent =
926 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
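  // (On 32-bit targets Smis cover [-2^30, 2^30 - 1], so a non-Smi int32 other
  // than kMinInt has an absolute value in [2^30, 2^31), i.e. a biased exponent
  // of 1023 + 30 = 1053. kMinInt is handled separately below.)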
927 __ li(scratch_, Operand(non_smi_exponent));
928 // Set the sign bit in scratch_ if the value was negative.
929 __ or_(scratch_, scratch_, sign_);
930 // Subtract from 0 if the value was negative.
931 __ subu(at, zero_reg, the_int_);
932 __ movn(the_int_, at, sign_);
933 // We should be masking the implicit first digit of the mantissa away here,
934 // but it just ends up combining harmlessly with the last digit of the
935 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
936 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
937 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
938 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
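  // (kNonMantissaBitsInTopWord is 12, i.e. the sign bit plus 11 exponent bits,
  // so shift_distance is 10, matching the "shift 10" comment above.)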
939 __ srl(at, the_int_, shift_distance);
940 __ or_(scratch_, scratch_, at);
941 __ sw(scratch_, FieldMemOperand(the_heap_number_,
942 HeapNumber::kExponentOffset));
943 __ sll(scratch_, the_int_, 32 - shift_distance);
944 __ sw(scratch_, FieldMemOperand(the_heap_number_,
945 HeapNumber::kMantissaOffset));
946 __ Ret();
947
948 __ bind(&max_negative_int);
949 // The max negative int32 is stored as a positive number in the mantissa of
950 // a double because it uses a sign bit instead of using two's complement.
951 // The actual mantissa bits stored are all 0 because the implicit most
952 // significant 1 bit is not stored.
953 non_smi_exponent += 1 << HeapNumber::kExponentShift;
954 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
955 __ sw(scratch_,
956 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
957 __ mov(scratch_, zero_reg);
958 __ sw(scratch_,
959 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
960 __ Ret();
961}
962
963
964// Handle the case where the lhs and rhs are the same object.
965// Equality is almost reflexive (everything but NaN), so this is a test
966// for "identity and not NaN".
967static void EmitIdenticalObjectComparison(MacroAssembler* masm,
968 Label* slow,
969 Condition cc,
970 bool never_nan_nan) {
971 Label not_identical;
972 Label heap_number, return_equal;
973 Register exp_mask_reg = t5;
974
975 __ Branch(&not_identical, ne, a0, Operand(a1));
976
977 // The two objects are identical. If we know that one of them isn't NaN then
978 // we now know they test equal.
979 if (cc != eq || !never_nan_nan) {
980 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
981
982 // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
983 // so we do the second best thing - test it ourselves.
984 // They are both equal and they are not both Smis so both of them are not
985 // Smis. If it's not a heap number, then return equal.
986 if (cc == less || cc == greater) {
987 __ GetObjectType(a0, t4, t4);
988 __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
989 } else {
990 __ GetObjectType(a0, t4, t4);
991 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
992 // Comparing JS objects with <=, >= is complicated.
993 if (cc != eq) {
994 __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
995 // Normally here we fall through to return_equal, but undefined is
996 // special: (undefined == undefined) == true, but
997 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
998 if (cc == less_equal || cc == greater_equal) {
999 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
1000 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
1001 __ Branch(&return_equal, ne, a0, Operand(t2));
1002 if (cc == le) {
1003 // undefined <= undefined should fail.
1004 __ li(v0, Operand(GREATER));
1005 } else {
1006 // undefined >= undefined should fail.
1007 __ li(v0, Operand(LESS));
1008 }
1009 __ Ret();
1010 }
1011 }
1012 }
1013 }
1014
1015 __ bind(&return_equal);
1016 if (cc == less) {
1017 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
1018 } else if (cc == greater) {
1019 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
1020 } else {
1021 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
1022 }
1023 __ Ret();
1024
1025 if (cc != eq || !never_nan_nan) {
1026 // For less and greater we don't have to check for NaN since the result of
1027 // x < x is false regardless. For the others here is some code to check
1028 // for NaN.
1029 if (cc != lt && cc != gt) {
1030 __ bind(&heap_number);
1031 // It is a heap number, so return non-equal if it's NaN and equal if it's
1032 // not NaN.
1033
1034 // The representation of NaN values has all exponent bits (52..62) set,
1035 // and not all mantissa bits (0..51) clear.
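    // (All exponent bits set with a zero mantissa is an Infinity, e.g.
    // 0x7ff00000 00000000; any non-zero mantissa bit makes the value a NaN.)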
1036 // Read top bits of double representation (second word of value).
1037 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1038 // Test that exponent bits are all set.
1039 __ And(t3, t2, Operand(exp_mask_reg));
1040 // If all bits not set (ne cond), then not a NaN, objects are equal.
1041 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
1042
1043 // Shift out flag and all exponent bits, retaining only mantissa.
1044 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
1045 // Or with all low-bits of mantissa.
1046 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1047 __ Or(v0, t3, Operand(t2));
1048 // For equal we already have the right value in v0: Return zero (equal)
1049 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1050 // not (it's a NaN). For <= and >= we need to load v0 with the failing
1051 // value if it's a NaN.
1052 if (cc != eq) {
1053 // All-zero means Infinity means equal.
1054 __ Ret(eq, v0, Operand(zero_reg));
1055 if (cc == le) {
1056 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
1057 } else {
1058 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
1059 }
1060 }
1061 __ Ret();
1062 }
1063 // No fall through here.
1064 }
1065
1066 __ bind(&not_identical);
1067}
1068
1069
1070static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1071 Register lhs,
1072 Register rhs,
1073 Label* both_loaded_as_doubles,
1074 Label* slow,
1075 bool strict) {
1076 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1077 (lhs.is(a1) && rhs.is(a0)));
1078
1079 Label lhs_is_smi;
1080 __ And(t0, lhs, Operand(kSmiTagMask));
1081 __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
1082 // Rhs is a Smi.
1083 // Check whether the non-smi is a heap number.
1084 __ GetObjectType(lhs, t4, t4);
1085 if (strict) {
1086 // If lhs was not a number and rhs was a Smi then strict equality cannot
1087 // succeed. Return non-equal (lhs is already not zero).
1088 __ mov(v0, lhs);
1089 __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
1090 } else {
1091 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1092 // the runtime.
1093 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1094 }
1095
1096 // Rhs is a smi, lhs is a number.
1097 // Convert smi rhs to double.
1098 if (CpuFeatures::IsSupported(FPU)) {
1099 CpuFeatures::Scope scope(FPU);
1100 __ sra(at, rhs, kSmiTagSize);
1101 __ mtc1(at, f14);
1102 __ cvt_d_w(f14, f14);
1103 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1104 } else {
1105 // Load lhs to a double in a2, a3.
1106 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1107 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1108
1109 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
1110 __ mov(t6, rhs);
1111 ConvertToDoubleStub stub1(a1, a0, t6, t5);
1112 __ push(ra);
1113 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
1114
1115 __ pop(ra);
1116 }
1117
1118 // We now have both loaded as doubles.
1119 __ jmp(both_loaded_as_doubles);
1120
1121 __ bind(&lhs_is_smi);
1122 // Lhs is a Smi. Check whether the non-smi is a heap number.
1123 __ GetObjectType(rhs, t4, t4);
1124 if (strict) {
1125 // If lhs was not a number and rhs was a Smi then strict equality cannot
1126 // succeed. Return non-equal.
1127 __ li(v0, Operand(1));
1128 __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
1129 } else {
1130 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1131 // the runtime.
1132 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1133 }
1134
1135 // Lhs is a smi, rhs is a number.
1136 // Convert smi lhs to double.
1137 if (CpuFeatures::IsSupported(FPU)) {
1138 CpuFeatures::Scope scope(FPU);
1139 __ sra(at, lhs, kSmiTagSize);
1140 __ mtc1(at, f12);
1141 __ cvt_d_w(f12, f12);
1142 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1143 } else {
1144 // Convert lhs to a double format. t5 is scratch.
1145 __ mov(t6, lhs);
1146 ConvertToDoubleStub stub2(a3, a2, t6, t5);
1147 __ push(ra);
1148 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
1149 __ pop(ra);
1150 // Load rhs to a double in a1, a0.
1151 if (rhs.is(a0)) {
1152 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1153 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1154 } else {
1155 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1156 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1157 }
1158 }
1159 // Fall through to both_loaded_as_doubles.
1160}
1161
1162
1163void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1164 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1165 if (CpuFeatures::IsSupported(FPU)) {
1166 CpuFeatures::Scope scope(FPU);
1167 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1168 __ Move(t0, t1, f14);
1169 __ Move(t2, t3, f12);
1170 } else {
1171 // Lhs and rhs are already loaded to GP registers.
1172 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1173 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1174 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1175 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1176 }
1177 Register rhs_exponent = exp_first ? t0 : t1;
1178 Register lhs_exponent = exp_first ? t2 : t3;
1179 Register rhs_mantissa = exp_first ? t1 : t0;
1180 Register lhs_mantissa = exp_first ? t3 : t2;
1181 Label one_is_nan, neither_is_nan;
1182 Label lhs_not_nan_exp_mask_is_loaded;
1183
1184 Register exp_mask_reg = t4;
1185 __ li(exp_mask_reg, HeapNumber::kExponentMask);
1186 __ and_(t5, lhs_exponent, exp_mask_reg);
1187 __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1188
1189 __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1190 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1191
1192 __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1193
1194 __ li(exp_mask_reg, HeapNumber::kExponentMask);
1195 __ bind(&lhs_not_nan_exp_mask_is_loaded);
1196 __ and_(t5, rhs_exponent, exp_mask_reg);
1197
1198 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1199
1200 __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1201 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1202
1203 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1204
1205 __ bind(&one_is_nan);
1206 // NaN comparisons always fail.
1207 // Load whatever we need in v0 to make the comparison fail.
1208 if (cc == lt || cc == le) {
1209 __ li(v0, Operand(GREATER));
1210 } else {
1211 __ li(v0, Operand(LESS));
1212 }
1213 __ Ret(); // Return.
1214
1215 __ bind(&neither_is_nan);
1216}
1217
1218
1219static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1220 // f12 and f14 have the two doubles. Neither is a NaN.
1221 // Call a native function to do a comparison between two non-NaNs.
1222 // Call C routine that may not cause GC or other trouble.
1223 // We use a call and return manually because we need the argument slots to
1224 // be freed.
1225
1226 Label return_result_not_equal, return_result_equal;
1227 if (cc == eq) {
1228 // Doubles are not equal unless they have the same bit pattern.
1229 // Exception: 0 and -0.
1230 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1231 if (CpuFeatures::IsSupported(FPU)) {
1232 CpuFeatures::Scope scope(FPU);
1233 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1234 __ Move(t0, t1, f14);
1235 __ Move(t2, t3, f12);
1236 } else {
1237 // Lhs and rhs are already loaded to GP registers.
1238 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1239 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1240 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1241 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1242 }
1243 Register rhs_exponent = exp_first ? t0 : t1;
1244 Register lhs_exponent = exp_first ? t2 : t3;
1245 Register rhs_mantissa = exp_first ? t1 : t0;
1246 Register lhs_mantissa = exp_first ? t3 : t2;
1247
1248 __ xor_(v0, rhs_mantissa, lhs_mantissa);
1249 __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1250
1251 __ subu(v0, rhs_exponent, lhs_exponent);
1252 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1253 // 0, -0 case.
1254 __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1255 __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1256 __ or_(t4, rhs_exponent, lhs_exponent);
1257 __ or_(t4, t4, rhs_mantissa);
1258
1259 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1260
1261 __ bind(&return_result_equal);
1262 __ li(v0, Operand(EQUAL));
1263 __ Ret();
1264 }
1265
1266 __ bind(&return_result_not_equal);
1267
1268 if (!CpuFeatures::IsSupported(FPU)) {
1269 __ push(ra);
1270 __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments.
1271 if (!IsMipsSoftFloatABI) {
1272 // We are not using MIPS FPU instructions, and parameters for the runtime
1273 // function call are prepared in a0-a3 registers, but the function we are
1274 // calling is compiled with the hard-float flag and expects the hard-float ABI
1275 // (parameters in f12/f14 registers). We need to copy parameters from
1276 // a0-a3 registers to f12/f14 register pairs.
1277 __ Move(f12, a0, a1);
1278 __ Move(f14, a2, a3);
1279 }
1280 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
1281 __ pop(ra); // Because this function returns int, result is in v0.
1282 __ Ret();
1283 } else {
1284 CpuFeatures::Scope scope(FPU);
1285 Label equal, less_than;
1286 __ c(EQ, D, f12, f14);
1287 __ bc1t(&equal);
1288 __ nop();
1289
1290 __ c(OLT, D, f12, f14);
1291 __ bc1t(&less_than);
1292 __ nop();
1293
1294 // Not equal, not less, not NaN, must be greater.
1295 __ li(v0, Operand(GREATER));
1296 __ Ret();
1297
1298 __ bind(&equal);
1299 __ li(v0, Operand(EQUAL));
1300 __ Ret();
1301
1302 __ bind(&less_than);
1303 __ li(v0, Operand(LESS));
1304 __ Ret();
1305 }
1306}
1307
1308
1309static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1310 Register lhs,
1311 Register rhs) {
1312 // If either operand is a JSObject or an oddball value, then they are
1313 // not equal since their pointers are different.
1314 // There is no test for undetectability in strict equality.
1315 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1316 Label first_non_object;
1317 // Get the type of the first operand into a2 and compare it with
1318 // FIRST_JS_OBJECT_TYPE.
1319 __ GetObjectType(lhs, a2, a2);
1320 __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_OBJECT_TYPE));
1321
1322 // Return non-zero.
1323 Label return_not_equal;
1324 __ bind(&return_not_equal);
1325 __ li(v0, Operand(1));
1326 __ Ret();
1327
1328 __ bind(&first_non_object);
1329 // Check for oddballs: true, false, null, undefined.
1330 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1331
1332 __ GetObjectType(rhs, a3, a3);
1333 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_OBJECT_TYPE));
1334
1335 // Check for oddballs: true, false, null, undefined.
1336 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1337
1338 // Now that we have the types we might as well check for symbol-symbol.
1339 // Ensure that no non-strings have the symbol bit set.
1340 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
1341 STATIC_ASSERT(kSymbolTag != 0);
1342 __ And(t2, a2, Operand(a3));
1343 __ And(t0, t2, Operand(kIsSymbolMask));
1344 __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1345}
1346
1347
1348static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1349 Register lhs,
1350 Register rhs,
1351 Label* both_loaded_as_doubles,
1352 Label* not_heap_numbers,
1353 Label* slow) {
1354 __ GetObjectType(lhs, a3, a2);
1355 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1356 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1357 // If first was a heap number & second wasn't, go to slow case.
1358 __ Branch(slow, ne, a3, Operand(a2));
1359
1360 // Both are heap numbers. Load them up then jump to the code we have
1361 // for that.
1362 if (CpuFeatures::IsSupported(FPU)) {
1363 CpuFeatures::Scope scope(FPU);
1364 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1365 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1366 } else {
1367 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1368 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1369 if (rhs.is(a0)) {
1370 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1371 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1372 } else {
1373 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1374 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1375 }
1376 }
1377 __ jmp(both_loaded_as_doubles);
1378}
1379
1380
1381// Fast negative check for symbol-to-symbol equality.
1382static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1383 Register lhs,
1384 Register rhs,
1385 Label* possible_strings,
1386 Label* not_both_strings) {
1387 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1388 (lhs.is(a1) && rhs.is(a0)));
1389
1390 // a2 is object type of lhs.
1391 // Ensure that no non-strings have the symbol bit set.
1392 Label object_test;
1393 STATIC_ASSERT(kSymbolTag != 0);
1394 __ And(at, a2, Operand(kIsNotStringMask));
1395 __ Branch(&object_test, ne, at, Operand(zero_reg));
1396 __ And(at, a2, Operand(kIsSymbolMask));
1397 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1398 __ GetObjectType(rhs, a3, a3);
1399 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1400 __ And(at, a3, Operand(kIsSymbolMask));
1401 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1402
1403 // Both are symbols. We already checked they weren't the same pointer
1404 // so they are not equal.
1405 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1406 __ Ret();
1407
1408 __ bind(&object_test);
1409 __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_OBJECT_TYPE));
1410 __ GetObjectType(rhs, a2, a3);
1411 __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_OBJECT_TYPE));
1412
1413 // If both objects are undetectable, they are equal. Otherwise, they
1414 // are not equal, since they are different objects and an object is not
1415 // equal to undefined.
1416 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1417 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1418 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1419 __ and_(a0, a2, a3);
1420 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1421 __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
1422 __ Ret();
1423}
1424
1425
1426void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1427 Register object,
1428 Register result,
1429 Register scratch1,
1430 Register scratch2,
1431 Register scratch3,
1432 bool object_is_smi,
1433 Label* not_found) {
1434 // Use of registers. Register result is used as a temporary.
1435 Register number_string_cache = result;
1436 Register mask = scratch3;
1437
1438 // Load the number string cache.
1439 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1440
1441 // Make the hash mask from the length of the number string cache. It
1442 // contains two elements (number and string) for each cache entry.
1443 __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1444 // Divide length by two (length is a smi).
1445 __ sra(mask, mask, kSmiTagSize + 1);
1446 __ Addu(mask, mask, -1); // Make mask.
1447
1448 // Calculate the entry in the number string cache. The hash value in the
1449 // number string cache for smis is just the smi value, and the hash for
1450 // doubles is the xor of the upper and lower words. See
1451 // Heap::GetNumberStringCache.
1452 Isolate* isolate = masm->isolate();
1453 Label is_smi;
1454 Label load_result_from_cache;
1455 if (!object_is_smi) {
1456 __ JumpIfSmi(object, &is_smi);
1457 if (CpuFeatures::IsSupported(FPU)) {
1458 CpuFeatures::Scope scope(FPU);
1459 __ CheckMap(object,
1460 scratch1,
1461 Heap::kHeapNumberMapRootIndex,
1462 not_found,
1463 DONT_DO_SMI_CHECK);
1464
1465 STATIC_ASSERT(8 == kDoubleSize);
1466 __ Addu(scratch1,
1467 object,
1468 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1469 __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1470 __ lw(scratch1, MemOperand(scratch1, 0));
1471 __ Xor(scratch1, scratch1, Operand(scratch2));
1472 __ And(scratch1, scratch1, Operand(mask));
1473
1474 // Calculate address of entry in string cache: each entry consists
1475 // of two pointer sized fields.
1476 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1477 __ Addu(scratch1, number_string_cache, scratch1);
1478
1479 Register probe = mask;
1480 __ lw(probe,
1481 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1482 __ JumpIfSmi(probe, not_found);
1483 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1484 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
1485 __ c(EQ, D, f12, f14);
1486 __ bc1t(&load_result_from_cache);
1487 __ nop(); // bc1t() requires explicit fill of branch delay slot.
1488 __ Branch(not_found);
1489 } else {
1490 // Note that there is no cache check for the non-FPU case, even though
1491 // it seems there could be. It may be a tiny optimization for non-FPU
1492 // cores.
1493 __ Branch(not_found);
1494 }
1495 }
1496
1497 __ bind(&is_smi);
1498 Register scratch = scratch1;
1499 __ sra(scratch, object, 1); // Shift away the tag.
1500 __ And(scratch, mask, Operand(scratch));
1501
1502 // Calculate address of entry in string cache: each entry consists
1503 // of two pointer sized fields.
1504 __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1505 __ Addu(scratch, number_string_cache, scratch);
1506
1507 // Check if the entry is the smi we are looking for.
1508 Register probe = mask;
1509 __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1510 __ Branch(not_found, ne, object, Operand(probe));
1511
1512 // Get the result from the cache.
1513 __ bind(&load_result_from_cache);
1514 __ lw(result,
1515 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1516
1517 __ IncrementCounter(isolate->counters()->number_to_string_native(),
1518 1,
1519 scratch1,
1520 scratch2);
1521}
1522
1523
1524void NumberToStringStub::Generate(MacroAssembler* masm) {
1525 Label runtime;
1526
1527 __ lw(a1, MemOperand(sp, 0));
1528
1529 // Generate code to lookup number in the number string cache.
1530 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1531 __ Addu(sp, sp, Operand(1 * kPointerSize));
1532 __ Ret();
1533
1534 __ bind(&runtime);
1535 // Handle number to string in the runtime system if not found in the cache.
1536 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
Steve Block44f0eee2011-05-26 01:26:41 +01001537}
1538
1539
1540// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1541// On exit, v0 is 0, positive, or negative (smi) to indicate the result
1542// of the comparison.
1543void CompareStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001544 Label slow; // Call builtin.
1545 Label not_smis, both_loaded_as_doubles;
1546
1547
1548 if (include_smi_compare_) {
1549 Label not_two_smis, smi_done;
1550 __ Or(a2, a1, a0);
1551 __ JumpIfNotSmi(a2, &not_two_smis);
1552 __ sra(a1, a1, 1);
1553 __ sra(a0, a0, 1);
1554 __ Subu(v0, a1, a0);
1555 __ Ret();
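    // The untagged difference encodes the answer directly: it is zero when
    // the two smis are equal, and its sign gives their ordering (e.g. tagged
    // 6 and 10 untag to 3 and 5, producing -2).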
1556 __ bind(&not_two_smis);
1557 } else if (FLAG_debug_code) {
1558 __ Or(a2, a1, a0);
1559 __ And(a2, a2, kSmiTagMask);
1560 __ Assert(ne, "CompareStub: unexpected smi operands.",
1561 a2, Operand(zero_reg));
1562 }
1563
1564
1565 // NOTICE! This code is only reached after a smi-fast-case check, so
1566 // it is certain that at least one operand isn't a smi.
1567
1568 // Handle the case where the objects are identical. Either returns the answer
1569 // or goes to slow. Only falls through if the objects were not identical.
1570 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1571
1572 // If either is a Smi (we know that not both are), then they can only
1573 // be strictly equal if the other is a HeapNumber.
1574 STATIC_ASSERT(kSmiTag == 0);
1575 ASSERT_EQ(0, Smi::FromInt(0));
1576 __ And(t2, lhs_, Operand(rhs_));
1577 __ JumpIfNotSmi(t2, &not_smis, t0);
1578 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1579 // 1) Return the answer.
1580 // 2) Go to slow.
1581 // 3) Fall through to both_loaded_as_doubles.
1582 // 4) Jump to rhs_not_nan.
1583 // In cases 3 and 4 we have found out we were dealing with a number-number
1584 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1585 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1586 EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1587 &both_loaded_as_doubles, &slow, strict_);
1588
1589 __ bind(&both_loaded_as_doubles);
1590 // f12, f14 are the double representations of the left hand side
1591 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1592 // left hand side and a0, a1 represent right hand side.
1593
1594 Isolate* isolate = masm->isolate();
1595 if (CpuFeatures::IsSupported(FPU)) {
1596 CpuFeatures::Scope scope(FPU);
1597 Label nan;
1598 __ li(t0, Operand(LESS));
1599 __ li(t1, Operand(GREATER));
1600 __ li(t2, Operand(EQUAL));
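    // The c.cond.d compares below set the FPU condition flag; movt copies its
    // operand into v0 only when that flag is true and movf only when it is
    // false. The loads above stage the three possible answers in t0, t1, t2.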
1601
1602 // Check if either rhs or lhs is NaN.
1603 __ c(UN, D, f12, f14);
1604 __ bc1t(&nan);
1605 __ nop();
1606
1607    // Check if the LESS condition is satisfied. If true, conditionally move
1608    // the result to v0.
1609 __ c(OLT, D, f12, f14);
1610 __ movt(v0, t0);
1611    // Use the previous check to conditionally store the opposite condition
1612    // (GREATER) in v0. If rhs equals lhs, this will be corrected by the next
1613    // check.
1614 __ movf(v0, t1);
1615    // Check if the EQUAL condition is satisfied. If true, conditionally move
1616    // the result to v0.
1617 __ c(EQ, D, f12, f14);
1618 __ movt(v0, t2);
1619
1620 __ Ret();
1621
1622 __ bind(&nan);
1623 // NaN comparisons always fail.
1624 // Load whatever we need in v0 to make the comparison fail.
1625 if (cc_ == lt || cc_ == le) {
1626 __ li(v0, Operand(GREATER));
1627 } else {
1628 __ li(v0, Operand(LESS));
1629 }
1630 __ Ret();
1631 } else {
1632 // Checks for NaN in the doubles we have loaded. Can return the answer or
1633 // fall through if neither is a NaN. Also binds rhs_not_nan.
1634 EmitNanCheck(masm, cc_);
1635
1636 // Compares two doubles that are not NaNs. Returns the answer.
1637 // Never falls through.
1638 EmitTwoNonNanDoubleComparison(masm, cc_);
1639 }
1640
1641 __ bind(&not_smis);
1642 // At this point we know we are dealing with two different objects,
1643 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1644 if (strict_) {
1645 // This returns non-equal for some object types, or falls through if it
1646 // was not lucky.
1647 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1648 }
1649
1650 Label check_for_symbols;
1651 Label flat_string_check;
1652 // Check for heap-number-heap-number comparison. Can jump to slow case,
1653 // or load both doubles and jump to the code that handles
1654 // that case. If the inputs are not doubles then jumps to check_for_symbols.
1655 // In this case a2 will contain the type of lhs_.
1656 EmitCheckForTwoHeapNumbers(masm,
1657 lhs_,
1658 rhs_,
1659 &both_loaded_as_doubles,
1660 &check_for_symbols,
1661 &flat_string_check);
1662
1663 __ bind(&check_for_symbols);
1664 if (cc_ == eq && !strict_) {
1665 // Returns an answer for two symbols or two detectable objects.
1666 // Otherwise jumps to string case or not both strings case.
1667 // Assumes that a2 is the type of lhs_ on entry.
1668 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1669 }
1670
1671 // Check for both being sequential ASCII strings, and inline if that is the
1672 // case.
1673 __ bind(&flat_string_check);
1674
1675 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1676
1677 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1678 if (cc_ == eq) {
1679 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1680 lhs_,
1681 rhs_,
1682 a2,
1683 a3,
1684 t0);
1685 } else {
1686 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1687 lhs_,
1688 rhs_,
1689 a2,
1690 a3,
1691 t0,
1692 t1);
1693 }
1694 // Never falls through to here.
1695
1696 __ bind(&slow);
1697 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1698 // a1 (rhs) second.
1699 __ Push(lhs_, rhs_);
1700 // Figure out which native to call and setup the arguments.
1701 Builtins::JavaScript native;
1702 if (cc_ == eq) {
1703 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1704 } else {
1705 native = Builtins::COMPARE;
1706 int ncr; // NaN compare result.
1707 if (cc_ == lt || cc_ == le) {
1708 ncr = GREATER;
1709 } else {
1710 ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
1711 ncr = LESS;
1712 }
1713 __ li(a0, Operand(Smi::FromInt(ncr)));
1714 __ push(a0);
1715 }
1716
1717 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1718 // tagged as a small integer.
1719 __ InvokeBuiltin(native, JUMP_FUNCTION);
Steve Block44f0eee2011-05-26 01:26:41 +01001720}
1721
1722
1723// This stub does not handle the inlined cases (Smis, Booleans, undefined).
1724// The stub returns zero for false, and a non-zero value for true.
1725void ToBooleanStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00001726 // This stub uses FPU instructions.
1727 CpuFeatures::Scope scope(FPU);
1728
1729 Label false_result;
1730 Label not_heap_number;
1731 Register scratch0 = t5.is(tos_) ? t3 : t5;
1732
1733 // undefined -> false
1734 __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
1735 __ Branch(&false_result, eq, tos_, Operand(scratch0));
1736
1737 // Boolean -> its value
1738 __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
1739 __ Branch(&false_result, eq, tos_, Operand(scratch0));
1740 __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
1741 // "tos_" is a register and contains a non-zero value. Hence we implicitly
1742 // return true if the equal condition is satisfied.
1743 __ Ret(eq, tos_, Operand(scratch0));
1744
1745 // Smis: 0 -> false, all other -> true
1746 __ And(scratch0, tos_, tos_);
1747 __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
1748 __ And(scratch0, tos_, Operand(kSmiTagMask));
1749 // "tos_" is a register and contains a non-zero value. Hence we implicitly
1750 // return true if the not equal condition is satisfied.
1751 __ Ret(eq, scratch0, Operand(zero_reg));
1752
1753 // 'null' -> false
1754 __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
1755 __ Branch(&false_result, eq, tos_, Operand(scratch0));
1756
1757 // HeapNumber => false if +0, -0, or NaN.
1758 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1759 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1760 __ Branch(&not_heap_number, ne, scratch0, Operand(at));
1761
1762 __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1763 __ fcmp(f12, 0.0, UEQ);
1764
1765  // "tos_" is a register, and contains a non-zero value by default.
1766 // Hence we only need to overwrite "tos_" with zero to return false for
1767 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1768 __ movt(tos_, zero_reg);
1769 __ Ret();
1770
1771 __ bind(&not_heap_number);
1772
1773 // It can be an undetectable object.
1774 // Undetectable => false.
1775 __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
1776 __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
1777 __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
1778 __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
1779
1780 // JavaScript object => true.
1781 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1782 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
1783
1784 // "tos_" is a register and contains a non-zero value.
1785 // Hence we implicitly return true if the greater than
1786 // condition is satisfied.
1787 __ Ret(gt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
1788
1789 // Check for string.
1790 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1791 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
1792 // "tos_" is a register and contains a non-zero value.
1793 // Hence we implicitly return true if the greater than
1794 // condition is satisfied.
1795 __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE));
1796
1797 // String value => false iff empty, i.e., length is zero.
1798 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1799 // If length is zero, "tos_" contains zero ==> false.
1800 // If length is not zero, "tos_" contains a non-zero value ==> true.
1801 __ Ret();
1802
1803 // Return 0 in "tos_" for false.
1804 __ bind(&false_result);
1805 __ mov(tos_, zero_reg);
1806 __ Ret();
Steve Block44f0eee2011-05-26 01:26:41 +01001807}
1808
1809
Ben Murdoch257744e2011-11-30 15:57:28 +00001810Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
1811 UnaryOpStub stub(key, type_info);
Steve Block44f0eee2011-05-26 01:26:41 +01001812 return stub.GetCode();
1813}
1814
1815
Ben Murdoch257744e2011-11-30 15:57:28 +00001816const char* UnaryOpStub::GetName() {
1817 if (name_ != NULL) return name_;
1818 const int kMaxNameLength = 100;
1819 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
1820 kMaxNameLength);
1821 if (name_ == NULL) return "OOM";
1822 const char* op_name = Token::Name(op_);
1823 const char* overwrite_name = NULL; // Make g++ happy.
1824 switch (mode_) {
1825 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
1826 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
1827 }
1828
1829 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
1830 "UnaryOpStub_%s_%s_%s",
1831 op_name,
1832 overwrite_name,
1833 UnaryOpIC::GetName(operand_type_));
1834 return name_;
1835}
1836
1837
1838// TODO(svenpanne): Use virtual functions instead of switch.
1839void UnaryOpStub::Generate(MacroAssembler* masm) {
1840 switch (operand_type_) {
1841 case UnaryOpIC::UNINITIALIZED:
1842 GenerateTypeTransition(masm);
1843 break;
1844 case UnaryOpIC::SMI:
1845 GenerateSmiStub(masm);
1846 break;
1847 case UnaryOpIC::HEAP_NUMBER:
1848 GenerateHeapNumberStub(masm);
1849 break;
1850 case UnaryOpIC::GENERIC:
1851 GenerateGenericStub(masm);
1852 break;
1853 }
1854}
1855
1856
1857void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1858 // Argument is in a0 and v0 at this point, so we can overwrite a0.
1859 // Push this stub's key. Although the operation and the type info are
1860 // encoded into the key, the encoding is opaque, so push them too.
1861 __ li(a2, Operand(Smi::FromInt(MinorKey())));
1862 __ li(a1, Operand(Smi::FromInt(op_)));
1863 __ li(a0, Operand(Smi::FromInt(operand_type_)));
1864
1865 __ Push(v0, a2, a1, a0);
1866
1867 __ TailCallExternalReference(
1868 ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
1869 masm->isolate()),
1870 4,
1871 1);
1872}
1873
1874
1875// TODO(svenpanne): Use virtual functions instead of switch.
1876void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1877 switch (op_) {
1878 case Token::SUB:
1879 GenerateSmiStubSub(masm);
1880 break;
1881 case Token::BIT_NOT:
1882 GenerateSmiStubBitNot(masm);
1883 break;
1884 default:
1885 UNREACHABLE();
1886 }
1887}
1888
1889
1890void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
1891 Label non_smi, slow;
1892 GenerateSmiCodeSub(masm, &non_smi, &slow);
1893 __ bind(&non_smi);
1894 __ bind(&slow);
1895 GenerateTypeTransition(masm);
1896}
1897
1898
1899void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
1900 Label non_smi;
1901 GenerateSmiCodeBitNot(masm, &non_smi);
1902 __ bind(&non_smi);
1903 GenerateTypeTransition(masm);
1904}
1905
1906
1907void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
1908 Label* non_smi,
1909 Label* slow) {
1910 __ JumpIfNotSmi(a0, non_smi);
1911
1912 // The result of negating zero or the smallest negative smi is not a smi.
1913 __ And(t0, a0, ~0x80000000);
1914 __ Branch(slow, eq, t0, Operand(zero_reg));
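  // Masking with 0x7fffffff leaves zero only for the tagged values 0x00000000
  // (smi 0, whose negation -0 is not a smi) and 0x80000000 (the most negative
  // smi, whose negation does not fit in the smi range).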
1915
1916 // Return '0 - value'.
1917 __ Subu(v0, zero_reg, a0);
1918 __ Ret();
1919}
1920
1921
1922void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
1923 Label* non_smi) {
1924 __ JumpIfNotSmi(a0, non_smi);
1925
1926 // Flip bits and revert inverted smi-tag.
1927 __ Neg(v0, a0);
1928 __ And(v0, v0, ~kSmiTagMask);
1929 __ Ret();
1930}
1931
1932
1933// TODO(svenpanne): Use virtual functions instead of switch.
1934void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1935 switch (op_) {
1936 case Token::SUB:
1937 GenerateHeapNumberStubSub(masm);
1938 break;
1939 case Token::BIT_NOT:
1940 GenerateHeapNumberStubBitNot(masm);
1941 break;
1942 default:
1943 UNREACHABLE();
1944 }
1945}
1946
1947
1948void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
1949 Label non_smi, slow, call_builtin;
1950 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
1951 __ bind(&non_smi);
1952 GenerateHeapNumberCodeSub(masm, &slow);
1953 __ bind(&slow);
1954 GenerateTypeTransition(masm);
1955 __ bind(&call_builtin);
1956 GenerateGenericCodeFallback(masm);
1957}
1958
1959
1960void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
1961 Label non_smi, slow;
1962 GenerateSmiCodeBitNot(masm, &non_smi);
1963 __ bind(&non_smi);
1964 GenerateHeapNumberCodeBitNot(masm, &slow);
1965 __ bind(&slow);
1966 GenerateTypeTransition(masm);
1967}
1968
1969void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
1970 Label* slow) {
1971 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
1972 // a0 is a heap number. Get a new heap number in a1.
1973 if (mode_ == UNARY_OVERWRITE) {
1974 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1975 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
1976 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1977 } else {
1978 Label slow_allocate_heapnumber, heapnumber_allocated;
1979 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
1980 __ jmp(&heapnumber_allocated);
1981
1982 __ bind(&slow_allocate_heapnumber);
1983 __ EnterInternalFrame();
1984 __ push(a0);
1985 __ CallRuntime(Runtime::kNumberAlloc, 0);
1986 __ mov(a1, v0);
1987 __ pop(a0);
1988 __ LeaveInternalFrame();
1989
1990 __ bind(&heapnumber_allocated);
1991 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1992 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1993 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
1994 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
1995 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
1996 __ mov(v0, a1);
1997 }
1998 __ Ret();
1999}
2000
2001
2002void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2003 MacroAssembler* masm,
2004 Label* slow) {
2005 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2006 // Convert the heap number in a0 to an untagged integer in a1.
2007 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2008
2009 // Do the bitwise operation and check if the result fits in a smi.
2010 Label try_float;
2011 __ Neg(a1, a1);
2012 __ Addu(a2, a1, Operand(0x40000000));
2013 __ Branch(&try_float, lt, a2, Operand(zero_reg));
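  // A 32-bit value fits in a smi iff it lies in [-2^30, 2^30 - 1]. Adding
  // 0x40000000 maps exactly that range onto the non-negative integers, so a
  // negative sum means the value does not fit.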
2014
2015 // Tag the result as a smi and we're done.
2016 __ SmiTag(v0, a1);
2017 __ Ret();
2018
2019 // Try to store the result in a heap number.
2020 __ bind(&try_float);
2021 if (mode_ == UNARY_NO_OVERWRITE) {
2022 Label slow_allocate_heapnumber, heapnumber_allocated;
2023 __ AllocateHeapNumber(v0, a2, a3, t2, &slow_allocate_heapnumber);
2024 __ jmp(&heapnumber_allocated);
2025
2026 __ bind(&slow_allocate_heapnumber);
2027 __ EnterInternalFrame();
2028 __ push(a1);
2029 __ CallRuntime(Runtime::kNumberAlloc, 0);
2030 __ pop(a1);
2031 __ LeaveInternalFrame();
2032
2033 __ bind(&heapnumber_allocated);
2034 }
2035
2036 if (CpuFeatures::IsSupported(FPU)) {
2037 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2038 CpuFeatures::Scope scope(FPU);
2039 __ mtc1(a1, f0);
2040 __ cvt_d_w(f0, f0);
2041 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2042 __ Ret();
2043 } else {
2044 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2045 // have to set up a frame.
2046 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2047 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2048 }
2049}
2050
2051
2052// TODO(svenpanne): Use virtual functions instead of switch.
2053void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2054 switch (op_) {
2055 case Token::SUB:
2056 GenerateGenericStubSub(masm);
2057 break;
2058 case Token::BIT_NOT:
2059 GenerateGenericStubBitNot(masm);
2060 break;
2061 default:
2062 UNREACHABLE();
2063 }
2064}
2065
2066
2067void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2068 Label non_smi, slow;
2069 GenerateSmiCodeSub(masm, &non_smi, &slow);
2070 __ bind(&non_smi);
2071 GenerateHeapNumberCodeSub(masm, &slow);
2072 __ bind(&slow);
2073 GenerateGenericCodeFallback(masm);
2074}
2075
2076
2077void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2078 Label non_smi, slow;
2079 GenerateSmiCodeBitNot(masm, &non_smi);
2080 __ bind(&non_smi);
2081 GenerateHeapNumberCodeBitNot(masm, &slow);
2082 __ bind(&slow);
2083 GenerateGenericCodeFallback(masm);
2084}
2085
2086
2087void UnaryOpStub::GenerateGenericCodeFallback(
2088 MacroAssembler* masm) {
2089 // Handle the slow case by jumping to the JavaScript builtin.
2090 __ push(a0);
2091 switch (op_) {
2092 case Token::SUB:
2093 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2094 break;
2095 case Token::BIT_NOT:
2096 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2097 break;
2098 default:
2099 UNREACHABLE();
2100 }
2101}
2102
2103
2104Handle<Code> GetBinaryOpStub(int key,
2105 BinaryOpIC::TypeInfo type_info,
2106 BinaryOpIC::TypeInfo result_type_info) {
2107 BinaryOpStub stub(key, type_info, result_type_info);
Steve Block44f0eee2011-05-26 01:26:41 +01002108 return stub.GetCode();
2109}
2110
2111
Ben Murdoch257744e2011-11-30 15:57:28 +00002112void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2113 Label get_result;
2114
2115 __ Push(a1, a0);
2116
2117 __ li(a2, Operand(Smi::FromInt(MinorKey())));
2118 __ li(a1, Operand(Smi::FromInt(op_)));
2119 __ li(a0, Operand(Smi::FromInt(operands_type_)));
2120 __ Push(a2, a1, a0);
2121
2122 __ TailCallExternalReference(
2123 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2124 masm->isolate()),
2125 5,
2126 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002127}
2128
2129
Ben Murdoch257744e2011-11-30 15:57:28 +00002130void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
Steve Block44f0eee2011-05-26 01:26:41 +01002131 MacroAssembler* masm) {
2132 UNIMPLEMENTED();
2133}
2134
2135
Ben Murdoch257744e2011-11-30 15:57:28 +00002136void BinaryOpStub::Generate(MacroAssembler* masm) {
2137 switch (operands_type_) {
2138 case BinaryOpIC::UNINITIALIZED:
2139 GenerateTypeTransition(masm);
2140 break;
2141 case BinaryOpIC::SMI:
2142 GenerateSmiStub(masm);
2143 break;
2144 case BinaryOpIC::INT32:
2145 GenerateInt32Stub(masm);
2146 break;
2147 case BinaryOpIC::HEAP_NUMBER:
2148 GenerateHeapNumberStub(masm);
2149 break;
2150 case BinaryOpIC::ODDBALL:
2151 GenerateOddballStub(masm);
2152 break;
2153 case BinaryOpIC::BOTH_STRING:
2154 GenerateBothStringStub(masm);
2155 break;
2156 case BinaryOpIC::STRING:
2157 GenerateStringStub(masm);
2158 break;
2159 case BinaryOpIC::GENERIC:
2160 GenerateGeneric(masm);
2161 break;
2162 default:
2163 UNREACHABLE();
2164 }
Steve Block44f0eee2011-05-26 01:26:41 +01002165}
2166
2167
Ben Murdoch257744e2011-11-30 15:57:28 +00002168const char* BinaryOpStub::GetName() {
2169 if (name_ != NULL) return name_;
2170 const int kMaxNameLength = 100;
2171 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
2172 kMaxNameLength);
2173 if (name_ == NULL) return "OOM";
2174 const char* op_name = Token::Name(op_);
2175 const char* overwrite_name;
2176 switch (mode_) {
2177 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2178 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2179 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2180 default: overwrite_name = "UnknownOverwrite"; break;
2181 }
2182
2183 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
2184 "BinaryOpStub_%s_%s_%s",
2185 op_name,
2186 overwrite_name,
2187 BinaryOpIC::GetName(operands_type_));
Steve Block44f0eee2011-05-26 01:26:41 +01002188 return name_;
2189}
2190
2191
2192
Ben Murdoch257744e2011-11-30 15:57:28 +00002193void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2194 Register left = a1;
2195 Register right = a0;
2196
2197 Register scratch1 = t0;
2198 Register scratch2 = t1;
2199
2200 ASSERT(right.is(a0));
2201 STATIC_ASSERT(kSmiTag == 0);
2202
2203 Label not_smi_result;
2204 switch (op_) {
2205 case Token::ADD:
2206 __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2207 __ RetOnNoOverflow(scratch1);
2208 // No need to revert anything - right and left are intact.
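      // Because the smi tag is 0, adding two tagged smis is plain integer
      // addition (2a + 2b == 2(a + b)); only overflow needs to be checked.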
2209 break;
2210 case Token::SUB:
2211 __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2212 __ RetOnNoOverflow(scratch1);
2213 // No need to revert anything - right and left are intact.
2214 break;
2215 case Token::MUL: {
2216 // Remove tag from one of the operands. This way the multiplication result
2217 // will be a smi if it fits the smi range.
2218 __ SmiUntag(scratch1, right);
2219 // Do multiplication.
2220 // lo = lower 32 bits of scratch1 * left.
2221 // hi = higher 32 bits of scratch1 * left.
2222 __ Mult(left, scratch1);
2223 // Check for overflowing the smi range - no overflow if higher 33 bits of
2224 // the result are identical.
2225 __ mflo(scratch1);
2226 __ mfhi(scratch2);
2227 __ sra(scratch1, scratch1, 31);
2228 __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
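      // The 64-bit product fits in 32 bits exactly when 'hi' equals the sign
      // extension of 'lo': all zeros for a non-negative product, all ones for
      // a negative one.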
2229 // Go slow on zero result to handle -0.
2230 __ mflo(v0);
2231 __ Ret(ne, v0, Operand(zero_reg));
2232 // We need -0 if we were multiplying a negative number with 0 to get 0.
2233 // We know one of them was zero.
2234 __ Addu(scratch2, right, left);
2235 Label skip;
2236 // ARM uses the 'pl' condition, which is 'ge'.
2237 // Negating it results in 'lt'.
2238 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2239 ASSERT(Smi::FromInt(0) == 0);
2240 __ mov(v0, zero_reg);
2241 __ Ret(); // Return smi 0 if the non-zero one was positive.
2242 __ bind(&skip);
2243 // We fall through here if we multiplied a negative number with 0, because
2244 // that would mean we should produce -0.
2245 }
2246 break;
2247 case Token::DIV: {
2248 Label done;
2249 __ SmiUntag(scratch2, right);
2250 __ SmiUntag(scratch1, left);
2251 __ Div(scratch1, scratch2);
2252 // A minor optimization: div may be calculated asynchronously, so we check
2253 // for division by zero before getting the result.
2254 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2255      // If the result is 0, we need to make sure the divisor (right) is
2256      // positive, otherwise it is a -0 case.
2257 // Quotient is in 'lo', remainder is in 'hi'.
2258 // Check for no remainder first.
2259 __ mfhi(scratch1);
2260 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2261 __ mflo(scratch1);
2262 __ Branch(&done, ne, scratch1, Operand(zero_reg));
2263 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2264 __ bind(&done);
2265 // Check that the signed result fits in a Smi.
2266 __ Addu(scratch2, scratch1, Operand(0x40000000));
2267 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2268 __ SmiTag(v0, scratch1);
2269 __ Ret();
2270 }
2271 break;
2272 case Token::MOD: {
2273 Label done;
2274 __ SmiUntag(scratch2, right);
2275 __ SmiUntag(scratch1, left);
2276 __ Div(scratch1, scratch2);
2277 // A minor optimization: div may be calculated asynchronously, so we check
2278 // for division by 0 before calling mfhi.
2279 // Check for zero on the right hand side.
2280 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2281 // If the result is 0, we need to make sure the dividend (left) is
2282 // positive (or 0), otherwise it is a -0 case.
2283 // Remainder is in 'hi'.
2284 __ mfhi(scratch2);
2285 __ Branch(&done, ne, scratch2, Operand(zero_reg));
2286 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2287 __ bind(&done);
2288 // Check that the signed result fits in a Smi.
2289 __ Addu(scratch1, scratch2, Operand(0x40000000));
2290 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2291 __ SmiTag(v0, scratch2);
2292 __ Ret();
2293 }
2294 break;
2295 case Token::BIT_OR:
2296 __ Or(v0, left, Operand(right));
2297 __ Ret();
2298 break;
2299 case Token::BIT_AND:
2300 __ And(v0, left, Operand(right));
2301 __ Ret();
2302 break;
2303 case Token::BIT_XOR:
2304 __ Xor(v0, left, Operand(right));
2305 __ Ret();
2306 break;
2307 case Token::SAR:
2308 // Remove tags from right operand.
2309 __ GetLeastBitsFromSmi(scratch1, right, 5);
2310 __ srav(scratch1, left, scratch1);
2311 // Smi tag result.
2312 __ And(v0, scratch1, Operand(~kSmiTagMask));
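      // Arithmetic-shifting the still-tagged left operand and clearing the
      // tag bit yields the tagged result directly:
      // (2n >> s) & ~1 == 2 * (n >> s).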
2313 __ Ret();
2314 break;
2315 case Token::SHR:
2316 // Remove tags from operands. We can't do this on a 31 bit number
2317 // because then the 0s get shifted into bit 30 instead of bit 31.
2318 __ SmiUntag(scratch1, left);
2319 __ GetLeastBitsFromSmi(scratch2, right, 5);
2320 __ srlv(v0, scratch1, scratch2);
2321 // Unsigned shift is not allowed to produce a negative number, so
2322 // check the sign bit and the sign bit after Smi tagging.
2323 __ And(scratch1, v0, Operand(0xc0000000));
2324 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2325 // Smi tag result.
2326 __ SmiTag(v0);
2327 __ Ret();
2328 break;
2329 case Token::SHL:
2330 // Remove tags from operands.
2331 __ SmiUntag(scratch1, left);
2332 __ GetLeastBitsFromSmi(scratch2, right, 5);
2333 __ sllv(scratch1, scratch1, scratch2);
2334 // Check that the signed result fits in a Smi.
2335 __ Addu(scratch2, scratch1, Operand(0x40000000));
2336 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2337 __ SmiTag(v0, scratch1);
2338 __ Ret();
2339 break;
2340 default:
2341 UNREACHABLE();
2342 }
2343 __ bind(&not_smi_result);
Steve Block44f0eee2011-05-26 01:26:41 +01002344}
2345
2346
Ben Murdoch257744e2011-11-30 15:57:28 +00002347void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2348 bool smi_operands,
2349 Label* not_numbers,
2350 Label* gc_required) {
2351 Register left = a1;
2352 Register right = a0;
2353 Register scratch1 = t3;
2354 Register scratch2 = t5;
2355 Register scratch3 = t0;
2356
2357 ASSERT(smi_operands || (not_numbers != NULL));
2358 if (smi_operands && FLAG_debug_code) {
2359 __ AbortIfNotSmi(left);
2360 __ AbortIfNotSmi(right);
2361 }
2362
2363 Register heap_number_map = t2;
2364 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2365
2366 switch (op_) {
2367 case Token::ADD:
2368 case Token::SUB:
2369 case Token::MUL:
2370 case Token::DIV:
2371 case Token::MOD: {
2372 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2373 // depending on whether FPU is available or not.
2374 FloatingPointHelper::Destination destination =
2375 CpuFeatures::IsSupported(FPU) &&
2376 op_ != Token::MOD ?
2377 FloatingPointHelper::kFPURegisters :
2378 FloatingPointHelper::kCoreRegisters;
2379
2380 // Allocate new heap number for result.
2381 Register result = s0;
2382 GenerateHeapResultAllocation(
2383 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2384
2385 // Load the operands.
2386 if (smi_operands) {
2387 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2388 } else {
2389 FloatingPointHelper::LoadOperands(masm,
2390 destination,
2391 heap_number_map,
2392 scratch1,
2393 scratch2,
2394 not_numbers);
2395 }
2396
2397 // Calculate the result.
2398 if (destination == FloatingPointHelper::kFPURegisters) {
2399 // Using FPU registers:
2400 // f12: Left value.
2401 // f14: Right value.
2402 CpuFeatures::Scope scope(FPU);
2403 switch (op_) {
2404 case Token::ADD:
2405 __ add_d(f10, f12, f14);
2406 break;
2407 case Token::SUB:
2408 __ sub_d(f10, f12, f14);
2409 break;
2410 case Token::MUL:
2411 __ mul_d(f10, f12, f14);
2412 break;
2413 case Token::DIV:
2414 __ div_d(f10, f12, f14);
2415 break;
2416 default:
2417 UNREACHABLE();
2418 }
2419
2420 // ARM uses a workaround here because of the unaligned HeapNumber
2421 // kValueOffset. On MIPS this workaround is built into sdc1 so
2422 // there's no point in generating even more instructions.
2423 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2424 __ mov(v0, result);
2425 __ Ret();
2426 } else {
2427 // Call the C function to handle the double operation.
2428 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2429 op_,
2430 result,
2431 scratch1);
2432 if (FLAG_debug_code) {
2433 __ stop("Unreachable code.");
2434 }
2435 }
2436 break;
2437 }
2438 case Token::BIT_OR:
2439 case Token::BIT_XOR:
2440 case Token::BIT_AND:
2441 case Token::SAR:
2442 case Token::SHR:
2443 case Token::SHL: {
2444 if (smi_operands) {
2445 __ SmiUntag(a3, left);
2446 __ SmiUntag(a2, right);
2447 } else {
2448 // Convert operands to 32-bit integers. Right in a2 and left in a3.
2449 FloatingPointHelper::ConvertNumberToInt32(masm,
2450 left,
2451 a3,
2452 heap_number_map,
2453 scratch1,
2454 scratch2,
2455 scratch3,
2456 f0,
2457 not_numbers);
2458 FloatingPointHelper::ConvertNumberToInt32(masm,
2459 right,
2460 a2,
2461 heap_number_map,
2462 scratch1,
2463 scratch2,
2464 scratch3,
2465 f0,
2466 not_numbers);
2467 }
2468 Label result_not_a_smi;
2469 switch (op_) {
2470 case Token::BIT_OR:
2471 __ Or(a2, a3, Operand(a2));
2472 break;
2473 case Token::BIT_XOR:
2474 __ Xor(a2, a3, Operand(a2));
2475 break;
2476 case Token::BIT_AND:
2477 __ And(a2, a3, Operand(a2));
2478 break;
2479 case Token::SAR:
2480 // Use only the 5 least significant bits of the shift count.
2481 __ GetLeastBitsFromInt32(a2, a2, 5);
2482 __ srav(a2, a3, a2);
2483 break;
2484 case Token::SHR:
2485 // Use only the 5 least significant bits of the shift count.
2486 __ GetLeastBitsFromInt32(a2, a2, 5);
2487 __ srlv(a2, a3, a2);
2488 // SHR is special because it is required to produce a positive answer.
2489        // The code below for writing into heap numbers isn't capable of
2490        // writing the register as an unsigned int, so go to the slow case
2491        // if we hit it.
2492 if (CpuFeatures::IsSupported(FPU)) {
2493 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2494 } else {
2495 __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2496 }
2497 break;
2498 case Token::SHL:
2499 // Use only the 5 least significant bits of the shift count.
2500 __ GetLeastBitsFromInt32(a2, a2, 5);
2501 __ sllv(a2, a3, a2);
2502 break;
2503 default:
2504 UNREACHABLE();
2505 }
2506 // Check that the *signed* result fits in a smi.
2507 __ Addu(a3, a2, Operand(0x40000000));
2508 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2509 __ SmiTag(v0, a2);
2510 __ Ret();
2511
2512 // Allocate new heap number for result.
2513 __ bind(&result_not_a_smi);
2514 Register result = t1;
2515 if (smi_operands) {
2516 __ AllocateHeapNumber(
2517 result, scratch1, scratch2, heap_number_map, gc_required);
2518 } else {
2519 GenerateHeapResultAllocation(
2520 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2521 }
2522
2523 // a2: Answer as signed int32.
2524 // t1: Heap number to write answer into.
2525
2526 // Nothing can go wrong now, so move the heap number to v0, which is the
2527 // result.
2528 __ mov(v0, t1);
2529
2530 if (CpuFeatures::IsSupported(FPU)) {
2531        // Convert the int32 in a2 to the heap number in v0. As
2532        // mentioned above, SHR needs to always produce a positive result.
2533 CpuFeatures::Scope scope(FPU);
2534 __ mtc1(a2, f0);
2535 if (op_ == Token::SHR) {
2536 __ Cvt_d_uw(f0, f0);
2537 } else {
2538 __ cvt_d_w(f0, f0);
2539 }
2540 // ARM uses a workaround here because of the unaligned HeapNumber
2541 // kValueOffset. On MIPS this workaround is built into sdc1 so
2542 // there's no point in generating even more instructions.
2543 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2544 __ Ret();
2545 } else {
2546 // Tail call that writes the int32 in a2 to the heap number in v0, using
2547 // a3 and a0 as scratch. v0 is preserved and returned.
2548 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2549 __ TailCallStub(&stub);
2550 }
2551 break;
2552 }
2553 default:
2554 UNREACHABLE();
2555 }
Steve Block44f0eee2011-05-26 01:26:41 +01002556}
2557
2558
2559// Generate the smi code. If the operation on smis is successful, a return is
2560// generated. If the result is not a smi and heap number allocation is not
2561// requested, the code falls through. If number allocation is requested but a
2562// heap number cannot be allocated, the code jumps to the label gc_required.
Ben Murdoch257744e2011-11-30 15:57:28 +00002563void BinaryOpStub::GenerateSmiCode(
2564 MacroAssembler* masm,
2565 Label* use_runtime,
Steve Block44f0eee2011-05-26 01:26:41 +01002566 Label* gc_required,
2567 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002568 Label not_smis;
2569
2570 Register left = a1;
2571 Register right = a0;
2572 Register scratch1 = t3;
2573 Register scratch2 = t5;
2574
2575 // Perform combined smi check on both operands.
2576 __ Or(scratch1, left, Operand(right));
2577 STATIC_ASSERT(kSmiTag == 0);
2578 __ JumpIfNotSmi(scratch1, &not_smis);
2579
2580 // If the smi-smi operation results in a smi return is generated.
2581 GenerateSmiSmiOperation(masm);
2582
2583 // If heap number results are possible generate the result in an allocated
2584 // heap number.
2585 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2586 GenerateFPOperation(masm, true, use_runtime, gc_required);
2587 }
2588 __ bind(&not_smis);
Steve Block44f0eee2011-05-26 01:26:41 +01002589}
2590
2591
Ben Murdoch257744e2011-11-30 15:57:28 +00002592void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2593 Label not_smis, call_runtime;
2594
2595 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2596 result_type_ == BinaryOpIC::SMI) {
2597 // Only allow smi results.
2598 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2599 } else {
2600 // Allow heap number result and don't make a transition if a heap number
2601 // cannot be allocated.
2602 GenerateSmiCode(masm,
2603 &call_runtime,
2604 &call_runtime,
2605 ALLOW_HEAPNUMBER_RESULTS);
2606 }
2607
2608 // Code falls through if the result is not returned as either a smi or heap
2609 // number.
2610 GenerateTypeTransition(masm);
2611
2612 __ bind(&call_runtime);
2613 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002614}
2615
2616
Ben Murdoch257744e2011-11-30 15:57:28 +00002617void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2618 ASSERT(operands_type_ == BinaryOpIC::STRING);
2619 // Try to add arguments as strings, otherwise, transition to the generic
2620 // BinaryOpIC type.
2621 GenerateAddStrings(masm);
2622 GenerateTypeTransition(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002623}
2624
2625
Ben Murdoch257744e2011-11-30 15:57:28 +00002626void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2627 Label call_runtime;
2628 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2629 ASSERT(op_ == Token::ADD);
2630 // If both arguments are strings, call the string add stub.
2631 // Otherwise, do a transition.
2632
2633 // Registers containing left and right operands respectively.
2634 Register left = a1;
2635 Register right = a0;
2636
2637 // Test if left operand is a string.
2638 __ JumpIfSmi(left, &call_runtime);
2639 __ GetObjectType(left, a2, a2);
2640 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2641
2642 // Test if right operand is a string.
2643 __ JumpIfSmi(right, &call_runtime);
2644 __ GetObjectType(right, a2, a2);
2645 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2646
2647 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2648 GenerateRegisterArgsPush(masm);
2649 __ TailCallStub(&string_add_stub);
2650
2651 __ bind(&call_runtime);
2652 GenerateTypeTransition(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002653}
2654
2655
Ben Murdoch257744e2011-11-30 15:57:28 +00002656void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2657 ASSERT(operands_type_ == BinaryOpIC::INT32);
2658
2659 Register left = a1;
2660 Register right = a0;
2661 Register scratch1 = t3;
2662 Register scratch2 = t5;
2663 FPURegister double_scratch = f0;
2664 FPURegister single_scratch = f6;
2665
2666 Register heap_number_result = no_reg;
2667 Register heap_number_map = t2;
2668 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2669
2670 Label call_runtime;
2671 // Labels for type transition, used for wrong input or output types.
2672  // Both labels are currently bound to the same position. We use two distinct
2673  // labels to differentiate the cause leading to the type transition.
2674 Label transition;
2675
2676 // Smi-smi fast case.
2677 Label skip;
2678 __ Or(scratch1, left, right);
2679 __ JumpIfNotSmi(scratch1, &skip);
2680 GenerateSmiSmiOperation(masm);
2681 // Fall through if the result is not a smi.
2682 __ bind(&skip);
2683
2684 switch (op_) {
2685 case Token::ADD:
2686 case Token::SUB:
2687 case Token::MUL:
2688 case Token::DIV:
2689 case Token::MOD: {
2690 // Load both operands and check that they are 32-bit integer.
2691 // Jump to type transition if they are not. The registers a0 and a1 (right
2692 // and left) are preserved for the runtime call.
2693 FloatingPointHelper::Destination destination =
2694 CpuFeatures::IsSupported(FPU) &&
2695 op_ != Token::MOD ?
2696 FloatingPointHelper::kFPURegisters :
2697 FloatingPointHelper::kCoreRegisters;
2698
2699 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2700 right,
2701 destination,
2702 f14,
2703 a2,
2704 a3,
2705 heap_number_map,
2706 scratch1,
2707 scratch2,
2708 f2,
2709 &transition);
2710 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2711 left,
2712 destination,
2713 f12,
2714 t0,
2715 t1,
2716 heap_number_map,
2717 scratch1,
2718 scratch2,
2719 f2,
2720 &transition);
2721
2722 if (destination == FloatingPointHelper::kFPURegisters) {
2723 CpuFeatures::Scope scope(FPU);
2724 Label return_heap_number;
2725 switch (op_) {
2726 case Token::ADD:
2727 __ add_d(f10, f12, f14);
2728 break;
2729 case Token::SUB:
2730 __ sub_d(f10, f12, f14);
2731 break;
2732 case Token::MUL:
2733 __ mul_d(f10, f12, f14);
2734 break;
2735 case Token::DIV:
2736 __ div_d(f10, f12, f14);
2737 break;
2738 default:
2739 UNREACHABLE();
2740 }
2741
2742 if (op_ != Token::DIV) {
2743 // These operations produce an integer result.
2744 // Try to return a smi if we can.
2745 // Otherwise return a heap number if allowed, or jump to type
2746 // transition.
2747
2748          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
2749          // On MIPS many of those operations cannot be implemented the same
2750          // way, so for now it makes more sense to do things manually.
2751
2752 // Save FCSR.
2753 __ cfc1(scratch1, FCSR);
2754 // Disable FPU exceptions.
2755 __ ctc1(zero_reg, FCSR);
2756 __ trunc_w_d(single_scratch, f10);
2757 // Retrieve FCSR.
2758 __ cfc1(scratch2, FCSR);
2759 // Restore FCSR.
2760 __ ctc1(scratch1, FCSR);
2761
2762 // Check for inexact conversion.
2763 __ srl(scratch2, scratch2, kFCSRFlagShift);
2764 __ And(scratch2, scratch2, kFCSRFlagMask);
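          // Roughly: if trunc_w_d could not convert the value exactly, the
          // corresponding cause bits are raised in FCSR, so the masked value
          // in scratch2 is non-zero.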
2765
2766 if (result_type_ <= BinaryOpIC::INT32) {
2767 // If scratch2 != 0, result does not fit in a 32-bit integer.
2768 __ Branch(&transition, ne, scratch2, Operand(zero_reg));
2769 }
2770
2771 // Check if the result fits in a smi.
2772 __ mfc1(scratch1, single_scratch);
2773 __ Addu(scratch2, scratch1, Operand(0x40000000));
2774 // If not try to return a heap number.
2775 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2776 // Check for minus zero. Return heap number for minus zero.
2777 Label not_zero;
2778 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
2779 __ mfc1(scratch2, f11);
2780 __ And(scratch2, scratch2, HeapNumber::kSignMask);
2781 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2782 __ bind(&not_zero);
2783
2784 // Tag the result and return.
2785 __ SmiTag(v0, scratch1);
2786 __ Ret();
2787 } else {
2788 // DIV just falls through to allocating a heap number.
2789 }
2790
2791        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2792                                                  : BinaryOpIC::INT32)) {
2793 __ bind(&return_heap_number);
2794 // We are using FPU registers so s0 is available.
2795 heap_number_result = s0;
2796 GenerateHeapResultAllocation(masm,
2797 heap_number_result,
2798 heap_number_map,
2799 scratch1,
2800 scratch2,
2801 &call_runtime);
2802 __ mov(v0, heap_number_result);
2803 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2804 __ Ret();
2805 }
2806
2807 // A DIV operation expecting an integer result falls through
2808 // to type transition.
2809
2810 } else {
2811 // We preserved a0 and a1 to be able to call runtime.
2812 // Save the left value on the stack.
2813 __ Push(t1, t0);
2814
2815 Label pop_and_call_runtime;
2816
2817 // Allocate a heap number to store the result.
2818 heap_number_result = s0;
2819 GenerateHeapResultAllocation(masm,
2820 heap_number_result,
2821 heap_number_map,
2822 scratch1,
2823 scratch2,
2824 &pop_and_call_runtime);
2825
2826 // Load the left value from the value saved on the stack.
2827 __ Pop(a1, a0);
2828
2829 // Call the C function to handle the double operation.
2830 FloatingPointHelper::CallCCodeForDoubleOperation(
2831 masm, op_, heap_number_result, scratch1);
2832 if (FLAG_debug_code) {
2833 __ stop("Unreachable code.");
2834 }
2835
2836 __ bind(&pop_and_call_runtime);
2837 __ Drop(2);
2838 __ Branch(&call_runtime);
2839 }
2840
2841 break;
2842 }
2843
2844 case Token::BIT_OR:
2845 case Token::BIT_XOR:
2846 case Token::BIT_AND:
2847 case Token::SAR:
2848 case Token::SHR:
2849 case Token::SHL: {
2850 Label return_heap_number;
2851 Register scratch3 = t1;
2852 // Convert operands to 32-bit integers. Right in a2 and left in a3. The
2853 // registers a0 and a1 (right and left) are preserved for the runtime
2854 // call.
2855 FloatingPointHelper::LoadNumberAsInt32(masm,
2856 left,
2857 a3,
2858 heap_number_map,
2859 scratch1,
2860 scratch2,
2861 scratch3,
2862 f0,
2863 &transition);
2864 FloatingPointHelper::LoadNumberAsInt32(masm,
2865 right,
2866 a2,
2867 heap_number_map,
2868 scratch1,
2869 scratch2,
2870 scratch3,
2871 f0,
2872 &transition);
2873
2874 // The ECMA-262 standard specifies that, for shift operations, only the
2875 // 5 least significant bits of the shift value should be used.
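      // For example, in JavaScript (1 << 33) === 2, because the shift count
      // is reduced modulo 32 (33 & 0x1f == 1).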
2876 switch (op_) {
2877 case Token::BIT_OR:
2878 __ Or(a2, a3, Operand(a2));
2879 break;
2880 case Token::BIT_XOR:
2881 __ Xor(a2, a3, Operand(a2));
2882 break;
2883 case Token::BIT_AND:
2884 __ And(a2, a3, Operand(a2));
2885 break;
2886 case Token::SAR:
2887 __ And(a2, a2, Operand(0x1f));
2888 __ srav(a2, a3, a2);
2889 break;
2890 case Token::SHR:
2891 __ And(a2, a2, Operand(0x1f));
2892 __ srlv(a2, a3, a2);
2893 // SHR is special because it is required to produce a positive answer.
2894 // We only get a negative result if the shift value (a2) is 0.
2895        // This result cannot be represented as a signed 32-bit integer, so try
2896        // to return a heap number if we can.
2897        // The non-FPU code does not support this special case, so jump to the
2898        // runtime if we don't support it.
2899 if (CpuFeatures::IsSupported(FPU)) {
2900 __ Branch((result_type_ <= BinaryOpIC::INT32)
2901 ? &transition
2902 : &return_heap_number,
2903 lt,
2904 a2,
2905 Operand(zero_reg));
2906 } else {
2907 __ Branch((result_type_ <= BinaryOpIC::INT32)
2908 ? &transition
2909 : &call_runtime,
2910 lt,
2911 a2,
2912 Operand(zero_reg));
2913 }
2914 break;
2915 case Token::SHL:
2916 __ And(a2, a2, Operand(0x1f));
2917 __ sllv(a2, a3, a2);
2918 break;
2919 default:
2920 UNREACHABLE();
2921 }
2922
2923 // Check if the result fits in a smi.
2924 __ Addu(scratch1, a2, Operand(0x40000000));
2925 // If not try to return a heap number. (We know the result is an int32.)
2926 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
2927 // Tag the result and return.
2928 __ SmiTag(v0, a2);
2929 __ Ret();
2930
2931 __ bind(&return_heap_number);
2932 heap_number_result = t1;
2933 GenerateHeapResultAllocation(masm,
2934 heap_number_result,
2935 heap_number_map,
2936 scratch1,
2937 scratch2,
2938 &call_runtime);
2939
2940 if (CpuFeatures::IsSupported(FPU)) {
2941 CpuFeatures::Scope scope(FPU);
2942
2943 if (op_ != Token::SHR) {
2944 // Convert the result to a floating point value.
2945 __ mtc1(a2, double_scratch);
2946 __ cvt_d_w(double_scratch, double_scratch);
2947 } else {
2948 // The result must be interpreted as an unsigned 32-bit integer.
2949 __ mtc1(a2, double_scratch);
2950 __ Cvt_d_uw(double_scratch, double_scratch);
2951 }
2952
2953 // Store the result.
2954 __ mov(v0, heap_number_result);
2955 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
2956 __ Ret();
2957 } else {
2958 // Tail call that writes the int32 in a2 to the heap number in v0, using
2959 // a3 and a1 as scratch. v0 is preserved and returned.
2960 __ mov(a0, t1);
2961 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
2962 __ TailCallStub(&stub);
2963 }
2964
2965 break;
2966 }
2967
2968 default:
2969 UNREACHABLE();
2970 }
2971
2972 if (transition.is_linked()) {
2973 __ bind(&transition);
2974 GenerateTypeTransition(masm);
2975 }
2976
2977 __ bind(&call_runtime);
2978 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01002979}
2980
2981
Ben Murdoch257744e2011-11-30 15:57:28 +00002982void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
2983 Label call_runtime;
2984
2985 if (op_ == Token::ADD) {
2986 // Handle string addition here, because it is the only operation
2987 // that does not do a ToNumber conversion on the operands.
2988 GenerateAddStrings(masm);
2989 }
2990
2991 // Convert oddball arguments to numbers.
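  // In JavaScript, undefined + 1 is NaN while undefined | 1 is 1, so
  // undefined becomes NaN for arithmetic operators and 0 for bitwise ones.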
2992 Label check, done;
2993 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
2994 __ Branch(&check, ne, a1, Operand(t0));
2995 if (Token::IsBitOp(op_)) {
2996 __ li(a1, Operand(Smi::FromInt(0)));
2997 } else {
2998 __ LoadRoot(a1, Heap::kNanValueRootIndex);
2999 }
3000 __ jmp(&done);
3001 __ bind(&check);
3002 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3003 __ Branch(&done, ne, a0, Operand(t0));
3004 if (Token::IsBitOp(op_)) {
3005 __ li(a0, Operand(Smi::FromInt(0)));
3006 } else {
3007 __ LoadRoot(a0, Heap::kNanValueRootIndex);
3008 }
3009 __ bind(&done);
3010
3011 GenerateHeapNumberStub(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003012}
3013
3014
Ben Murdoch257744e2011-11-30 15:57:28 +00003015void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3016 Label call_runtime;
3017 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3018
3019 __ bind(&call_runtime);
3020 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003021}
3022
3023
Ben Murdoch257744e2011-11-30 15:57:28 +00003024void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3025 Label call_runtime, call_string_add_or_runtime;
3026
3027 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3028
3029 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3030
3031 __ bind(&call_string_add_or_runtime);
3032 if (op_ == Token::ADD) {
3033 GenerateAddStrings(masm);
3034 }
3035
3036 __ bind(&call_runtime);
3037 GenerateCallRuntime(masm);
Steve Block44f0eee2011-05-26 01:26:41 +01003038}
3039
3040
Ben Murdoch257744e2011-11-30 15:57:28 +00003041void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3042 ASSERT(op_ == Token::ADD);
3043 Label left_not_string, call_runtime;
3044
3045 Register left = a1;
3046 Register right = a0;
3047
3048 // Check if left argument is a string.
3049 __ JumpIfSmi(left, &left_not_string);
3050 __ GetObjectType(left, a2, a2);
3051 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3052
3053 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3054 GenerateRegisterArgsPush(masm);
3055 __ TailCallStub(&string_add_left_stub);
3056
3057 // Left operand is not a string, test right.
3058 __ bind(&left_not_string);
3059 __ JumpIfSmi(right, &call_runtime);
3060 __ GetObjectType(right, a2, a2);
3061 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3062
3063 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3064 GenerateRegisterArgsPush(masm);
3065 __ TailCallStub(&string_add_right_stub);
3066
3067 // At least one argument is not a string.
3068 __ bind(&call_runtime);
3069}
3070
3071
3072void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3073 GenerateRegisterArgsPush(masm);
3074 switch (op_) {
3075 case Token::ADD:
3076 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3077 break;
3078 case Token::SUB:
3079 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3080 break;
3081 case Token::MUL:
3082 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3083 break;
3084 case Token::DIV:
3085 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3086 break;
3087 case Token::MOD:
3088 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3089 break;
3090 case Token::BIT_OR:
3091 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3092 break;
3093 case Token::BIT_AND:
3094 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3095 break;
3096 case Token::BIT_XOR:
3097 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3098 break;
3099 case Token::SAR:
3100 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3101 break;
3102 case Token::SHR:
3103 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3104 break;
3105 case Token::SHL:
3106 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3107 break;
3108 default:
3109 UNREACHABLE();
3110 }
3111}
3112
3113
3114void BinaryOpStub::GenerateHeapResultAllocation(
Steve Block44f0eee2011-05-26 01:26:41 +01003115 MacroAssembler* masm,
3116 Register result,
3117 Register heap_number_map,
3118 Register scratch1,
3119 Register scratch2,
3120 Label* gc_required) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003121
3122 // Code below will scratch result if allocation fails. To keep both arguments
3123 // intact for the runtime call result cannot be one of these.
3124 ASSERT(!result.is(a0) && !result.is(a1));
3125
3126 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3127 Label skip_allocation, allocated;
3128 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3129 // If the overwritable operand is already an object, we skip the
3130 // allocation of a heap number.
3131 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3132 // Allocate a heap number for the result.
3133 __ AllocateHeapNumber(
3134 result, scratch1, scratch2, heap_number_map, gc_required);
3135 __ Branch(&allocated);
3136 __ bind(&skip_allocation);
3137 // Use object holding the overwritable operand for result.
3138 __ mov(result, overwritable_operand);
3139 __ bind(&allocated);
3140 } else {
3141 ASSERT(mode_ == NO_OVERWRITE);
3142 __ AllocateHeapNumber(
3143 result, scratch1, scratch2, heap_number_map, gc_required);
3144 }
Steve Block44f0eee2011-05-26 01:26:41 +01003145}
3146
3147
Ben Murdoch257744e2011-11-30 15:57:28 +00003148void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3149 __ Push(a1, a0);
Steve Block44f0eee2011-05-26 01:26:41 +01003150}
3151
3152
3153
3154void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003155 // Untagged case: double input in f4, double result goes
3156 // into f4.
3157 // Tagged case: tagged input on top of stack and in a0,
3158 // tagged result (heap number) goes into v0.
3159
3160 Label input_not_smi;
3161 Label loaded;
3162 Label calculate;
3163 Label invalid_cache;
3164 const Register scratch0 = t5;
3165 const Register scratch1 = t3;
3166 const Register cache_entry = a0;
3167 const bool tagged = (argument_type_ == TAGGED);
3168
3169 if (CpuFeatures::IsSupported(FPU)) {
3170 CpuFeatures::Scope scope(FPU);
3171
3172 if (tagged) {
3173 // Argument is a number and is on stack and in a0.
3174 // Load argument and check if it is a smi.
3175 __ JumpIfNotSmi(a0, &input_not_smi);
3176
3177 // Input is a smi. Convert to double and load the low and high words
3178 // of the double into a2, a3.
3179 __ sra(t0, a0, kSmiTagSize);
3180 __ mtc1(t0, f4);
3181 __ cvt_d_w(f4, f4);
3182 __ Move(a2, a3, f4);
3183 __ Branch(&loaded);
3184
3185 __ bind(&input_not_smi);
3186 // Check if input is a HeapNumber.
3187 __ CheckMap(a0,
3188 a1,
3189 Heap::kHeapNumberMapRootIndex,
3190 &calculate,
3191 DONT_DO_SMI_CHECK);
3192      // Input is a HeapNumber. Load the
3193      // low and high words into a2, a3.
3194 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3195 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3196 } else {
3197 // Input is untagged double in f4. Output goes to f4.
3198 __ Move(a2, a3, f4);
3199 }
3200 __ bind(&loaded);
3201 // a2 = low 32 bits of double value.
3202 // a3 = high 32 bits of double value.
3203 // Compute hash (the shifts are arithmetic):
3204 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3205 __ Xor(a1, a2, a3);
3206 __ sra(t0, a1, 16);
3207 __ Xor(a1, a1, t0);
3208 __ sra(t0, a1, 8);
3209 __ Xor(a1, a1, t0);
3210 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3211 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3212
3213 // a2 = low 32 bits of double value.
3214 // a3 = high 32 bits of double value.
3215 // a1 = TranscendentalCache::hash(double value).
3216 __ li(cache_entry, Operand(
3217 ExternalReference::transcendental_cache_array_address(
3218 masm->isolate())));
3219 // a0 points to cache array.
3220 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3221 Isolate::Current()->transcendental_cache()->caches_[0])));
3222 // a0 points to the cache for the type type_.
3223 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3224 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3225
3226#ifdef DEBUG
3227 // Check that the layout of cache elements match expectations.
3228 { TranscendentalCache::SubCache::Element test_elem[2];
3229 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3230 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3231 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3232 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3233 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3234 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3235 CHECK_EQ(0, elem_in0 - elem_start);
3236 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3237 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3238 }
3239#endif
3240
3241 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
3242 __ sll(t0, a1, 1);
3243 __ Addu(a1, a1, t0);
3244 __ sll(t0, a1, 2);
3245 __ Addu(cache_entry, cache_entry, t0);
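    // The sequence above scales the index by 12, computed as (a1 + 2 * a1) * 4,
    // to match the 12-byte element layout verified in the DEBUG block above.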
3246
3247 // Check if cache matches: Double value is stored in uint32_t[2] array.
3248 __ lw(t0, MemOperand(cache_entry, 0));
3249 __ lw(t1, MemOperand(cache_entry, 4));
3250 __ lw(t2, MemOperand(cache_entry, 8));
3251 __ Addu(cache_entry, cache_entry, 12);
3252 __ Branch(&calculate, ne, a2, Operand(t0));
3253 __ Branch(&calculate, ne, a3, Operand(t1));
3254 // Cache hit. Load result, cleanup and return.
3255 if (tagged) {
3256 // Pop input value from stack and load result into v0.
3257 __ Drop(1);
3258 __ mov(v0, t2);
3259 } else {
3260 // Load result into f4.
3261 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3262 }
3263 __ Ret();
3264 } // if (CpuFeatures::IsSupported(FPU))
3265
3266 __ bind(&calculate);
3267 if (tagged) {
3268 __ bind(&invalid_cache);
3269 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3270 masm->isolate()),
3271 1,
3272 1);
3273 } else {
3274 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3275 CpuFeatures::Scope scope(FPU);
3276
3277 Label no_update;
3278 Label skip_cache;
3279 const Register heap_number_map = t2;
3280
3281 // Call C function to calculate the result and update the cache.
3282 // Register a0 holds precalculated cache entry address; preserve
3283 // it on the stack and pop it into register cache_entry after the
3284 // call.
3285 __ push(cache_entry);
3286 GenerateCallCFunction(masm, scratch0);
3287 __ GetCFunctionDoubleResult(f4);
3288
3289 // Try to update the cache. If we cannot allocate a
3290 // heap number, we return the result without updating.
3291 __ pop(cache_entry);
3292 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3293 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3294 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3295
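  // The three stores below fill one cache Element: the two input words of the
  // double (in[0], in[1]) followed by the pointer to the result HeapNumber,
  // matching the layout checked in the DEBUG block above.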
3296 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3297 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3298 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3299
3300 __ mov(v0, cache_entry);
3301 __ Ret();
3302
3303 __ bind(&invalid_cache);
3304 // The cache is invalid. Call runtime which will recreate the
3305 // cache.
3306 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3307 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3308 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3309 __ EnterInternalFrame();
3310 __ push(a0);
3311 __ CallRuntime(RuntimeFunction(), 1);
3312 __ LeaveInternalFrame();
3313 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3314 __ Ret();
3315
3316 __ bind(&skip_cache);
3317 // Call the C function to calculate the result and return it directly
3318 // without updating the cache.
3319 GenerateCallCFunction(masm, scratch0);
3320 __ GetCFunctionDoubleResult(f4);
3321 __ bind(&no_update);
3322
3323 // We return the value in f4 without adding it to the cache, but
3324 // we cause a scavenging GC so that future allocations will succeed.
3325 __ EnterInternalFrame();
3326
3327 // Allocate an aligned object larger than a HeapNumber.
3328 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3329 __ li(scratch0, Operand(4 * kPointerSize));
3330 __ push(scratch0);
3331 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3332 __ LeaveInternalFrame();
3333 __ Ret();
3334 }
3335}
3336
3337
3338void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3339 Register scratch) {
3340 __ push(ra);
3341 __ PrepareCallCFunction(2, scratch);
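  // Hand the double argument to the C function. With a soft-float ABI it is
  // moved to a general-purpose register pair; with hard float it goes in f12,
  // the first FP argument register under the o32 calling convention.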
3342 if (IsMipsSoftFloatABI) {
3343 __ Move(v0, v1, f4);
3344 } else {
3345 __ mov_d(f12, f4);
3346 }
3347 switch (type_) {
3348 case TranscendentalCache::SIN:
3349 __ CallCFunction(
3350 ExternalReference::math_sin_double_function(masm->isolate()), 2);
3351 break;
3352 case TranscendentalCache::COS:
3353 __ CallCFunction(
3354 ExternalReference::math_cos_double_function(masm->isolate()), 2);
3355 break;
3356 case TranscendentalCache::LOG:
3357 __ CallCFunction(
3358 ExternalReference::math_log_double_function(masm->isolate()), 2);
3359 break;
3360 default:
3361 UNIMPLEMENTED();
3362 break;
3363 }
3364 __ pop(ra);
3365}
3366
3367
3368Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3369 switch (type_) {
3370 // Add more cases when necessary.
3371 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3372 case TranscendentalCache::COS: return Runtime::kMath_cos;
3373 case TranscendentalCache::LOG: return Runtime::kMath_log;
3374 default:
3375 UNIMPLEMENTED();
3376 return Runtime::kAbort;
3377 }
3378}
3379
3380
3381void StackCheckStub::Generate(MacroAssembler* masm) {
3382 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3383}
3384
3385
3386void MathPowStub::Generate(MacroAssembler* masm) {
3387 Label call_runtime;
3388
3389 if (CpuFeatures::IsSupported(FPU)) {
3390 CpuFeatures::Scope scope(FPU);
3391
3392 Label base_not_smi;
3393 Label exponent_not_smi;
3394 Label convert_exponent;
3395
3396 const Register base = a0;
3397 const Register exponent = a2;
3398 const Register heapnumbermap = t1;
3399 const Register heapnumber = s0; // Callee-saved register.
3400 const Register scratch = t2;
3401 const Register scratch2 = t3;
3402
3403 // Allocate FP values in the ABI-parameter-passing regs.
3404 const DoubleRegister double_base = f12;
3405 const DoubleRegister double_exponent = f14;
3406 const DoubleRegister double_result = f0;
3407 const DoubleRegister double_scratch = f2;
3408
3409 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
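  // Stack layout on entry: the exponent is on top of the stack at sp[0], with
  // the base in the next slot at sp[4], as the two loads below show.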
3410 __ lw(base, MemOperand(sp, 1 * kPointerSize));
3411 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3412
3413 // Convert base to double value and store it in f0.
3414 __ JumpIfNotSmi(base, &base_not_smi);
3415 // Base is a Smi. Untag and convert it.
3416 __ SmiUntag(base);
3417 __ mtc1(base, double_scratch);
3418 __ cvt_d_w(double_base, double_scratch);
3419 __ Branch(&convert_exponent);
3420
3421 __ bind(&base_not_smi);
3422 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3423 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3424 // Base is a heapnumber. Load it into double register.
3425 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3426
3427 __ bind(&convert_exponent);
3428 __ JumpIfNotSmi(exponent, &exponent_not_smi);
3429 __ SmiUntag(exponent);
3430
3431 // The base is in a double register and the exponent is
3432 // an untagged smi. Allocate a heap number and call a
3433 // C function for integer exponents. The register containing
3434 // the heap number is callee-saved.
3435 __ AllocateHeapNumber(heapnumber,
3436 scratch,
3437 scratch2,
3438 heapnumbermap,
3439 &call_runtime);
3440 __ push(ra);
3441 __ PrepareCallCFunction(3, scratch);
3442 __ SetCallCDoubleArguments(double_base, exponent);
3443 __ CallCFunction(
3444 ExternalReference::power_double_int_function(masm->isolate()), 3);
3445 __ pop(ra);
3446 __ GetCFunctionDoubleResult(double_result);
3447 __ sdc1(double_result,
3448 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3449 __ mov(v0, heapnumber);
3450 __ DropAndRet(2 * kPointerSize);
3451
3452 __ bind(&exponent_not_smi);
3453 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3454 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3455 // Exponent is a heapnumber. Load it into double register.
3456 __ ldc1(double_exponent,
3457 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3458
3459 // The base and the exponent are in double registers.
3460 // Allocate a heap number and call a C function for
3461 // double exponents. The register containing
3462 // the heap number is callee-saved.
3463 __ AllocateHeapNumber(heapnumber,
3464 scratch,
3465 scratch2,
3466 heapnumbermap,
3467 &call_runtime);
3468 __ push(ra);
3469 __ PrepareCallCFunction(4, scratch);
3470 // ABI (o32) for func(double a, double b): a in f12, b in f14.
3471 ASSERT(double_base.is(f12));
3472 ASSERT(double_exponent.is(f14));
3473 __ SetCallCDoubleArguments(double_base, double_exponent);
3474 __ CallCFunction(
3475 ExternalReference::power_double_double_function(masm->isolate()), 4);
3476 __ pop(ra);
3477 __ GetCFunctionDoubleResult(double_result);
3478 __ sdc1(double_result,
3479 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3480 __ mov(v0, heapnumber);
3481 __ DropAndRet(2 * kPointerSize);
3482 }
3483
3484 __ bind(&call_runtime);
3485 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3486}
3487
3488
3489bool CEntryStub::NeedsImmovableCode() {
3490 return true;
3491}
3492
3493
3494void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
3495 __ Throw(v0);
3496}
3497
3498
3499void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3500 UncatchableExceptionType type) {
3501 __ ThrowUncatchable(type, v0);
3502}
3503
3504
3505void CEntryStub::GenerateCore(MacroAssembler* masm,
3506 Label* throw_normal_exception,
3507 Label* throw_termination_exception,
3508 Label* throw_out_of_memory_exception,
3509 bool do_gc,
3510 bool always_allocate) {
3511 // v0: result parameter for PerformGC, if any
3512 // s0: number of arguments including receiver (C callee-saved)
3513 // s1: pointer to the first argument (C callee-saved)
3514 // s2: pointer to builtin function (C callee-saved)
3515
3516 if (do_gc) {
3517 // Move result passed in v0 into a0 to call PerformGC.
3518 __ mov(a0, v0);
3519 __ PrepareCallCFunction(1, a1);
3520 __ CallCFunction(
3521 ExternalReference::perform_gc_function(masm->isolate()), 1);
3522 }
3523
3524 ExternalReference scope_depth =
3525 ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
3526 if (always_allocate) {
3527 __ li(a0, Operand(scope_depth));
3528 __ lw(a1, MemOperand(a0));
3529 __ Addu(a1, a1, Operand(1));
3530 __ sw(a1, MemOperand(a0));
3531 }
3532
3533 // Prepare arguments for C routine: a0 = argc, a1 = argv
3534 __ mov(a0, s0);
3535 __ mov(a1, s1);
3536
3537 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3538 // also need to reserve the 4 argument slots on the stack.
3539
3540 __ AssertStackIsAligned();
3541
3542 __ li(a2, Operand(ExternalReference::isolate_address()));
3543
3544 // From arm version of this function:
3545 // TODO(1242173): To let the GC traverse the return address of the exit
3546 // frames, we need to know where the return address is. Right now,
3547 // we push it on the stack to be able to find it again, but we never
3548 // restore from it in case of changes, which makes it impossible to
3549 // support moving the C entry code stub. This should be fixed, but currently
3550 // this is OK because the CEntryStub gets generated so early in the V8 boot
3551 // sequence that it is not moving ever.
3552
3553 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3554 // This branch-and-link sequence is needed to find the current PC on mips,
3555 // saved to the ra register.
3556 // Use masm-> here instead of the double-underscore macro since extra
3557 // coverage code can interfere with the proper calculation of ra.
3558 Label find_ra;
3559 masm->bal(&find_ra); // bal exposes branch delay slot.
3560 masm->nop(); // Branch delay slot nop.
3561 masm->bind(&find_ra);
3562
3563 // Adjust the value in ra to point to the correct return location, 2nd
3564 // instruction past the real call into C code (the jalr(t9)), and push it.
3565 // This is the return address of the exit frame.
3566 const int kNumInstructionsToJump = 6;
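    // The six instructions counted here are the Addu, sw, Subu, mov, jalr and
    // delay-slot nop that follow; each MIPS instruction is 4 bytes (equal to
    // kPointerSize on MIPS32), so ra ends up pointing just past the jalr delay
    // slot, which is the return location checked by the ASSERT_EQ below.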
3567 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3568 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
3569 masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
3570 // Stack is still aligned.
3571
3572 // Call the C routine.
3573 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3574 masm->jalr(t9);
3575 masm->nop(); // Branch delay slot nop.
3576 // Make sure the stored 'ra' points to this position.
3577 ASSERT_EQ(kNumInstructionsToJump,
3578 masm->InstructionsGeneratedSince(&find_ra));
3579 }
3580
3581 // Restore stack (remove arg slots).
3582 __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
3583
3584 if (always_allocate) {
3585 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3586 __ li(a2, Operand(scope_depth));
3587 __ lw(a3, MemOperand(a2));
3588 __ Subu(a3, a3, Operand(1));
3589 __ sw(a3, MemOperand(a2));
3590 }
3591
3592 // Check for failure result.
3593 Label failure_returned;
3594 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
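  // A failure object carries kFailureTag in its low tag bits; the STATIC_ASSERT
  // above guarantees that adding 1 clears exactly those bits, so a zero result
  // of the andi below identifies a failure return value.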
3595 __ addiu(a2, v0, 1);
3596 __ andi(t0, a2, kFailureTagMask);
3597 __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
3598
3599 // Exit C frame and return.
3600 // v0:v1: result
3601 // sp: stack pointer
3602 // fp: frame pointer
3603 __ LeaveExitFrame(save_doubles_, s0);
3604 __ Ret();
3605
3606 // Check if we should retry or throw exception.
3607 Label retry;
3608 __ bind(&failure_returned);
3609 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3610 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3611 __ Branch(&retry, eq, t0, Operand(zero_reg));
3612
3613 // Special handling of out of memory exceptions.
3614 Failure* out_of_memory = Failure::OutOfMemoryException();
3615 __ Branch(throw_out_of_memory_exception, eq,
3616 v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3617
3618 // Retrieve the pending exception and clear the variable.
3619 __ li(t0,
3620 Operand(ExternalReference::the_hole_value_location(masm->isolate())));
3621 __ lw(a3, MemOperand(t0));
3622 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3623 masm->isolate())));
3624 __ lw(v0, MemOperand(t0));
3625 __ sw(a3, MemOperand(t0));
3626
3627 // Special handling of termination exceptions which are uncatchable
3628 // by javascript code.
3629 __ Branch(throw_termination_exception, eq,
3630 v0, Operand(masm->isolate()->factory()->termination_exception()));
3631
3632 // Handle normal exception.
3633 __ jmp(throw_normal_exception);
3634
3635 __ bind(&retry);
3636 // Last failure (v0) will be moved to (a0) for parameter when retrying.
3637}
3638
3639
3640void CEntryStub::Generate(MacroAssembler* masm) {
3641 // Called from JavaScript; parameters are on stack as if calling JS function
3642 // a0: number of arguments including receiver
3643 // a1: pointer to builtin function
3644 // fp: frame pointer (restored after C call)
3645 // sp: stack pointer (restored as callee's sp after C call)
3646 // cp: current context (C callee-saved)
3647
3648 // NOTE: Invocations of builtins may return failure objects
3649 // instead of a proper result. The builtin entry handles
3650 // this by performing a garbage collection and retrying the
3651 // builtin once.
3652
3653 // Compute the argv pointer in a callee-saved register.
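  // s1 = sp + argc * kPointerSize - kPointerSize, i.e. the last (highest
  // address) slot of the argument area, which is where the first argument
  // lives (see the register summary below).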
3654 __ sll(s1, a0, kPointerSizeLog2);
3655 __ Addu(s1, sp, s1);
3656 __ Subu(s1, s1, Operand(kPointerSize));
3657
3658 // Enter the exit frame that transitions from JavaScript to C++.
3659 __ EnterExitFrame(save_doubles_);
3660
3661 // Setup argc and the builtin function in callee-saved registers.
3662 __ mov(s0, a0);
3663 __ mov(s2, a1);
3664
3665 // s0: number of arguments (C callee-saved)
3666 // s1: pointer to first argument (C callee-saved)
3667 // s2: pointer to builtin function (C callee-saved)
3668
3669 Label throw_normal_exception;
3670 Label throw_termination_exception;
3671 Label throw_out_of_memory_exception;
3672
3673 // Call into the runtime system.
3674 GenerateCore(masm,
3675 &throw_normal_exception,
3676 &throw_termination_exception,
3677 &throw_out_of_memory_exception,
3678 false,
3679 false);
3680
3681 // Do space-specific GC and retry runtime call.
3682 GenerateCore(masm,
3683 &throw_normal_exception,
3684 &throw_termination_exception,
3685 &throw_out_of_memory_exception,
3686 true,
3687 false);
3688
3689 // Do full GC and retry runtime call one final time.
3690 Failure* failure = Failure::InternalError();
3691 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
3692 GenerateCore(masm,
3693 &throw_normal_exception,
3694 &throw_termination_exception,
3695 &throw_out_of_memory_exception,
3696 true,
3697 true);
3698
3699 __ bind(&throw_out_of_memory_exception);
3700 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
3701
3702 __ bind(&throw_termination_exception);
3703 GenerateThrowUncatchable(masm, TERMINATION);
3704
3705 __ bind(&throw_normal_exception);
3706 GenerateThrowTOS(masm);
3707}
3708
3709
3710void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3711 Label invoke, exit;
3712
3713 // Registers:
3714 // a0: entry address
3715 // a1: function
3716 // a2: receiver
3717 // a3: argc
3718 //
3719 // Stack:
3720 // 4 args slots
3721 // args
3722
3723 // Save callee saved registers on the stack.
3724 __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
3725
3726 // Load argv in s0 register.
3727 __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize +
3728 StandardFrameConstants::kCArgsSlotsSize));
3729
3730 // We build an EntryFrame.
3731 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3732 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3733 __ li(t2, Operand(Smi::FromInt(marker)));
3734 __ li(t1, Operand(Smi::FromInt(marker)));
3735 __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
3736 masm->isolate())));
3737 __ lw(t0, MemOperand(t0));
3738 __ Push(t3, t2, t1, t0);
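  // The four words just pushed form the entry frame: from the top of the stack
  // down they are the saved c_entry_fp, two marker smis for the function and
  // context slots, and the bad frame pointer sentinel (see the diagram below).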
3739 // Setup frame pointer for the frame to be pushed.
3740 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
3741
3742 // Registers:
3743 // a0: entry_address
3744 // a1: function
3745 // a2: receiver_pointer
3746 // a3: argc
3747 // s0: argv
3748 //
3749 // Stack:
3750 // caller fp |
3751 // function slot | entry frame
3752 // context slot |
3753 // bad fp (0xff...f) |
3754 // callee saved registers + ra
3755 // 4 args slots
3756 // args
3757
3758 #ifdef ENABLE_LOGGING_AND_PROFILING
3759 // If this is the outermost JS call, set js_entry_sp value.
3760 Label non_outermost_js;
3761 ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
3762 masm->isolate());
3763 __ li(t1, Operand(ExternalReference(js_entry_sp)));
3764 __ lw(t2, MemOperand(t1));
3765 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
3766 __ sw(fp, MemOperand(t1));
3767 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3768 Label cont;
3769 __ b(&cont);
3770 __ nop(); // Branch delay slot nop.
3771 __ bind(&non_outermost_js);
3772 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
3773 __ bind(&cont);
3774 __ push(t0);
3775 #endif
3776
3777 // Call a faked try-block that does the invoke.
3778 __ bal(&invoke); // bal exposes branch delay slot.
3779 __ nop(); // Branch delay slot nop.
3780
3781 // Caught exception: Store result (exception) in the pending
3782 // exception field in the JSEnv and return a failure sentinel.
3783 // Coming in here the fp will be invalid because the PushTryHandler below
3784 // sets it to 0 to signal the existence of the JSEntry frame.
3785 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3786 masm->isolate())));
3787 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
3788 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3789 __ b(&exit); // b exposes branch delay slot.
3790 __ nop(); // Branch delay slot nop.
3791
3792 // Invoke: Link this frame into the handler chain.
3793 __ bind(&invoke);
3794 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
3795 // If an exception not caught by another handler occurs, this handler
3796 // returns control to the code after the bal(&invoke) above, which
3797 // restores all kCalleeSaved registers (including cp and fp) to their
3798 // saved values before returning a failure to C.
3799
3800 // Clear any pending exceptions.
3801 __ li(t0,
3802 Operand(ExternalReference::the_hole_value_location(masm->isolate())));
3803 __ lw(t1, MemOperand(t0));
3804 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3805 masm->isolate())));
3806 __ sw(t1, MemOperand(t0));
3807
3808 // Invoke the function by calling through JS entry trampoline builtin.
3809 // Notice that we cannot store a reference to the trampoline code directly in
3810 // this stub, because runtime stubs are not traversed when doing GC.
3811
3812 // Registers:
3813 // a0: entry_address
3814 // a1: function
3815 // a2: receiver_pointer
3816 // a3: argc
3817 // s0: argv
3818 //
3819 // Stack:
3820 // handler frame
3821 // entry frame
3822 // callee saved registers + ra
3823 // 4 args slots
3824 // args
3825
3826 if (is_construct) {
3827 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
3828 masm->isolate());
3829 __ li(t0, Operand(construct_entry));
3830 } else {
3831 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
3832 __ li(t0, Operand(entry));
3833 }
3834 __ lw(t9, MemOperand(t0)); // Deref address.
3835
3836 // Call JSEntryTrampoline.
3837 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
3838 __ Call(t9);
3839
3840 // Unlink this frame from the handler chain.
3841 __ PopTryHandler();
3842
3843 __ bind(&exit); // v0 holds result
3844 #ifdef ENABLE_LOGGING_AND_PROFILING
3845 // Check if the current stack frame is marked as the outermost JS frame.
3846 Label non_outermost_js_2;
3847 __ pop(t1);
3848 __ Branch(&non_outermost_js_2, ne, t1,
3849 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3850 __ li(t1, Operand(ExternalReference(js_entry_sp)));
3851 __ sw(zero_reg, MemOperand(t1));
3852 __ bind(&non_outermost_js_2);
3853 #endif
3854
3855 // Restore the top frame descriptors from the stack.
3856 __ pop(t1);
3857 __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
3858 masm->isolate())));
3859 __ sw(t1, MemOperand(t0));
3860
3861 // Reset the stack to the callee saved registers.
3862 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
3863
3864 // Restore callee saved registers from the stack.
3865 __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
3866 // Return.
3867 __ Jump(ra);
3868}
3869
3870
3871// Uses registers a0 to t0.
3872// Expected input (depending on whether args are in registers or on the stack):
3873// * object: a0 or at sp + 1 * kPointerSize.
3874// * function: a1 or at sp.
3875//
3876// Inlined call site patching is a crankshaft-specific feature that is not
3877// implemented on MIPS.
3878void InstanceofStub::Generate(MacroAssembler* masm) {
3879 // This is a crankshaft-specific feature that has not been implemented yet.
3880 ASSERT(!HasCallSiteInlineCheck());
3881 // Call site inlining and patching implies arguments in registers.
3882 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
3883 // ReturnTrueFalse is only implemented for inlined call sites.
3884 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
3885
3886 // Fixed register usage throughout the stub:
3887 const Register object = a0; // Object (lhs).
3888 Register map = a3; // Map of the object.
3889 const Register function = a1; // Function (rhs).
3890 const Register prototype = t0; // Prototype of the function.
3891 const Register inline_site = t5;
3892 const Register scratch = a2;
3893
3894 Label slow, loop, is_instance, is_not_instance, not_js_object;
3895
3896 if (!HasArgsInRegisters()) {
3897 __ lw(object, MemOperand(sp, 1 * kPointerSize));
3898 __ lw(function, MemOperand(sp, 0));
3899 }
3900
3901 // Check that the left hand is a JS object and load map.
3902 __ JumpIfSmi(object, &not_js_object);
3903 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
3904
3905 // If there is a call site cache don't look in the global cache, but do the
3906 // real lookup and update the call site cache.
3907 if (!HasCallSiteInlineCheck()) {
3908 Label miss;
3909 __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
3910 __ Branch(&miss, ne, function, Operand(t1));
3911 __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
3912 __ Branch(&miss, ne, map, Operand(t1));
3913 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3914 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3915
3916 __ bind(&miss);
3917 }
3918
3919 // Get the prototype of the function.
3920 __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
3921
3922 // Check that the function prototype is a JS object.
3923 __ JumpIfSmi(prototype, &slow);
3924 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
3925
3926 // Update the global instanceof or call site inlined cache with the current
3927 // map and function. The cached answer will be set when it is known below.
3928 if (!HasCallSiteInlineCheck()) {
3929 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
3930 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
3931 } else {
3932 UNIMPLEMENTED_MIPS();
3933 }
3934
3935 // Register mapping: a3 is object map and t0 is function prototype.
3936 // Get prototype of object into a2.
3937 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
3938
3939 // We don't need map any more. Use it as a scratch register.
3940 Register scratch2 = map;
3941 map = no_reg;
3942
3943 // Loop through the prototype chain looking for the function prototype.
3944 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
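  // scratch2 now holds null, the end-of-chain sentinel: reaching the function's
  // prototype means the object is an instance, reaching null means it is not.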
3945 __ bind(&loop);
3946 __ Branch(&is_instance, eq, scratch, Operand(prototype));
3947 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
3948 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
3949 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
3950 __ Branch(&loop);
3951
3952 __ bind(&is_instance);
3953 ASSERT(Smi::FromInt(0) == 0);
3954 if (!HasCallSiteInlineCheck()) {
3955 __ mov(v0, zero_reg);
3956 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3957 } else {
3958 UNIMPLEMENTED_MIPS();
3959 }
3960 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3961
3962 __ bind(&is_not_instance);
3963 if (!HasCallSiteInlineCheck()) {
3964 __ li(v0, Operand(Smi::FromInt(1)));
3965 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3966 } else {
3967 UNIMPLEMENTED_MIPS();
3968 }
3969 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3970
3971 Label object_not_null, object_not_null_or_smi;
3972 __ bind(&not_js_object);
3973 // Before null, smi and string value checks, check that the rhs is a function
3974 // as for a non-function rhs an exception needs to be thrown.
3975 __ JumpIfSmi(function, &slow);
3976 __ GetObjectType(function, scratch2, scratch);
3977 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
3978
3979 // Null is not instance of anything.
3980 __ Branch(&object_not_null, ne, scratch,
3981 Operand(masm->isolate()->factory()->null_value()));
3982 __ li(v0, Operand(Smi::FromInt(1)));
3983 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3984
3985 __ bind(&object_not_null);
3986 // Smi values are not instances of anything.
3987 __ JumpIfNotSmi(object, &object_not_null_or_smi);
3988 __ li(v0, Operand(Smi::FromInt(1)));
3989 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3990
3991 __ bind(&object_not_null_or_smi);
3992 // String values are not instances of anything.
3993 __ IsObjectJSStringType(object, scratch, &slow);
3994 __ li(v0, Operand(Smi::FromInt(1)));
3995 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3996
3997 // Slow-case. Tail call builtin.
3998 __ bind(&slow);
3999 if (!ReturnTrueFalseObject()) {
4000 if (HasArgsInRegisters()) {
4001 __ Push(a0, a1);
4002 }
4003 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4004 } else {
4005 __ EnterInternalFrame();
4006 __ Push(a0, a1);
4007 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4008 __ LeaveInternalFrame();
4009 __ mov(a0, v0);
4010 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4011 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4012 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4013 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4014 }
4015}
4016
4017
4018Register InstanceofStub::left() { return a0; }
4019
4020
4021Register InstanceofStub::right() { return a1; }
4022
4023
4024void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4025 // The displacement is the offset of the last parameter (if any)
4026 // relative to the frame pointer.
4027 static const int kDisplacement =
4028 StandardFrameConstants::kCallerSPOffset - kPointerSize;
4029
4030 // Check that the key is a smi.
4031 Label slow;
4032 __ JumpIfNotSmi(a1, &slow);
4033
4034 // Check if the calling frame is an arguments adaptor frame.
4035 Label adaptor;
4036 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4037 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4038 __ Branch(&adaptor,
4039 eq,
4040 a3,
4041 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4042
4043 // Check index (a1) against formal parameters count limit passed in
4044 // through register a0. Use unsigned comparison to get negative
4045 // check for free.
4046 __ Branch(&slow, hs, a1, Operand(a0));
4047
4048 // Read the argument from the stack and return it.
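  // a0 and a1 are smis (value << 1), so shifting their difference left by
  // kPointerSizeLog2 - kSmiTagSize yields the byte offset
  // (parameter_count - index) * kPointerSize without untagging first.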
4049 __ subu(a3, a0, a1);
4050 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4051 __ Addu(a3, fp, Operand(t3));
4052 __ lw(v0, MemOperand(a3, kDisplacement));
4053 __ Ret();
4054
4055 // Arguments adaptor case: Check index (a1) against actual arguments
4056 // limit found in the arguments adaptor frame. Use unsigned
4057 // comparison to get negative check for free.
4058 __ bind(&adaptor);
4059 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4060 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4061
4062 // Read the argument from the adaptor frame and return it.
4063 __ subu(a3, a0, a1);
4064 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4065 __ Addu(a3, a2, Operand(t3));
4066 __ lw(v0, MemOperand(a3, kDisplacement));
4067 __ Ret();
4068
4069 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4070 // by calling the runtime system.
4071 __ bind(&slow);
4072 __ push(a1);
4073 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4074}
4075
4076
4077void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
4078 // sp[0] : number of parameters
4079 // sp[4] : receiver displacement
4080 // sp[8] : function
4081
4082 // Check if the calling frame is an arguments adaptor frame.
4083 Label adaptor_frame, try_allocate, runtime;
4084 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4085 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4086 __ Branch(&adaptor_frame,
4087 eq,
4088 a3,
4089 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4090
4091 // Get the length from the frame.
4092 __ lw(a1, MemOperand(sp, 0));
4093 __ Branch(&try_allocate);
4094
4095 // Patch the arguments.length and the parameters pointer.
4096 __ bind(&adaptor_frame);
4097 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4098 __ sw(a1, MemOperand(sp, 0));
4099 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4100 __ Addu(a3, a2, Operand(at));
4101
4102 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4103 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4104
4105 // Try the new space allocation. Start out with computing the size
4106 // of the arguments object and the elements array in words.
4107 Label add_arguments_object;
4108 __ bind(&try_allocate);
4109 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4110 __ srl(a1, a1, kSmiTagSize);
4111
4112 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4113 __ bind(&add_arguments_object);
4114 __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize));
4115
4116 // Do the allocation of both objects in one go.
4117 __ AllocateInNewSpace(
4118 a1,
4119 v0,
4120 a2,
4121 a3,
4122 &runtime,
4123 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
4124
4125 // Get the arguments boilerplate from the current (global) context.
4126 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4127 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4128 __ lw(t0, MemOperand(t0,
4129 Context::SlotOffset(GetArgumentsBoilerplateIndex())));
4130
4131 // Copy the JS object part.
4132 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4133
4134 if (type_ == NEW_NON_STRICT) {
4135 // Setup the callee in-object property.
4136 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4137 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4138 const int kCalleeOffset = JSObject::kHeaderSize +
4139 Heap::kArgumentsCalleeIndex * kPointerSize;
4140 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4141 }
4142
4143 // Get the length (smi tagged) and set that as an in-object property too.
4144 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4145 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4146 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4147 Heap::kArgumentsLengthIndex * kPointerSize));
4148
4149 Label done;
4150 __ Branch(&done, eq, a1, Operand(zero_reg));
4151
4152 // Get the parameters pointer from the stack.
4153 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4154
4155 // Setup the elements pointer in the allocated arguments object and
4156 // initialize the header in the elements fixed array.
4157 __ Addu(t0, v0, Operand(GetArgumentsObjectSize()));
4158 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4159 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4160 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4161 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4162 __ srl(a1, a1, kSmiTagSize); // Untag the length for the loop.
4163
4164 // Copy the fixed array slots.
4165 Label loop;
4166 // Setup t0 to point to the first array slot.
4167 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4168 __ bind(&loop);
4169 // Pre-decrement a2 with kPointerSize on each iteration.
4170 // Pre-decrement in order to skip receiver.
4171 __ Addu(a2, a2, Operand(-kPointerSize));
4172 __ lw(a3, MemOperand(a2));
4173 // Post-increment t0 with kPointerSize on each iteration.
4174 __ sw(a3, MemOperand(t0));
4175 __ Addu(t0, t0, Operand(kPointerSize));
4176 __ Subu(a1, a1, Operand(1));
4177 __ Branch(&loop, ne, a1, Operand(zero_reg));
4178
4179 // Return and remove the on-stack parameters.
4180 __ bind(&done);
4181 __ Addu(sp, sp, Operand(3 * kPointerSize));
4182 __ Ret();
4183
4184 // Do the runtime call to allocate the arguments object.
4185 __ bind(&runtime);
4186 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4187}
4188
4189
4190void RegExpExecStub::Generate(MacroAssembler* masm) {
4191 // Just jump directly to the runtime system if native RegExp is not selected
4192 // at compile time, or if the regexp entry in generated code has been turned
4193 // off by a runtime switch or during compilation.
4194#ifdef V8_INTERPRETED_REGEXP
4195 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4196#else // V8_INTERPRETED_REGEXP
4197 if (!FLAG_regexp_entry_native) {
4198 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4199 return;
4200 }
4201
4202 // Stack frame on entry.
4203 // sp[0]: last_match_info (expected JSArray)
4204 // sp[4]: previous index
4205 // sp[8]: subject string
4206 // sp[12]: JSRegExp object
4207
4208 static const int kLastMatchInfoOffset = 0 * kPointerSize;
4209 static const int kPreviousIndexOffset = 1 * kPointerSize;
4210 static const int kSubjectOffset = 2 * kPointerSize;
4211 static const int kJSRegExpOffset = 3 * kPointerSize;
4212
4213 Label runtime, invoke_regexp;
4214
4215 // Allocation of registers for this function. These are in callee save
4216 // registers and will be preserved by the call to the native RegExp code, as
4217 // this code is called using the normal C calling convention. When calling
4218 // directly from generated code the native RegExp code will not do a GC and
4219 // therefore the content of these registers are safe to use after the call.
4220 // MIPS - using s0..s2, since we are not using CEntry Stub.
4221 Register subject = s0;
4222 Register regexp_data = s1;
4223 Register last_match_info_elements = s2;
4224
4225 // Ensure that a RegExp stack is allocated.
4226 ExternalReference address_of_regexp_stack_memory_address =
4227 ExternalReference::address_of_regexp_stack_memory_address(
4228 masm->isolate());
4229 ExternalReference address_of_regexp_stack_memory_size =
4230 ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
4231 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4232 __ lw(a0, MemOperand(a0, 0));
4233 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4234
4235 // Check that the first argument is a JSRegExp object.
4236 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4237 STATIC_ASSERT(kSmiTag == 0);
4238 __ JumpIfSmi(a0, &runtime);
4239 __ GetObjectType(a0, a1, a1);
4240 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4241
4242 // Check that the RegExp has been compiled (data contains a fixed array).
4243 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4244 if (FLAG_debug_code) {
4245 __ And(t0, regexp_data, Operand(kSmiTagMask));
4246 __ Check(nz,
4247 "Unexpected type for RegExp data, FixedArray expected",
4248 t0,
4249 Operand(zero_reg));
4250 __ GetObjectType(regexp_data, a0, a0);
4251 __ Check(eq,
4252 "Unexpected type for RegExp data, FixedArray expected",
4253 a0,
4254 Operand(FIXED_ARRAY_TYPE));
4255 }
4256
4257 // regexp_data: RegExp data (FixedArray)
4258 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4259 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4260 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4261
4262 // regexp_data: RegExp data (FixedArray)
4263 // Check that the number of captures fit in the static offsets vector buffer.
4264 __ lw(a2,
4265 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4266 // Calculate number of capture registers (number_of_captures + 1) * 2. This
4267 // uses the assumption that smis are 2 * their untagged value.
4268 STATIC_ASSERT(kSmiTag == 0);
4269 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4270 __ Addu(a2, a2, Operand(2)); // a2 was a smi.
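  // A smi is its value shifted left by one, so a2 already equals
  // number_of_captures * 2; adding 2 gives (number_of_captures + 1) * 2
  // without untagging.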
4271 // Check that the static offsets vector buffer is large enough.
4272 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4273
4274 // a2: Number of capture registers
4275 // regexp_data: RegExp data (FixedArray)
4276 // Check that the second argument is a string.
4277 __ lw(subject, MemOperand(sp, kSubjectOffset));
4278 __ JumpIfSmi(subject, &runtime);
4279 __ GetObjectType(subject, a0, a0);
4280 __ And(a0, a0, Operand(kIsNotStringMask));
4281 STATIC_ASSERT(kStringTag == 0);
4282 __ Branch(&runtime, ne, a0, Operand(zero_reg));
4283
4284 // Get the length of the subject string into a3.
4285 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4286
4287 // a2: Number of capture registers
4288 // a3: Length of subject string as a smi
4289 // subject: Subject string
4290 // regexp_data: RegExp data (FixedArray)
4291 // Check that the third argument is a positive smi less than the subject
4292 // string length. A negative value will be greater (unsigned comparison).
4293 __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4294 __ And(at, a0, Operand(kSmiTagMask));
4295 __ Branch(&runtime, ne, at, Operand(zero_reg));
4296 __ Branch(&runtime, ls, a3, Operand(a0));
4297
4298 // a2: Number of capture registers
4299 // subject: Subject string
4300 // regexp_data: RegExp data (FixedArray)
4301 // Check that the fourth object is a JSArray object.
4302 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4303 __ JumpIfSmi(a0, &runtime);
4304 __ GetObjectType(a0, a1, a1);
4305 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4306 // Check that the JSArray is in fast case.
4307 __ lw(last_match_info_elements,
4308 FieldMemOperand(a0, JSArray::kElementsOffset));
4309 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4310 __ Branch(&runtime, ne, a0, Operand(
4311 masm->isolate()->factory()->fixed_array_map()));
4312 // Check that the last match info has space for the capture registers and the
4313 // additional information.
4314 __ lw(a0,
4315 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4316 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4317 __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4318 __ Branch(&runtime, gt, a2, Operand(at));
4319 // subject: Subject string
4320 // regexp_data: RegExp data (FixedArray)
4321 // Check the representation and encoding of the subject string.
4322 Label seq_string;
4323 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4324 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4325 // First check for flat string.
4326 __ And(at, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
4327 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4328 __ Branch(&seq_string, eq, at, Operand(zero_reg));
4329
4330 // subject: Subject string
4331 // a0: instance type of subject string
4332 // regexp_data: RegExp data (FixedArray)
4333 // Check for flat cons string.
4334 // A flat cons string is a cons string where the second part is the empty
4335 // string. In that case the subject string is just the first part of the cons
4336 // string. Also in this case the first part of the cons string is known to be
4337 // a sequential string or an external string.
4338 STATIC_ASSERT(kExternalStringTag != 0);
4339 STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
4340 __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag));
4341 __ Branch(&runtime, ne, at, Operand(zero_reg));
4342 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4343 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4344 __ Branch(&runtime, ne, a0, Operand(a1));
4345 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
4346 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4347 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4348 // Is first part a flat string?
4349 STATIC_ASSERT(kSeqStringTag == 0);
4350 __ And(at, a0, Operand(kStringRepresentationMask));
4351 __ Branch(&runtime, ne, at, Operand(zero_reg));
4352
4353 __ bind(&seq_string);
4354 // subject: Subject string
4355 // regexp_data: RegExp data (FixedArray)
4356 // a0: Instance type of subject string
4357 STATIC_ASSERT(kStringEncodingMask == 4);
4358 STATIC_ASSERT(kAsciiStringTag == 4);
4359 STATIC_ASSERT(kTwoByteStringTag == 0);
4360 // Find the code object based on the assumptions above.
4361 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
4362 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
4363 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
4364 __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
4365 __ movz(t9, t0, a0); // If UC16 (a0 is 0), replace t9 with the UC16 code (t0).
4366
4367 // Check that the irregexp code has been generated for the actual string
4368 // encoding. If it has, the field contains a code object otherwise it
4369 // contains the hole.
4370 __ GetObjectType(t9, a0, a0);
4371 __ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
4372
4373 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4374 // t9: code
4375 // subject: Subject string
4376 // regexp_data: RegExp data (FixedArray)
4377 // Load used arguments before starting to push arguments for call to native
4378 // RegExp code to avoid handling changing stack height.
4379 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
4380 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
4381
4382 // a1: previous index
4383 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4384 // t9: code
4385 // subject: Subject string
4386 // regexp_data: RegExp data (FixedArray)
4387 // All checks done. Now push arguments for native regexp code.
4388 __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
4389 1, a0, a2);
4390
4391 // Isolates: note we add an additional parameter here (isolate pointer).
4392 static const int kRegExpExecuteArguments = 8;
4393 static const int kParameterRegisters = 4;
4394 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4395
4396 // Stack pointer now points to cell where return address is to be written.
4397 // Arguments are before that on the stack or in registers, meaning we
4398 // treat the return address as argument 5. Thus every argument after that
4399 // needs to be shifted back by 1. Since DirectCEntryStub will handle
4400 // allocating space for the c argument slots, we don't need to calculate
4401 // that into the argument positions on the stack. This is how the stack will
4402 // look (sp meaning the value of sp at this moment):
4403 // [sp + 4] - Argument 8
4404 // [sp + 3] - Argument 7
4405 // [sp + 2] - Argument 6
4406 // [sp + 1] - Argument 5
4407 // [sp + 0] - saved ra
4408
4409 // Argument 8: Pass current isolate address.
4410 // CFunctionArgumentOperand handles MIPS stack argument slots.
4411 __ li(a0, Operand(ExternalReference::isolate_address()));
4412 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
4413
4414 // Argument 7: Indicate that this is a direct call from JavaScript.
4415 __ li(a0, Operand(1));
4416 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
4417
4418 // Argument 6: Start (high end) of backtracking stack memory area.
4419 __ li(a0, Operand(address_of_regexp_stack_memory_address));
4420 __ lw(a0, MemOperand(a0, 0));
4421 __ li(a2, Operand(address_of_regexp_stack_memory_size));
4422 __ lw(a2, MemOperand(a2, 0));
4423 __ addu(a0, a0, a2);
4424 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
4425
4426 // Argument 5: static offsets vector buffer.
4427 __ li(a0, Operand(
4428 ExternalReference::address_of_static_offsets_vector(masm->isolate())));
4429 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
4430
4431 // For arguments 4 and 3 get string length, calculate start of string data
4432 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
4433 __ lw(a0, FieldMemOperand(subject, String::kLengthOffset));
4434 __ sra(a0, a0, kSmiTagSize);
4435 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4436 __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
4437 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
4438 // Argument 4 (a3): End of string data
4439 // Argument 3 (a2): Start of string data
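  // a3 holds the character-size shift (0 for ASCII, 1 for two-byte), so
  // start = string data + (previous index << a3) and
  // end   = string data + (string length << a3).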
4440 __ sllv(t1, a1, a3);
4441 __ addu(a2, t0, t1);
4442 __ sllv(t1, a0, a3);
4443 __ addu(a3, t0, t1);
4444
4445 // Argument 2 (a1): Previous index.
4446 // Already there
4447
4448 // Argument 1 (a0): Subject string.
4449 __ mov(a0, subject);
4450
4451 // Locate the code entry and call it.
4452 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
4453 DirectCEntryStub stub;
4454 stub.GenerateCall(masm, t9);
4455
4456 __ LeaveExitFrame(false, no_reg);
4457
4458 // v0: result
4459 // subject: subject string (callee saved)
4460 // regexp_data: RegExp data (callee saved)
4461 // last_match_info_elements: Last match info elements (callee saved)
4462
4463 // Check the result.
4464
4465 Label success;
4466 __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
4467 Label failure;
4468 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
4469 // If not exception it can only be retry. Handle that in the runtime system.
4470 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
4471 // The result must now be an exception. If there is no pending exception
4472 // already, a stack overflow (on the backtrack stack) was detected in RegExp
4473 // code but the exception was not created yet. Handle that in the runtime system.
4474 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4475 __ li(a1, Operand(
4476 ExternalReference::the_hole_value_location(masm->isolate())));
4477 __ lw(a1, MemOperand(a1, 0));
4478 __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
4479 masm->isolate())));
4480 __ lw(v0, MemOperand(a2, 0));
4481 __ Branch(&runtime, eq, v0, Operand(a1));
4482
4483 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
4484
4485 // Check if the exception is a termination. If so, throw as uncatchable.
4486 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
4487 Label termination_exception;
4488 __ Branch(&termination_exception, eq, v0, Operand(a0));
4489
4490 __ Throw(a0); // Expects thrown value in v0.
4491
4492 __ bind(&termination_exception);
4493 __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
4494
4495 __ bind(&failure);
4496 // For failure and exception return null.
4497 __ li(v0, Operand(masm->isolate()->factory()->null_value()));
4498 __ Addu(sp, sp, Operand(4 * kPointerSize));
4499 __ Ret();
4500
4501 // Process the result from the native regexp code.
4502 __ bind(&success);
4503 __ lw(a1,
4504 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4505 // Calculate number of capture registers (number_of_captures + 1) * 2.
4506 STATIC_ASSERT(kSmiTag == 0);
4507 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4508 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
4509
4510 // a1: number of capture registers
4511 // subject: subject string
4512 // Store the capture count.
4513 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
4514 __ sw(a2, FieldMemOperand(last_match_info_elements,
4515 RegExpImpl::kLastCaptureCountOffset));
4516 // Store last subject and last input.
4517 __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
4518 __ sw(subject,
4519 FieldMemOperand(last_match_info_elements,
4520 RegExpImpl::kLastSubjectOffset));
4521 __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
4522 __ sw(subject,
4523 FieldMemOperand(last_match_info_elements,
4524 RegExpImpl::kLastInputOffset));
4525 __ mov(a3, last_match_info_elements);
4526 __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
4527
4528 // Get the static offsets vector filled by the native regexp code.
4529 ExternalReference address_of_static_offsets_vector =
4530 ExternalReference::address_of_static_offsets_vector(masm->isolate());
4531 __ li(a2, Operand(address_of_static_offsets_vector));
4532
4533 // a1: number of capture registers
4534 // a2: offsets vector
4535 Label next_capture, done;
4536 // Capture register counter starts from number of capture registers and
4537 // counts down until wrapping after zero.
4538 __ Addu(a0,
4539 last_match_info_elements,
4540 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
4541 __ bind(&next_capture);
4542 __ Subu(a1, a1, Operand(1));
4543 __ Branch(&done, lt, a1, Operand(zero_reg));
4544 // Read the value from the static offsets vector buffer.
4545 __ lw(a3, MemOperand(a2, 0));
4546 __ addiu(a2, a2, kPointerSize);
4547 // Store the smi value in the last match info.
4548 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
4549 __ sw(a3, MemOperand(a0, 0));
4550 __ Branch(&next_capture, USE_DELAY_SLOT);
4551 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
4552
4553 __ bind(&done);
4554
4555 // Return last match info.
4556 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
4557 __ Addu(sp, sp, Operand(4 * kPointerSize));
4558 __ Ret();
4559
4560 // Do the runtime call to execute the regexp.
4561 __ bind(&runtime);
4562 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4563#endif // V8_INTERPRETED_REGEXP
4564}
4565
4566
4567void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4568 const int kMaxInlineLength = 100;
4569 Label slowcase;
4570 Label done;
4571 __ lw(a1, MemOperand(sp, kPointerSize * 2));
4572 STATIC_ASSERT(kSmiTag == 0);
4573 STATIC_ASSERT(kSmiTagSize == 1);
4574 __ JumpIfNotSmi(a1, &slowcase);
4575 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
4576 // Smi-tagging is equivalent to multiplying by 2.
4577 // Allocate RegExpResult followed by FixedArray with size in a2.
4578 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4579 // Elements: [Map][Length][..elements..]
4580 // Size of JSArray with two in-object properties and the header of a
4581 // FixedArray.
4582 int objects_size =
4583 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
4584 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
4585 __ Addu(a2, t1, Operand(objects_size));
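  // Total allocation size in words: the untagged number of elements (t1) plus
  // the JSRegExpResult object and the FixedArray header that precede them.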
4586 __ AllocateInNewSpace(
4587 a2, // In: Size, in words.
4588 v0, // Out: Start of allocation (tagged).
4589 a3, // Scratch register.
4590 t0, // Scratch register.
4591 &slowcase,
4592 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
4593 // v0: Start of allocated area, object-tagged.
4594 // a1: Number of elements in array, as smi.
4595 // t1: Number of elements, untagged.
4596
4597 // Set JSArray map to global.regexp_result_map().
4598 // Set empty properties FixedArray.
4599 // Set elements to point to FixedArray allocated right after the JSArray.
4600 // Interleave operations for better latency.
4601 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
4602 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
4603 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
4604 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
4605 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
4606 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
4607 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
4608 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
4609
4610 // Set input, index and length fields from arguments.
4611 __ lw(a1, MemOperand(sp, kPointerSize * 0));
4612 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
4613 __ lw(a1, MemOperand(sp, kPointerSize * 1));
4614 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
4615 __ lw(a1, MemOperand(sp, kPointerSize * 2));
4616 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
4617
4618 // Fill out the elements FixedArray.
4619 // v0: JSArray, tagged.
4620 // a3: FixedArray, tagged.
4621 // t1: Number of elements in array, untagged.
4622
4623 // Set map.
4624 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
4625 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
4626 // Set FixedArray length.
4627 __ sll(t2, t1, kSmiTagSize);
4628 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4629 // Fill contents of fixed-array with the-hole.
4630 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
4631 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4632 // Fill fixed array elements with hole.
4633 // v0: JSArray, tagged.
4634 // a2: the hole.
4635 // a3: Start of elements in FixedArray.
4636 // t1: Number of elements to fill.
4637 Label loop;
4638 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
4639 __ addu(t1, t1, a3); // Point past last element to store.
4640 __ bind(&loop);
4641 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
4642 __ sw(a2, MemOperand(a3));
4643 __ Branch(&loop, USE_DELAY_SLOT);
4644 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
4645
4646 __ bind(&done);
4647 __ Addu(sp, sp, Operand(3 * kPointerSize));
4648 __ Ret();
4649
4650 __ bind(&slowcase);
4651 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4652}
4653
4654
4655void CallFunctionStub::Generate(MacroAssembler* masm) {
4656 Label slow;
4657
4658 // The receiver might implicitly be the global object. This is
4659 // indicated by passing the hole as the receiver to the call
4660 // function stub.
4661 if (ReceiverMightBeImplicit()) {
4662 Label call;
4663 // Get the receiver from the stack.
4664 // function, receiver [, arguments]
4665 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
4666 // Call as function is indicated with the hole.
4667 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4668 __ Branch(&call, ne, t0, Operand(at));
4669 // Patch the receiver on the stack with the global receiver object.
4670 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4671 __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
4672 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
4673 __ bind(&call);
4674 }
4675
4676 // Get the function to call from the stack.
4677 // function, receiver [, arguments]
4678 __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
4679
4680 // Check that the function is really a JavaScript function.
4681 // a1: pushed function (to be verified)
4682 __ JumpIfSmi(a1, &slow);
4683 // Get the map of the function object.
4684 __ GetObjectType(a1, a2, a2);
4685 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
4686
4687 // Fast-case: Invoke the function now.
4688 // a1: pushed function
4689 ParameterCount actual(argc_);
4690
4691 if (ReceiverMightBeImplicit()) {
4692 Label call_as_function;
4693 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4694 __ Branch(&call_as_function, eq, t0, Operand(at));
4695 __ InvokeFunction(a1, actual, JUMP_FUNCTION);
4696 __ bind(&call_as_function);
4697 }
4698 __ InvokeFunction(a1,
4699 actual,
4700 JUMP_FUNCTION,
4701 NullCallWrapper(),
4702 CALL_AS_FUNCTION);
4703
4704 // Slow-case: Non-function called.
4705 __ bind(&slow);
4706 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4707 // of the original receiver from the call site).
4708 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
4709 __ li(a0, Operand(argc_)); // Set up the number of arguments.
4710 __ mov(a2, zero_reg);
4711 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
4712 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
4713 RelocInfo::CODE_TARGET);
4714}
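
// The stub above corresponds roughly to the following pseudo-C++ sketch
// (illustrative only; the real control flow is the assembly above):
//
//   if (ReceiverMightBeImplicit() && receiver == the_hole) {
//     // Implicit receiver: patch the stack slot with the global receiver.
//     receiver = global_object->global_receiver();
//   }
//   if (!function->IsSmi() && function->IsJSFunction()) {
//     InvokeFunction(function, argc);        // fast case, tail call
//   } else {
//     // The non-function callee becomes the receiver of CALL_NON_FUNCTION.
//     ArgumentsAdaptorTrampoline(Builtins::CALL_NON_FUNCTION, argc);
//   }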
4715
4716
4717// Unfortunately you have to run without snapshots to see most of these
4718// names in the profile since most compare stubs end up in the snapshot.
4719const char* CompareStub::GetName() {
4720 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
4721 (lhs_.is(a1) && rhs_.is(a0)));
4722
4723 if (name_ != NULL) return name_;
4724 const int kMaxNameLength = 100;
4725 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
4726 kMaxNameLength);
4727 if (name_ == NULL) return "OOM";
4728
4729 const char* cc_name;
4730 switch (cc_) {
4731 case lt: cc_name = "LT"; break;
4732 case gt: cc_name = "GT"; break;
4733 case le: cc_name = "LE"; break;
4734 case ge: cc_name = "GE"; break;
4735 case eq: cc_name = "EQ"; break;
4736 case ne: cc_name = "NE"; break;
4737 default: cc_name = "UnknownCondition"; break;
4738 }
4739
4740 const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
4741 const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
4742
4743 const char* strict_name = "";
4744 if (strict_ && (cc_ == eq || cc_ == ne)) {
4745 strict_name = "_STRICT";
4746 }
4747
4748 const char* never_nan_nan_name = "";
4749 if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
4750 never_nan_nan_name = "_NO_NAN";
4751 }
4752
4753 const char* include_number_compare_name = "";
4754 if (!include_number_compare_) {
4755 include_number_compare_name = "_NO_NUMBER";
4756 }
4757
4758 const char* include_smi_compare_name = "";
4759 if (!include_smi_compare_) {
4760 include_smi_compare_name = "_NO_SMI";
4761 }
4762
4763 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
4764 "CompareStub_%s%s%s%s%s%s",
4765 cc_name,
4766 lhs_name,
4767 rhs_name,
4768 strict_name,
4769 never_nan_nan_name,
4770 include_number_compare_name,
4771 include_smi_compare_name);
4772 return name_;
4773}
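
// For example, a strict equality stub with lhs in a1 and rhs in a0 and all
// optional compares included is named "CompareStub_EQ_a1_a0_STRICT"; the
// _NO_NAN, _NO_NUMBER and _NO_SMI suffixes appear only when the corresponding
// compare is omitted.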
4774
4775
4776int CompareStub::MinorKey() {
4777 // Encode the stub parameters in a unique 16 bit value.
4778 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
4779 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
4780 (lhs_.is(a1) && rhs_.is(a0)));
4781 return ConditionField::encode(static_cast<unsigned>(cc_))
4782 | RegisterField::encode(lhs_.is(a0))
4783 | StrictField::encode(strict_)
4784 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
4785 | IncludeSmiCompareField::encode(include_smi_compare_);
4786}
4787
4788
4789// StringCharCodeAtGenerator.
4790void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
4791 Label flat_string;
4792 Label ascii_string;
4793 Label got_char_code;
4794
4795 ASSERT(!t0.is(scratch_));
4796 ASSERT(!t0.is(index_));
4797 ASSERT(!t0.is(result_));
4798 ASSERT(!t0.is(object_));
4799
4800 // If the receiver is a smi trigger the non-string case.
4801 __ JumpIfSmi(object_, receiver_not_string_);
4802
4803 // Fetch the instance type of the receiver into result register.
4804 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
4805 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
4806 // If the receiver is not a string trigger the non-string case.
4807 __ And(t0, result_, Operand(kIsNotStringMask));
4808 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
4809
4810 // If the index is non-smi trigger the non-smi case.
4811 __ JumpIfNotSmi(index_, &index_not_smi_);
4812
4813 // Put smi-tagged index into scratch register.
4814 __ mov(scratch_, index_);
4815 __ bind(&got_smi_index_);
4816
4817 // Check for index out of range.
4818 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
4819 __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
4820
4821 // We need special handling for non-flat strings.
4822 STATIC_ASSERT(kSeqStringTag == 0);
4823 __ And(t0, result_, Operand(kStringRepresentationMask));
4824 __ Branch(&flat_string, eq, t0, Operand(zero_reg));
4825
4826 // Handle non-flat strings.
4827 __ And(t0, result_, Operand(kIsConsStringMask));
4828 __ Branch(&call_runtime_, eq, t0, Operand(zero_reg));
4829
4830 // ConsString.
4831 // Check whether the right hand side is the empty string (i.e. if
4832 // this is really a flat string in a cons string). If that is not
4833 // the case we would rather go to the runtime system now to flatten
4834 // the string.
4835 __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
4836 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
4837 __ Branch(&call_runtime_, ne, result_, Operand(t0));
4838
4839 // Get the first of the two strings and load its instance type.
4840 __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
4841 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
4842 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
4843 // If the first cons component is also non-flat, then go to runtime.
4844 STATIC_ASSERT(kSeqStringTag == 0);
4845
4846 __ And(t0, result_, Operand(kStringRepresentationMask));
4847 __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
4848
4849 // Check for 1-byte or 2-byte string.
4850 __ bind(&flat_string);
4851 STATIC_ASSERT(kAsciiStringTag != 0);
4852 __ And(t0, result_, Operand(kStringEncodingMask));
4853 __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
4854
4855 // 2-byte string.
4856 // Load the 2-byte character code into the result register. We can
4857 // add without shifting since the smi tag size is the log2 of the
4858 // number of bytes in a two-byte character.
4859 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
4860 __ Addu(scratch_, object_, Operand(scratch_));
4861 __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
4862 __ Branch(&got_char_code);
4863
4864 // ASCII string.
4865 // Load the byte into the result register.
4866 __ bind(&ascii_string);
4867
4868 __ srl(t0, scratch_, kSmiTagSize);
4869 __ Addu(scratch_, object_, t0);
4870
4871 __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
4872
4873 __ bind(&got_char_code);
4874 __ sll(result_, result_, kSmiTagSize);
4875 __ bind(&exit_);
4876}
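
// Rough sketch of the fast path above (illustrative only; the generated code
// bails out to the labels wired up by the caller instead of using "goto"):
//
//   if (object->IsSmi() || !object->IsString()) goto receiver_not_string;
//   if (!index->IsSmi()) goto index_not_smi;
//   if (index >= string->length()) goto index_out_of_range;
//   if (string is a cons string) {
//     if (cons->second() != empty_string) goto call_runtime;  // not flat
//     string = cons->first();
//     if (string is not sequential) goto call_runtime;
//   }
//   c = string is ASCII ? byte at index : halfword at index;
//   result = Smi::FromInt(c);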
4877
4878
4879void StringCharCodeAtGenerator::GenerateSlow(
4880 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
4881 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
4882
4883 // Index is not a smi.
4884 __ bind(&index_not_smi_);
4885 // If index is a heap number, try converting it to an integer.
4886 __ CheckMap(index_,
4887 scratch_,
4888 Heap::kHeapNumberMapRootIndex,
4889 index_not_number_,
4890 DONT_DO_SMI_CHECK);
4891 call_helper.BeforeCall(masm);
4892 // Consumed by runtime conversion function:
4893 __ Push(object_, index_, index_);
4894 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
4895 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
4896 } else {
4897 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
4898 // NumberToSmi discards numbers that are not exact integers.
4899 __ CallRuntime(Runtime::kNumberToSmi, 1);
4900 }
4901
4902 // Save the conversion result before the pop instructions below
4903 // have a chance to overwrite it.
4904
4905 __ Move(scratch_, v0);
4906
4907 __ pop(index_);
4908 __ pop(object_);
4909 // Reload the instance type.
4910 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
4911 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
4912 call_helper.AfterCall(masm);
4913 // If index is still not a smi, it must be out of range.
4914 __ JumpIfNotSmi(scratch_, index_out_of_range_);
4915 // Otherwise, return to the fast path.
4916 __ Branch(&got_smi_index_);
4917
4918 // Call runtime. We get here when the receiver is a string and the
4919 // index is a number, but the code of getting the actual character
4920 // is too complex (e.g., when the string needs to be flattened).
4921 __ bind(&call_runtime_);
4922 call_helper.BeforeCall(masm);
4923 __ Push(object_, index_);
4924 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
4925
4926 __ Move(result_, v0);
4927
4928 call_helper.AfterCall(masm);
4929 __ jmp(&exit_);
4930
4931 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
4932}
4933
4934
4935// -------------------------------------------------------------------------
4936// StringCharFromCodeGenerator
4937
4938void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
4939 // Fast case of Heap::LookupSingleCharacterStringFromCode.
4940
4941 ASSERT(!t0.is(result_));
4942 ASSERT(!t0.is(code_));
4943
4944 STATIC_ASSERT(kSmiTag == 0);
4945 STATIC_ASSERT(kSmiShiftSize == 0);
4946 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
4947 __ And(t0,
4948 code_,
4949 Operand(kSmiTagMask |
4950 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
4951 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
4952
4953 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
4954 // At this point code register contains smi tagged ASCII char code.
4955 STATIC_ASSERT(kSmiTag == 0);
4956 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
4957 __ Addu(result_, result_, t0);
4958 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
4959 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
4960 __ Branch(&slow_case_, eq, result_, Operand(t0));
4961 __ bind(&exit_);
4962}
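
// Roughly (illustrative sketch only):
//
//   if (!code->IsSmi() || code value > String::kMaxAsciiCharCode)
//     goto slow_case;
//   result = single_character_string_cache[untag(code)];
//   if (result == undefined) goto slow_case;   // not cached yet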
4963
4964
4965void StringCharFromCodeGenerator::GenerateSlow(
4966 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
4967 __ Abort("Unexpected fallthrough to CharFromCode slow case");
4968
4969 __ bind(&slow_case_);
4970 call_helper.BeforeCall(masm);
4971 __ push(code_);
4972 __ CallRuntime(Runtime::kCharFromCode, 1);
4973 __ Move(result_, v0);
4974
4975 call_helper.AfterCall(masm);
4976 __ Branch(&exit_);
4977
4978 __ Abort("Unexpected fallthrough from CharFromCode slow case");
4979}
4980
4981
4982// -------------------------------------------------------------------------
4983// StringCharAtGenerator
4984
4985void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
4986 char_code_at_generator_.GenerateFast(masm);
4987 char_from_code_generator_.GenerateFast(masm);
4988}
4989
4990
4991void StringCharAtGenerator::GenerateSlow(
4992 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
4993 char_code_at_generator_.GenerateSlow(masm, call_helper);
4994 char_from_code_generator_.GenerateSlow(masm, call_helper);
4995}
4996
4997
4998class StringHelper : public AllStatic {
4999 public:
5000 // Generate code for copying characters using a simple loop. This should only
5001 // be used in places where the number of characters is small and the
5002 // additional setup and checking in GenerateCopyCharactersLong adds too much
5003 // overhead. Copying of overlapping regions is not supported.
5004 // Dest register ends at the position after the last character written.
5005 static void GenerateCopyCharacters(MacroAssembler* masm,
5006 Register dest,
5007 Register src,
5008 Register count,
5009 Register scratch,
5010 bool ascii);
5011
5012 // Generate code for copying a large number of characters. This function
5013 // is allowed to spend extra time setting up conditions to make copying
5014 // faster. Copying of overlapping regions is not supported.
5015 // Dest register ends at the position after the last character written.
5016 static void GenerateCopyCharactersLong(MacroAssembler* masm,
5017 Register dest,
5018 Register src,
5019 Register count,
5020 Register scratch1,
5021 Register scratch2,
5022 Register scratch3,
5023 Register scratch4,
5024 Register scratch5,
5025 int flags);
5026
5027
5028 // Probe the symbol table for a two character string. If the string is
5029 // not found by probing, a jump to the label not_found is performed. This
5030 // jump does not guarantee that the string is not in the symbol table. If
5031 // the string is found, the code falls through with the string in register
5032 // v0. The contents of both the c1 and c2 registers are modified. At the
5033 // exit, c1 is guaranteed to contain a halfword with its low and high bytes
5034 // equal to the initial contents of c1 and c2 respectively.
5035 static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5036 Register c1,
5037 Register c2,
5038 Register scratch1,
5039 Register scratch2,
5040 Register scratch3,
5041 Register scratch4,
5042 Register scratch5,
5043 Label* not_found);
5044
5045 // Generate string hash.
5046 static void GenerateHashInit(MacroAssembler* masm,
5047 Register hash,
5048 Register character);
5049
5050 static void GenerateHashAddCharacter(MacroAssembler* masm,
5051 Register hash,
5052 Register character);
5053
5054 static void GenerateHashGetHash(MacroAssembler* masm,
5055 Register hash);
5056
5057 private:
5058 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
5059};
5060
5061
5062void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5063 Register dest,
5064 Register src,
5065 Register count,
5066 Register scratch,
5067 bool ascii) {
5068 Label loop;
5069 Label done;
5070 // This loop just copies one character at a time, as it is only used for
5071 // very short strings.
5072 if (!ascii) {
5073 __ addu(count, count, count);
5074 }
5075 __ Branch(&done, eq, count, Operand(zero_reg));
5076 __ addu(count, dest, count); // Count now points to the last dest byte.
5077
5078 __ bind(&loop);
5079 __ lbu(scratch, MemOperand(src));
5080 __ addiu(src, src, 1);
5081 __ sb(scratch, MemOperand(dest));
5082 __ addiu(dest, dest, 1);
5083 __ Branch(&loop, lt, dest, Operand(count));
5084
5085 __ bind(&done);
5086}
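
// The emitted loop is essentially (illustrative sketch, byte granularity):
//
//   if (!ascii) count *= 2;               // two-byte chars -> byte count
//   char* end = dest + count;
//   while (dest < end) *dest++ = *src++;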
5087
5088
5089enum CopyCharactersFlags {
5090 COPY_ASCII = 1,
5091 DEST_ALWAYS_ALIGNED = 2
5092};
5093
5094
5095void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5096 Register dest,
5097 Register src,
5098 Register count,
5099 Register scratch1,
5100 Register scratch2,
5101 Register scratch3,
5102 Register scratch4,
5103 Register scratch5,
5104 int flags) {
5105 bool ascii = (flags & COPY_ASCII) != 0;
5106 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5107
5108 if (dest_always_aligned && FLAG_debug_code) {
5109 // Check that destination is actually word aligned if the flag says
5110 // that it is.
5111 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5112 __ Check(eq,
5113 "Destination of copy not aligned.",
5114 scratch4,
5115 Operand(zero_reg));
5116 }
5117
5118 const int kReadAlignment = 4;
5119 const int kReadAlignmentMask = kReadAlignment - 1;
5120 // Ensure that reading an entire aligned word containing the last character
5121 // of a string will not read outside the allocated area (because we pad up
5122 // to kObjectAlignment).
5123 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5124 // Assumes word reads and writes are little endian.
5125 // Nothing to do for zero characters.
5126 Label done;
5127
5128 if (!ascii) {
5129 __ addu(count, count, count);
5130 }
5131 __ Branch(&done, eq, count, Operand(zero_reg));
5132
5133 Label byte_loop;
5134 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5135 __ Subu(scratch1, count, Operand(8));
5136 __ Addu(count, dest, Operand(count));
5137 Register limit = count; // Read until src equals this.
5138 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5139
5140 if (!dest_always_aligned) {
5141 // Align dest by byte copying. Copies between zero and three bytes.
5142 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5143 Label dest_aligned;
5144 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5145 Label aligned_loop;
5146 __ bind(&aligned_loop);
5147 __ lbu(scratch1, MemOperand(src));
5148 __ addiu(src, src, 1);
5149 __ sb(scratch1, MemOperand(dest));
5150 __ addiu(dest, dest, 1);
5151 __ addiu(scratch4, scratch4, 1);
5152 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5153 __ bind(&dest_aligned);
5154 }
5155
5156 Label simple_loop;
5157
5158 __ And(scratch4, src, Operand(kReadAlignmentMask));
5159 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5160
5161 // Loop for src/dst that are not aligned the same way.
5162 // This loop uses lwl and lwr instructions. These instructions
5163 // depend on the endianness, and the implementation assumes little-endian.
5164 {
5165 Label loop;
5166 __ bind(&loop);
5167 __ lwr(scratch1, MemOperand(src));
5168 __ Addu(src, src, Operand(kReadAlignment));
5169 __ lwl(scratch1, MemOperand(src, -1));
5170 __ sw(scratch1, MemOperand(dest));
5171 __ Addu(dest, dest, Operand(kReadAlignment));
5172 __ Subu(scratch2, limit, dest);
5173 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5174 }
5175
5176 __ Branch(&byte_loop);
5177
5178 // Simple loop.
5179 // Copy words from src to dest, until less than four bytes left.
5180 // Both src and dest are word aligned.
5181 __ bind(&simple_loop);
5182 {
5183 Label loop;
5184 __ bind(&loop);
5185 __ lw(scratch1, MemOperand(src));
5186 __ Addu(src, src, Operand(kReadAlignment));
5187 __ sw(scratch1, MemOperand(dest));
5188 __ Addu(dest, dest, Operand(kReadAlignment));
5189 __ Subu(scratch2, limit, dest);
5190 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5191 }
5192
5193 // Copy bytes from src to dest until dest hits limit.
5194 __ bind(&byte_loop);
5195 // Test if dest has already reached the limit.
5196 __ Branch(&done, ge, dest, Operand(limit));
5197 __ lbu(scratch1, MemOperand(src));
5198 __ addiu(src, src, 1);
5199 __ sb(scratch1, MemOperand(dest));
5200 __ addiu(dest, dest, 1);
5201 __ Branch(&byte_loop);
5202
5203 __ bind(&done);
5204}
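
// Overall structure of the emitted copy, as a rough sketch (illustrative
// only):
//
//   if (!ascii) count *= 2;                        // bytes to copy
//   if (count >= 8) {
//     if (!DEST_ALWAYS_ALIGNED) byte-copy until dest is word aligned;
//     if (src has the same alignment as dest) copy word by word (lw/sw);
//     else copy with unaligned loads (lwr/lwl) and aligned stores (sw);
//   }
//   byte-copy whatever remains (or everything, if count < 8).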
5205
5206
5207void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5208 Register c1,
5209 Register c2,
5210 Register scratch1,
5211 Register scratch2,
5212 Register scratch3,
5213 Register scratch4,
5214 Register scratch5,
5215 Label* not_found) {
5216 // Register scratch3 is the general scratch register in this function.
5217 Register scratch = scratch3;
5218
5219 // Make sure that both characters are not digits as such strings has a
5220 // different hash algorithm. Don't try to look for these in the symbol table.
5221 Label not_array_index;
5222 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5223 __ Branch(&not_array_index,
5224 Ugreater,
5225 scratch,
5226 Operand(static_cast<int>('9' - '0')));
5227 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5228
5229 // If the check failed, combine both characters into a single halfword.
5230 // This is required by the contract of the method: code at the
5231 // not_found branch expects this combination in c1 register.
5232 Label tmp;
5233 __ sll(scratch1, c2, kBitsPerByte);
5234 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5235 __ Or(c1, c1, scratch1);
5236 __ bind(&tmp);
5237 __ Branch(not_found,
5238 Uless_equal,
5239 scratch,
5240 Operand(static_cast<int>('9' - '0')));
5241
5242 __ bind(&not_array_index);
5243 // Calculate the two character string hash.
5244 Register hash = scratch1;
5245 StringHelper::GenerateHashInit(masm, hash, c1);
5246 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5247 StringHelper::GenerateHashGetHash(masm, hash);
5248
5249 // Collect the two characters in a register.
5250 Register chars = c1;
5251 __ sll(scratch, c2, kBitsPerByte);
5252 __ Or(chars, chars, scratch);
5253
5254 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5255 // hash: hash of two character string.
5256
5257 // Load symbol table.
5258 // Load address of first element of the symbol table.
5259 Register symbol_table = c2;
5260 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5261
5262 Register undefined = scratch4;
5263 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5264
5265 // Calculate capacity mask from the symbol table capacity.
5266 Register mask = scratch2;
5267 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5268 __ sra(mask, mask, 1);
5269 __ Addu(mask, mask, -1);
5270
5271 // Calculate untagged address of the first element of the symbol table.
5272 Register first_symbol_table_element = symbol_table;
5273 __ Addu(first_symbol_table_element, symbol_table,
5274 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5275
5276 // Registers.
5277 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5278 // hash: hash of two character string
5279 // mask: capacity mask
5280 // first_symbol_table_element: address of the first element of
5281 // the symbol table
5282 // undefined: the undefined object
5283 // scratch: -
5284
5285 // Perform a number of probes in the symbol table.
5286 static const int kProbes = 4;
5287 Label found_in_symbol_table;
5288 Label next_probe[kProbes];
5289 Register candidate = scratch5; // Scratch register contains candidate.
5290 for (int i = 0; i < kProbes; i++) {
5291 // Calculate entry in symbol table.
5292 if (i > 0) {
5293 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5294 } else {
5295 __ mov(candidate, hash);
5296 }
5297
5298 __ And(candidate, candidate, Operand(mask));
5299
5300 // Load the entry from the symbol table.
5301 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5302 __ sll(scratch, candidate, kPointerSizeLog2);
5303 __ Addu(scratch, scratch, first_symbol_table_element);
5304 __ lw(candidate, MemOperand(scratch));
5305
5306 // If entry is undefined no string with this hash can be found.
5307 Label is_string;
5308 __ GetObjectType(candidate, scratch, scratch);
5309 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5310
5311 __ Branch(not_found, eq, undefined, Operand(candidate));
5312 // Must be null (deleted entry).
5313 if (FLAG_debug_code) {
5314 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
5315 __ Assert(eq, "oddball in symbol table is not undefined or null",
5316 scratch, Operand(candidate));
5317 }
5318 __ jmp(&next_probe[i]);
5319
5320 __ bind(&is_string);
5321
5322 // Check that the candidate is a non-external ASCII string. The instance
5323 // type is still in the scratch register from the CompareObjectType
5324 // operation.
5325 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5326
5327 // If length is not 2 the string is not a candidate.
5328 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5329 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5330
5331 // Check if the two characters match.
5332 // Assumes that word load is little endian.
5333 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5334 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5335 __ bind(&next_probe[i]);
5336 }
5337
5338 // No matching 2 character string found by probing.
5339 __ jmp(not_found);
5340
5341 // Scratch register contains result when we fall through to here.
5342 Register result = candidate;
5343 __ bind(&found_in_symbol_table);
5344 __ mov(v0, result);
5345}
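
// Rough sketch of the probe above (illustrative only; the helpers are
// pseudo-code):
//
//   if (c1 and c2 are both ASCII digits) goto not_found;  // array-index hash
//   hash = StringHash(c1, c2);           // see the GenerateHash* code below
//   for (int i = 0; i < 4; i++) {
//     entry = symbol_table[(hash + probe_offset(i)) & capacity_mask];
//     if (entry == undefined) goto not_found;             // empty slot
//     if (entry == null) continue;                        // deleted slot
//     if (entry is a sequential ASCII string of length 2 whose characters
//         equal c1 and c2) return entry;                  // result in v0
//   }
//   goto not_found;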
5346
5347
5348void StringHelper::GenerateHashInit(MacroAssembler* masm,
5349 Register hash,
5350 Register character) {
5351 // hash = character + (character << 10);
5352 __ sll(hash, character, 10);
5353 __ addu(hash, hash, character);
5354 // hash ^= hash >> 6;
5355 __ sra(at, hash, 6);
5356 __ xor_(hash, hash, at);
5357}
5358
5359
5360void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5361 Register hash,
5362 Register character) {
5363 // hash += character;
5364 __ addu(hash, hash, character);
5365 // hash += hash << 10;
5366 __ sll(at, hash, 10);
5367 __ addu(hash, hash, at);
5368 // hash ^= hash >> 6;
5369 __ sra(at, hash, 6);
5370 __ xor_(hash, hash, at);
5371}
5372
5373
5374void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5375 Register hash) {
5376 // hash += hash << 3;
5377 __ sll(at, hash, 3);
5378 __ addu(hash, hash, at);
5379 // hash ^= hash >> 11;
5380 __ sra(at, hash, 11);
5381 __ xor_(hash, hash, at);
5382 // hash += hash << 15;
5383 __ sll(at, hash, 15);
5384 __ addu(hash, hash, at);
5385
5386 // if (hash == 0) hash = 27;
5387 __ ori(at, zero_reg, 27);
5388 __ movz(hash, at, hash);
5389}
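
// Taken together, GenerateHashInit, GenerateHashAddCharacter and
// GenerateHashGetHash compute the following hash (C sketch, illustrative
// only; the sra instructions make the right shifts arithmetic):
//
//   int32_t hash = 0;
//   for (each character c) {
//     hash += c;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (hash == 0) hash = 27;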
5390
5391
5392void SubStringStub::Generate(MacroAssembler* masm) {
5393 Label sub_string_runtime;
5394 // Stack frame on entry.
5395 // ra: return address
5396 // sp[0]: to
5397 // sp[4]: from
5398 // sp[8]: string
5399
5400 // This stub is called from the native-call %_SubString(...), so
5401 // nothing can be assumed about the arguments. It is tested that:
5402 // "string" is a sequential string,
5403 // both "from" and "to" are smis, and
5404 // 0 <= from <= to <= string.length.
5405 // If any of these assumptions fail, we call the runtime system.
5406
5407 static const int kToOffset = 0 * kPointerSize;
5408 static const int kFromOffset = 1 * kPointerSize;
5409 static const int kStringOffset = 2 * kPointerSize;
5410
5411 Register to = t2;
5412 Register from = t3;
5413
5414 // Check bounds and smi-ness.
5415 __ lw(to, MemOperand(sp, kToOffset));
5416 __ lw(from, MemOperand(sp, kFromOffset));
5417 STATIC_ASSERT(kFromOffset == kToOffset + 4);
5418 STATIC_ASSERT(kSmiTag == 0);
5419 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5420
5421 __ JumpIfNotSmi(from, &sub_string_runtime);
5422 __ JumpIfNotSmi(to, &sub_string_runtime);
5423
5424 __ sra(a3, from, kSmiTagSize); // Remove smi tag.
5425 __ sra(t5, to, kSmiTagSize); // Remove smi tag.
5426
5427 // a3: from index (untagged smi)
5428 // t5: to index (untagged smi)
5429
5430 __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
5431
5432 __ subu(a2, t5, a3);
5433 __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
5434
5435 // Special handling of sub-strings of length 1 and 2. One character strings
5436 // are handled in the runtime system (looked up in the single character
5437 // cache). Two character strings are looked for in the symbol cache.
5438 __ Branch(&sub_string_runtime, lt, a2, Operand(2));
5439
5440 // Both to and from are smis.
5441
5442 // a2: result string length
5443 // a3: from index (untagged smi)
5444 // t2: (a.k.a. to): to (smi)
5445 // t3: (a.k.a. from): from offset (smi)
5446 // t5: to index (untagged smi)
5447
5448 // Make sure first argument is a sequential (or flat) string.
5449 __ lw(t1, MemOperand(sp, kStringOffset));
5450 __ JumpIfSmi(t1, &sub_string_runtime);
5451
5452 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
5453 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
5454 __ And(t4, a1, Operand(kIsNotStringMask));
5455
5456 __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
5457
5458 // a1: instance type
5459 // a2: result string length
5460 // a3: from index (untagged smi)
5461 // t1: string
5462 // t2: (a.k.a. to): to (smi)
5463 // t3: (a.k.a. from): from offset (smi)
5464 // t5: to index (untagged smi)
5465
5466 Label seq_string;
5467 __ And(t0, a1, Operand(kStringRepresentationMask));
5468 STATIC_ASSERT(kSeqStringTag < kConsStringTag);
5469 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5470
5471 // External strings go to runtime.
5472 __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
5473
5474 // Sequential strings are handled directly.
5475 __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
5476
5477 // Cons string. Try to recurse (once) on the first substring.
5478 // (This adds a little more generality than necessary to handle flattened
5479 // cons strings, but not much).
5480 __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset));
5481 __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset));
5482 __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5483 STATIC_ASSERT(kSeqStringTag == 0);
5484 // Cons and External strings go to runtime.
5485 __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
5486
5487 // Definitely a sequential string.
5488 __ bind(&seq_string);
5489
5490 // a1: instance type
5491 // a2: result string length
5492 // a3: from index (untagged smi)
5493 // t1: string
5494 // t2: (a.k.a. to): to (smi)
5495 // t3: (a.k.a. from): from offset (smi)
5496 // t5: to index (untagged smi)
5497
5498 __ lw(t0, FieldMemOperand(t1, String::kLengthOffset));
5499 __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
5500 to = no_reg;
5501
5502 // a1: instance type
5503 // a2: result string length
5504 // a3: from index (untagged smi)
5505 // t1: string
5506 // t3: (a.k.a. from): from offset (smi)
5507 // t5: to index (untagged smi)
5508
5509 // Check for flat ASCII string.
5510 Label non_ascii_flat;
5511 STATIC_ASSERT(kTwoByteStringTag == 0);
5512
5513 __ And(t4, a1, Operand(kStringEncodingMask));
5514 __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
5515
5516 Label result_longer_than_two;
5517 __ Branch(&result_longer_than_two, gt, a2, Operand(2));
5518
5519 // Sub string of length 2 requested.
5520 // Get the two characters forming the sub string.
5521 __ Addu(t1, t1, Operand(a3));
5522 __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize));
5523 __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1));
5524
5525 // Try to look up the two-character string in the symbol table.
5526 Label make_two_character_string;
5527 StringHelper::GenerateTwoCharacterSymbolTableProbe(
5528 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
5529 Counters* counters = masm->isolate()->counters();
5530 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5531 __ Addu(sp, sp, Operand(3 * kPointerSize));
5532 __ Ret();
5533
5534
5535 // a2: result string length.
5536 // a3: two characters combined into halfword in little endian byte order.
5537 __ bind(&make_two_character_string);
5538 __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
5539 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
5540 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5541 __ Addu(sp, sp, Operand(3 * kPointerSize));
5542 __ Ret();
5543
5544 __ bind(&result_longer_than_two);
5545
5546 // Allocate the result.
5547 __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
5548
5549 // v0: result string.
5550 // a2: result string length.
5551 // a3: from index (untagged smi)
5552 // t1: string.
5553 // t3: (a.k.a. from): from offset (smi)
5554 // Locate first character of result.
5555 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5556 // Locate 'from' character of string.
5557 __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5558 __ Addu(t1, t1, Operand(a3));
5559
5560 // v0: result string.
5561 // a1: first character of result string.
5562 // a2: result string length.
5563 // t1: first character of sub string to copy.
5564 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
5565 StringHelper::GenerateCopyCharactersLong(
5566 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
5567 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5568 __ Addu(sp, sp, Operand(3 * kPointerSize));
5569 __ Ret();
5570
5571 __ bind(&non_ascii_flat);
5572 // a2: result string length.
5573 // t1: string.
5574 // t3: (a.k.a. from): from offset (smi)
5575 // Check for flat two byte string.
5576
5577 // Allocate the result.
5578 __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
5579
5580 // v0: result string.
5581 // a2: result string length.
5582 // t1: string.
5583 // Locate first character of result.
5584 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5585 // Locate 'from' character of string.
5586 __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5587 // As "from" is a smi it is 2 times the value which matches the size of a two
5588 // byte character.
5589 __ Addu(t1, t1, Operand(from));
5590 from = no_reg;
5591
5592 // v0: result string.
5593 // a1: first character of result.
5594 // a2: result length.
5595 // t1: first character of string to copy.
5596 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5597 StringHelper::GenerateCopyCharactersLong(
5598 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
5599 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5600 __ Addu(sp, sp, Operand(3 * kPointerSize));
5601 __ Ret();
5602
5603 // Just jump to runtime to create the sub string.
5604 __ bind(&sub_string_runtime);
5605 __ TailCallRuntime(Runtime::kSubString, 3, 1);
5606}
5607
5608
5609void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
5610 Register left,
5611 Register right,
5612 Register scratch1,
5613 Register scratch2,
5614 Register scratch3) {
5615 Register length = scratch1;
5616
5617 // Compare lengths.
5618 Label strings_not_equal, check_zero_length;
5619 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
5620 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
5621 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
5622 __ bind(&strings_not_equal);
5623 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
5624 __ Ret();
5625
5626 // Check if the length is zero.
5627 Label compare_chars;
5628 __ bind(&check_zero_length);
5629 STATIC_ASSERT(kSmiTag == 0);
5630 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
5631 __ li(v0, Operand(Smi::FromInt(EQUAL)));
5632 __ Ret();
5633
5634 // Compare characters.
5635 __ bind(&compare_chars);
5636
5637 GenerateAsciiCharsCompareLoop(masm,
5638 left, right, length, scratch2, scratch3, v0,
5639 &strings_not_equal);
5640
5641 // Characters are equal.
5642 __ li(v0, Operand(Smi::FromInt(EQUAL)));
5643 __ Ret();
5644}
5645
5646
5647void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
5648 Register left,
5649 Register right,
5650 Register scratch1,
5651 Register scratch2,
5652 Register scratch3,
5653 Register scratch4) {
5654 Label result_not_equal, compare_lengths;
5655 // Find minimum length and length difference.
5656 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
5657 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
5658 __ Subu(scratch3, scratch1, Operand(scratch2));
5659 Register length_delta = scratch3;
5660 __ slt(scratch4, scratch2, scratch1);
5661 __ movn(scratch1, scratch2, scratch4);
5662 Register min_length = scratch1;
5663 STATIC_ASSERT(kSmiTag == 0);
5664 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
5665
5666 // Compare loop.
5667 GenerateAsciiCharsCompareLoop(masm,
5668 left, right, min_length, scratch2, scratch4, v0,
5669 &result_not_equal);
5670
5671 // Compare lengths - strings up to min-length are equal.
5672 __ bind(&compare_lengths);
5673 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
5674 // Use length_delta as result if it's zero.
5675 __ mov(scratch2, length_delta);
5676 __ mov(scratch4, zero_reg);
5677 __ mov(v0, zero_reg);
5678
5679 __ bind(&result_not_equal);
5680 // Conditionally update the result based either on length_delta or
5681 // the last comparison performed in the loop above.
5682 Label ret;
5683 __ Branch(&ret, eq, scratch2, Operand(scratch4));
5684 __ li(v0, Operand(Smi::FromInt(GREATER)));
5685 __ Branch(&ret, gt, scratch2, Operand(scratch4));
5686 __ li(v0, Operand(Smi::FromInt(LESS)));
5687 __ bind(&ret);
5688 __ Ret();
5689}
5690
5691
5692void StringCompareStub::GenerateAsciiCharsCompareLoop(
5693 MacroAssembler* masm,
5694 Register left,
5695 Register right,
5696 Register length,
5697 Register scratch1,
5698 Register scratch2,
5699 Register scratch3,
5700 Label* chars_not_equal) {
5701 // Change index to run from -length to -1 by adding length to string
5702 // start. This means that loop ends when index reaches zero, which
5703 // doesn't need an additional compare.
5704 __ SmiUntag(length);
5705 __ Addu(scratch1, length,
5706 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5707 __ Addu(left, left, Operand(scratch1));
5708 __ Addu(right, right, Operand(scratch1));
5709 __ Subu(length, zero_reg, length);
5710 Register index = length; // index = -length;
5711
5712
5713 // Compare loop.
5714 Label loop;
5715 __ bind(&loop);
5716 __ Addu(scratch3, left, index);
5717 __ lbu(scratch1, MemOperand(scratch3));
5718 __ Addu(scratch3, right, index);
5719 __ lbu(scratch2, MemOperand(scratch3));
5720 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
5721 __ Addu(index, index, 1);
5722 __ Branch(&loop, ne, index, Operand(zero_reg));
5723}
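
// The loop above runs an index from -length up to zero, so reaching the end
// of the strings needs no extra compare. Roughly (illustrative only):
//
//   left += length; right += length;        // point just past both strings
//   for (int i = -length; i != 0; i++) {
//     if (left[i] != right[i]) goto chars_not_equal;
//   }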
5724
5725
5726void StringCompareStub::Generate(MacroAssembler* masm) {
5727 Label runtime;
5728
5729 Counters* counters = masm->isolate()->counters();
5730
5731 // Stack frame on entry.
5732 // sp[0]: right string
5733 // sp[4]: left string
5734 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
5735 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
5736
5737 Label not_same;
5738 __ Branch(&not_same, ne, a0, Operand(a1));
5739 STATIC_ASSERT(EQUAL == 0);
5740 STATIC_ASSERT(kSmiTag == 0);
5741 __ li(v0, Operand(Smi::FromInt(EQUAL)));
5742 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
5743 __ Addu(sp, sp, Operand(2 * kPointerSize));
5744 __ Ret();
5745
5746 __ bind(&not_same);
5747
5748 // Check that both objects are sequential ASCII strings.
5749 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
5750
5751 // Compare flat ASCII strings natively. Remove arguments from stack first.
5752 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
5753 __ Addu(sp, sp, Operand(2 * kPointerSize));
5754 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
5755
5756 __ bind(&runtime);
5757 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5758}
5759
5760
5761void StringAddStub::Generate(MacroAssembler* masm) {
5762 Label string_add_runtime, call_builtin;
5763 Builtins::JavaScript builtin_id = Builtins::ADD;
5764
5765 Counters* counters = masm->isolate()->counters();
5766
5767 // Stack on entry:
5768 // sp[0]: second argument (right).
5769 // sp[4]: first argument (left).
5770
5771 // Load the two arguments.
5772 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
5773 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
5774
5775 // Make sure that both arguments are strings if not known in advance.
5776 if (flags_ == NO_STRING_ADD_FLAGS) {
5777 __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
5778 // Load instance types.
5779 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
5780 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
5781 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5782 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
5783 STATIC_ASSERT(kStringTag == 0);
5784 // If either is not a string, go to runtime.
5785 __ Or(t4, t0, Operand(t1));
5786 __ And(t4, t4, Operand(kIsNotStringMask));
5787 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
5788 } else {
5789 // Here at least one of the arguments is definitely a string.
5790 // We convert the one that is not known to be a string.
5791 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
5792 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
5793 GenerateConvertArgument(
5794 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
5795 builtin_id = Builtins::STRING_ADD_RIGHT;
5796 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
5797 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
5798 GenerateConvertArgument(
5799 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
5800 builtin_id = Builtins::STRING_ADD_LEFT;
5801 }
5802 }
5803
5804 // Both arguments are strings.
5805 // a0: first string
5806 // a1: second string
5807 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5808 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5809 {
5810 Label strings_not_empty;
5811 // Check if either of the strings are empty. In that case return the other.
5812 // These tests use a zero-length check on the string lengths, which are Smis.
5813 // Assert that Smi::FromInt(0) is really 0.
5814 STATIC_ASSERT(kSmiTag == 0);
5815 ASSERT(Smi::FromInt(0) == 0);
5816 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
5817 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
5818 __ mov(v0, a0); // Assume we'll return first string (from a0).
5819 __ movz(v0, a1, a2); // If first is empty, return second (from a1).
5820 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
5821 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
5822 __ and_(t4, t4, t5); // Branch if both strings were non-empty.
5823 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
5824
5825 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
5826 __ Addu(sp, sp, Operand(2 * kPointerSize));
5827 __ Ret();
5828
5829 __ bind(&strings_not_empty);
5830 }
5831
5832 // Untag both string-lengths.
5833 __ sra(a2, a2, kSmiTagSize);
5834 __ sra(a3, a3, kSmiTagSize);
5835
5836 // Both strings are non-empty.
5837 // a0: first string
5838 // a1: second string
5839 // a2: length of first string
5840 // a3: length of second string
5841 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5842 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5843 // Look at the length of the result of adding the two strings.
5844 Label string_add_flat_result, longer_than_two;
5845 // Adding two lengths can't overflow.
5846 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
5847 __ Addu(t2, a2, Operand(a3));
5848 // Use the symbol table when adding two one character strings, as it
5849 // helps later optimizations to return a symbol here.
5850 __ Branch(&longer_than_two, ne, t2, Operand(2));
5851
5852 // Check that both strings are non-external ASCII strings.
5853 if (flags_ != NO_STRING_ADD_FLAGS) {
5854 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
5855 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
5856 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5857 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
5858 }
5859 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
5860 &string_add_runtime);
5861
5862 // Get the two characters forming the sub string.
5863 __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
5864 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
5865
5866 // Try to look up the two-character string in the symbol table. If it is not
5867 // just allocate a new one.
5868 Label make_two_character_string;
5869 StringHelper::GenerateTwoCharacterSymbolTableProbe(
5870 masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
5871 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
5872 __ Addu(sp, sp, Operand(2 * kPointerSize));
5873 __ Ret();
5874
5875 __ bind(&make_two_character_string);
5876 // Resulting string has length 2 and first chars of two strings
5877 // are combined into single halfword in a2 register.
5878 // So we can fill resulting string without two loops by a single
5879 // halfword store instruction (which assumes that processor is
5880 // in a little endian mode).
5881 __ li(t2, Operand(2));
5882 __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
5883 __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
5884 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
5885 __ Addu(sp, sp, Operand(2 * kPointerSize));
5886 __ Ret();
5887
5888 __ bind(&longer_than_two);
5889 // Check if resulting string will be flat.
5890 __ Branch(&string_add_flat_result, lt, t2,
5891 Operand(String::kMinNonFlatLength));
5892 // Handle exceptionally long strings in the runtime system.
5893 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
5894 ASSERT(IsPowerOf2(String::kMaxLength + 1));
5895 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
5896 __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
5897
5898 // If result is not supposed to be flat, allocate a cons string object.
5899 // If both strings are ASCII the result is an ASCII cons string.
5900 if (flags_ != NO_STRING_ADD_FLAGS) {
5901 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
5902 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
5903 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5904 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
5905 }
5906 Label non_ascii, allocated, ascii_data;
5907 STATIC_ASSERT(kTwoByteStringTag == 0);
5908 // Branch to non_ascii if either string-encoding field is zero (non-ascii).
5909 __ And(t4, t0, Operand(t1));
5910 __ And(t4, t4, Operand(kStringEncodingMask));
5911 __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
5912
5913 // Allocate an ASCII cons string.
5914 __ bind(&ascii_data);
5915 __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
5916 __ bind(&allocated);
5917 // Fill the fields of the cons string.
5918 __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
5919 __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
5920 __ mov(v0, t3);
5921 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
5922 __ Addu(sp, sp, Operand(2 * kPointerSize));
5923 __ Ret();
5924
5925 __ bind(&non_ascii);
5926 // At least one of the strings is two-byte. Check whether it happens
5927 // to contain only ASCII characters.
5928 // t0: first instance type.
5929 // t1: second instance type.
5930 // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
5931 __ And(at, t0, Operand(kAsciiDataHintMask));
5932 __ and_(at, at, t1);
5933 __ Branch(&ascii_data, ne, at, Operand(zero_reg));
5934
5935 __ xor_(t0, t0, t1);
5936 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
5937 __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
5938 __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
5939
5940 // Allocate a two byte cons string.
5941 __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
5942 __ Branch(&allocated);
5943
5944 // Handle creating a flat result. First check that both strings are
5945 // sequential and that they have the same encoding.
5946 // a0: first string
5947 // a1: second string
5948 // a2: length of first string
5949 // a3: length of second string
5950 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5951 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5952 // t2: sum of lengths.
5953 __ bind(&string_add_flat_result);
5954 if (flags_ != NO_STRING_ADD_FLAGS) {
5955 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
5956 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
5957 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5958 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
5959 }
5960 // Check that both strings are sequential, meaning that we
5961 // branch to runtime if either string tag is non-zero.
5962 STATIC_ASSERT(kSeqStringTag == 0);
5963 __ Or(t4, t0, Operand(t1));
5964 __ And(t4, t4, Operand(kStringRepresentationMask));
5965 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
5966
5967 // Now check if both strings have the same encoding (ASCII/Two-byte).
5968 // a0: first string
5969 // a1: second string
5970 // a2: length of first string
5971 // a3: length of second string
5972 // t0: first string instance type
5973 // t1: second string instance type
5974 // t2: sum of lengths.
5975 Label non_ascii_string_add_flat_result;
5976 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
5977 __ xor_(t3, t1, t0);
5978 __ And(t3, t3, Operand(kStringEncodingMask));
5979 __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
5980 // And see if it's ASCII (0) or two-byte (1).
5981 __ And(t3, t0, Operand(kStringEncodingMask));
5982 __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
5983
5984 // Both strings are sequential ASCII strings. We also know that they are
5985 // short (since the sum of the lengths is less than kMinNonFlatLength).
5986 // t2: length of resulting flat string
5987 __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
5988 // Locate first character of result.
5989 __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5990 // Locate first character of first argument.
5991 __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5992 // a0: first character of first string.
5993 // a1: second string.
5994 // a2: length of first string.
5995 // a3: length of second string.
5996 // t2: first character of result.
5997 // t3: result string.
5998 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
5999
6000 // Load second argument and locate first character.
6001 __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6002 // a1: first character of second string.
6003 // a3: length of second string.
6004 // t2: next character of result.
6005 // t3: result string.
6006 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6007 __ mov(v0, t3);
6008 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6009 __ Addu(sp, sp, Operand(2 * kPointerSize));
6010 __ Ret();
6011
6012 __ bind(&non_ascii_string_add_flat_result);
6013 // Both strings are sequential two byte strings.
6014 // a0: first string.
6015 // a1: second string.
6016 // a2: length of first string.
6017 // a3: length of second string.
6018 // t2: sum of length of strings.
6019 __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
6020 // a0: first string.
6021 // a1: second string.
6022 // a2: length of first string.
6023 // a3: length of second string.
6024 // t3: result string.
6025
6026 // Locate first character of result.
6027 __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6028 // Locate first character of first argument.
6029 __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6030
6031 // a0: first character of first string.
6032 // a1: second string.
6033 // a2: length of first string.
6034 // a3: length of second string.
6035 // t2: first character of result.
6036 // t3: result string.
6037 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
6038
6039 // Locate first character of second argument.
6040 __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6041
6042 // a1: first character of second string.
6043 // a3: length of second string.
6044 // t2: next character of result (after copy of first string).
6045 // t3: result string.
6046 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6047
6048 __ mov(v0, t3);
6049 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6050 __ Addu(sp, sp, Operand(2 * kPointerSize));
6051 __ Ret();
6052
6053 // Just jump to runtime to add the two strings.
6054 __ bind(&string_add_runtime);
6055 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6056
6057 if (call_builtin.is_linked()) {
6058 __ bind(&call_builtin);
6059 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6060 }
6061}
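
// Decision tree of the stub above, as a rough sketch (illustrative only):
//
//   if (length(a) == 0) return b;   if (length(b) == 0) return a;
//   len = length(a) + length(b);
//   if (len == 2)                 return two-character symbol (or new string);
//   if (len < kMinNonFlatLength)  return flat copy of both (ASCII or 2-byte);
//   if (len > kMaxLength)         goto runtime;
//   return new ConsString(a, b);  // no characters are copied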
6062
6063
6064void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6065 int stack_offset,
6066 Register arg,
6067 Register scratch1,
6068 Register scratch2,
6069 Register scratch3,
6070 Register scratch4,
6071 Label* slow) {
6072 // First check if the argument is already a string.
6073 Label not_string, done;
6074 __ JumpIfSmi(arg, &not_string);
6075 __ GetObjectType(arg, scratch1, scratch1);
6076 __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6077
6078 // Check the number to string cache.
6079 Label not_cached;
6080 __ bind(&not_string);
6081 // Puts the cached result into scratch1.
6082 NumberToStringStub::GenerateLookupNumberStringCache(masm,
6083 arg,
6084 scratch1,
6085 scratch2,
6086 scratch3,
6087 scratch4,
6088 false,
6089 &not_cached);
6090 __ mov(arg, scratch1);
6091 __ sw(arg, MemOperand(sp, stack_offset));
6092 __ jmp(&done);
6093
6094 // Check if the argument is a safe string wrapper.
6095 __ bind(&not_cached);
6096 __ JumpIfSmi(arg, slow);
6097 __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6098 __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6099 __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6100 __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6101 __ And(scratch2, scratch2, scratch4);
6102 __ Branch(slow, ne, scratch2, Operand(scratch4));
6103 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6104 __ sw(arg, MemOperand(sp, stack_offset));
6105
6106 __ bind(&done);
6107}
6108
6109
6110void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6111 ASSERT(state_ == CompareIC::SMIS);
6112 Label miss;
6113 __ Or(a2, a1, a0);
6114 __ JumpIfNotSmi(a2, &miss);
6115
6116 if (GetCondition() == eq) {
6117 // For equality we do not care about the sign of the result.
6118 __ Subu(v0, a0, a1);
6119 } else {
6120 // Untag before subtracting to avoid handling overflow.
6121 __ SmiUntag(a1);
6122 __ SmiUntag(a0);
6123 __ Subu(v0, a1, a0);
6124 }
6125 __ Ret();
6126
6127 __ bind(&miss);
6128 GenerateMiss(masm);
6129}
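
// Sketch of the smi compare above (illustrative only): for equality the raw
// tagged difference is enough (it is zero iff the smis are equal); otherwise
// both operands are untagged first so the subtraction cannot overflow.
//
//   if (cc == eq) result = right_tagged - left_tagged;  // only zero matters
//   else          result = untag(left) - untag(right);  // sign gives order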
6130
6131
6132void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6133 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6134
6135 Label generic_stub;
6136 Label unordered;
6137 Label miss;
6138 __ And(a2, a1, Operand(a0));
6139 __ JumpIfSmi(a2, &generic_stub);
6140
6141 __ GetObjectType(a0, a2, a2);
6142 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6143 __ GetObjectType(a1, a2, a2);
6144 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6145
6146 // Inline the double comparison, falling back to the general compare stub
6147 // if NaN is involved or FPU is unsupported.
6148 if (CpuFeatures::IsSupported(FPU)) {
6149 CpuFeatures::Scope scope(FPU);
6150
6151 // Load left and right operand.
6152 __ Subu(a2, a1, Operand(kHeapObjectTag));
6153 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6154 __ Subu(a2, a0, Operand(kHeapObjectTag));
6155 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6156
6157 Label fpu_eq, fpu_lt, fpu_gt;
6158 // Compare operands (test if unordered).
6159 __ c(UN, D, f0, f2);
6160 // Don't base result on status bits when a NaN is involved.
6161 __ bc1t(&unordered);
6162 __ nop();
6163
6164 // Test if equal.
6165 __ c(EQ, D, f0, f2);
6166 __ bc1t(&fpu_eq);
6167 __ nop();
6168
6169 // Test if unordered or less (unordered case is already handled).
6170 __ c(ULT, D, f0, f2);
6171 __ bc1t(&fpu_lt);
6172 __ nop();
6173
6174 // Otherwise it's greater.
6175 __ bc1f(&fpu_gt);
6176 __ nop();
6177
6178 // Return a result of -1, 0, or 1.
6179 __ bind(&fpu_eq);
6180 __ li(v0, Operand(EQUAL));
6181 __ Ret();
6182
6183 __ bind(&fpu_lt);
6184 __ li(v0, Operand(LESS));
6185 __ Ret();
6186
6187 __ bind(&fpu_gt);
6188 __ li(v0, Operand(GREATER));
6189 __ Ret();
6190
6191 __ bind(&unordered);
6192 }
6193
6194 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6195 __ bind(&generic_stub);
6196 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6197
6198 __ bind(&miss);
6199 GenerateMiss(masm);
6200}
6201
6202
6203void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6204 ASSERT(state_ == CompareIC::SYMBOLS);
6205 Label miss;
6206
6207 // Registers containing left and right operands respectively.
6208 Register left = a1;
6209 Register right = a0;
6210 Register tmp1 = a2;
6211 Register tmp2 = a3;
6212
6213 // Check that both operands are heap objects.
6214 __ JumpIfEitherSmi(left, right, &miss);
6215
6216 // Check that both operands are symbols.
6217 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6218 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6219 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6220 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6221 STATIC_ASSERT(kSymbolTag != 0);
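  // Since kSymbolTag is a non-zero bit in the instance type, AND-ing the two
  // instance types keeps the kIsSymbolMask bit set only if *both* operands
  // are symbols; a zero result below therefore means at least one is not.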
6222 __ And(tmp1, tmp1, Operand(tmp2));
6223 __ And(tmp1, tmp1, kIsSymbolMask);
6224 __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6225 // Make sure a0 is non-zero. At this point input operands are
6226 // guaranteed to be non-zero.
6227 ASSERT(right.is(a0));
6228 STATIC_ASSERT(EQUAL == 0);
6229 STATIC_ASSERT(kSmiTag == 0);
6230 __ mov(v0, right);
6231 // Symbols are compared by identity.
6232 __ Ret(ne, left, Operand(right));
6233 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6234 __ Ret();
6235
6236 __ bind(&miss);
6237 GenerateMiss(masm);
6238}
6239
6240
6241void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6242 ASSERT(state_ == CompareIC::STRINGS);
6243 Label miss;
6244
6245 // Registers containing left and right operands respectively.
6246 Register left = a1;
6247 Register right = a0;
6248 Register tmp1 = a2;
6249 Register tmp2 = a3;
6250 Register tmp3 = t0;
6251 Register tmp4 = t1;
6252 Register tmp5 = t2;
6253
6254 // Check that both operands are heap objects.
6255 __ JumpIfEitherSmi(left, right, &miss);
6256
6257 // Check that both operands are strings. This leaves the instance
6258 // types loaded in tmp1 and tmp2.
6259 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6260 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6261 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6262 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6263 STATIC_ASSERT(kNotStringTag != 0);
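  // kIsNotStringMask covers the bit that is set for non-string instance
  // types, so OR-ing the two instance types and masking yields a non-zero
  // value if either operand is not a string.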
6264 __ Or(tmp3, tmp1, tmp2);
6265 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6266 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6267
6268 // Fast check for identical strings.
6269 Label left_ne_right;
6270 STATIC_ASSERT(EQUAL == 0);
6271 STATIC_ASSERT(kSmiTag == 0);
6272 __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
6273 __ mov(v0, zero_reg); // In the delay slot.
6274 __ Ret();
6275 __ bind(&left_ne_right);
6276
6277 // Handle not identical strings.
6278
6279 // Check that both strings are symbols. If they are, we're done
6280 // because we already know they are not identical.
6281 ASSERT(GetCondition() == eq);
6282 STATIC_ASSERT(kSymbolTag != 0);
6283 __ And(tmp3, tmp1, Operand(tmp2));
6284 __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6285 Label is_symbol;
6286 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
6287 __ mov(v0, a0); // In the delay slot.
6288 // Make sure a0 is non-zero. At this point input operands are
6289 // guaranteed to be non-zero.
6290 ASSERT(right.is(a0));
6291 __ Ret();
6292 __ bind(&is_symbol);
6293
6294 // Check that both strings are sequential ASCII.
6295 Label runtime;
6296 __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
6297 &runtime);
6298
6299 // Compare flat ASCII strings. Returns when done.
6300 StringCompareStub::GenerateFlatAsciiStringEquals(
6301 masm, left, right, tmp1, tmp2, tmp3);
6302
6303 // Handle more complex cases in runtime.
6304 __ bind(&runtime);
6305 __ Push(left, right);
6306 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6307
6308 __ bind(&miss);
6309 GenerateMiss(masm);
6310}
6311
6312
6313void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6314  ASSERT(state_ == CompareIC::OBJECTS);
6315 Label miss;
6316 __ And(a2, a1, Operand(a0));
6317 __ JumpIfSmi(a2, &miss);
6318
6319 __ GetObjectType(a0, a2, a2);
6320 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6321 __ GetObjectType(a1, a2, a2);
6322 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6323
6324 ASSERT(GetCondition() == eq);
6325 __ Subu(v0, a0, Operand(a1));
6326 __ Ret();
6327
6328 __ bind(&miss);
6329 GenerateMiss(masm);
6330}
6331
6332
6333void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6334  __ Push(a1, a0);
6335 __ push(ra);
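  // The operands (a1, a0) and ra are saved here and restored after the
  // runtime call, so the rewritten stub is entered with the original inputs
  // and will return to the original call site.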
6336
6337 // Call the runtime system in a fresh internal frame.
6338 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6339 masm->isolate());
6340 __ EnterInternalFrame();
6341 __ Push(a1, a0);
6342 __ li(t0, Operand(Smi::FromInt(op_)));
6343 __ push(t0);
6344 __ CallExternalReference(miss, 3);
6345 __ LeaveInternalFrame();
6346 // Compute the entry point of the rewritten stub.
6347 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
6348 // Restore registers.
6349 __ pop(ra);
6350 __ pop(a0);
6351 __ pop(a1);
6352 __ Jump(a2);
6353}
6354
6355void DirectCEntryStub::Generate(MacroAssembler* masm) {
6356 // No need to pop or drop anything, LeaveExitFrame will restore the old
6357 // stack, thus dropping the allocated space for the return value.
6358 // The saved ra is after the reserved stack space for the 4 args.
6359 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
6360
6361 if (FLAG_debug_code && EnableSlowAsserts()) {
6362 // In case of an error the return address may point to a memory area
6363 // filled with kZapValue by the GC.
6364 // Dereference the address and check for this.
6365 __ lw(t0, MemOperand(t9));
6366 __ Assert(ne, "Received invalid return address.", t0,
6367 Operand(reinterpret_cast<uint32_t>(kZapValue)));
6368 }
6369 __ Jump(t9);
6370}
6371
6372
6373void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6374 ExternalReference function) {
6375 __ li(t9, Operand(function));
6376 this->GenerateCall(masm, t9);
6377}
6378
6379void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6380 Register target) {
6381 __ Move(t9, target);
6382 __ AssertStackIsAligned();
6383 // Allocate space for arg slots.
6384 __ Subu(sp, sp, kCArgsSlotsSize);
6385
6386 // Block the trampoline pool through the whole function to make sure the
6387 // number of generated instructions is constant.
6388 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
6389
6390 // We need to get the current 'pc' value, which is not available on MIPS.
6391 Label find_ra;
6392 masm->bal(&find_ra); // ra = pc + 8.
6393 masm->nop(); // Branch delay slot nop.
6394 masm->bind(&find_ra);
6395
6396 const int kNumInstructionsToJump = 6;
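  // This must match the number of instructions emitted between find_ra and
  // the ASSERT_EQ below; with the fixed-size li (lui/ori) and the delay-slot
  // nop of the final jump that is presumably addiu + sw + lui + ori + jr +
  // nop = 6.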
6397 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
6398 // Push return address (accessible to GC through exit frame pc).
6399 // This spot for ra was reserved in EnterExitFrame.
6400 masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
6401 masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6402 RelocInfo::CODE_TARGET), true);
6403 // Call the function.
6404 masm->Jump(t9);
6405 // Make sure the stored 'ra' points to this position.
6406 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
6407}
6408
6409
6410MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
6411 MacroAssembler* masm,
6412 Label* miss,
6413 Label* done,
6414 Register receiver,
6415 Register properties,
6416 String* name,
6417 Register scratch0) {
6418 // If the names in slots 1 through kProbes - 1 for the hash value are not
6419 // equal to the name and the kProbes-th slot is not used (its name is the
6420 // undefined value), the hash table is guaranteed not to contain the
6421 // property. This holds even if some slots represent deleted properties
6422 // (their names are the null value).
6423 for (int i = 0; i < kInlinedProbes; i++) {
6424 // scratch0 points to properties hash.
6425 // Compute the masked index: (hash + i + i * i) & mask.
6426 Register index = scratch0;
6427 // Capacity is smi 2^n.
6428 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
6429 __ Subu(index, index, Operand(1));
6430 __ And(index, index, Operand(
6431 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
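    // Note the arithmetic stays smi-tagged: the smi capacity minus one has
    // its low (tag) bit set, but AND-ing it with the smi constant above
    // clears that bit again, so index ends up as the smi-tagged masked index.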
6432
6433 // Scale the index by multiplying by the entry size.
6434 ASSERT(StringDictionary::kEntrySize == 3);
6435 // index *= 3.
6436 __ mov(at, index);
6437 __ sll(index, index, 1);
6438 __ Addu(index, index, at);
6439
6440 Register entity_name = scratch0;
6441 // Having undefined at this place means the name is not contained.
6442 ASSERT_EQ(kSmiTagSize, 1);
6443 Register tmp = properties;
6444
6445 __ sll(scratch0, index, 1);
6446 __ Addu(tmp, properties, scratch0);
6447 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
6448
6449 ASSERT(!tmp.is(entity_name));
6450 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
6451 __ Branch(done, eq, entity_name, Operand(tmp));
6452
6453 if (i != kInlinedProbes - 1) {
6454 // Stop if found the property.
6455 __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
6456
6457 // Check if the entry name is not a symbol.
6458 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
6459 __ lbu(entity_name,
6460 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
6461 __ And(scratch0, entity_name, Operand(kIsSymbolMask));
6462 __ Branch(miss, eq, scratch0, Operand(zero_reg));
6463
6464 // Restore the properties.
6465 __ lw(properties,
6466 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
6467 }
6468 }
6469
6470 const int spill_mask =
6471 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
6472 a2.bit() | a1.bit() | a0.bit());
6473
6474 __ MultiPush(spill_mask);
6475 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
6476 __ li(a1, Operand(Handle<String>(name)));
6477 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
6478 MaybeObject* result = masm->TryCallStub(&stub);
6479 if (result->IsFailure()) return result;
6480 __ MultiPop(spill_mask);
6481
6482 __ Branch(done, eq, v0, Operand(zero_reg));
6483 __ Branch(miss, ne, v0, Operand(zero_reg));
6484 return result;
6485}
6486
6487
6488// Probe the string dictionary in the |elements| register. Jump to the
6489// |done| label if a property with the given name is found. Jump to
6490// the |miss| label otherwise.
6491// If lookup was successful |scratch2| will be equal to elements + 4 * index.
6492void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
6493 Label* miss,
6494 Label* done,
6495 Register elements,
6496 Register name,
6497 Register scratch1,
6498 Register scratch2) {
6499 // Assert that name contains a string.
6500 if (FLAG_debug_code) __ AbortIfNotString(name);
6501
6502 // Compute the capacity mask.
6503 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
6504 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
6505 __ Subu(scratch1, scratch1, Operand(1));
6506
6507 // Generate an unrolled loop that performs a few probes before
6508 // giving up. Measurements done on Gmail indicate that 2 probes
6509 // cover ~93% of loads from dictionaries.
6510 for (int i = 0; i < kInlinedProbes; i++) {
6511 // Compute the masked index: (hash + i + i * i) & mask.
6512 __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
6513 if (i > 0) {
6514 // Add the probe offset (i + i * i) left shifted to avoid right shifting
6515 // the hash in a separate instruction. The value hash + i + i * i is right
6516 // shifted in the And instruction that follows.
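      // This relies on the identity
      //   (hash + (offset << kHashShift)) >> kHashShift ==
      //       (hash >> kHashShift) + offset,
      // which holds because the low kHashShift bits of the hash field are not
      // disturbed by the addition (see the overflow ASSERT below).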
6517 ASSERT(StringDictionary::GetProbeOffset(i) <
6518 1 << (32 - String::kHashFieldOffset));
6519 __ Addu(scratch2, scratch2, Operand(
6520 StringDictionary::GetProbeOffset(i) << String::kHashShift));
6521 }
6522 __ srl(scratch2, scratch2, String::kHashShift);
6523 __ And(scratch2, scratch1, scratch2);
6524
6525 // Scale the index by multiplying by the element size.
6526 ASSERT(StringDictionary::kEntrySize == 3);
6527 // scratch2 = scratch2 * 3.
6528
6529 __ mov(at, scratch2);
6530 __ sll(scratch2, scratch2, 1);
6531 __ Addu(scratch2, scratch2, at);
6532
6533 // Check if the key is identical to the name.
6534 __ sll(at, scratch2, 2);
6535 __ Addu(scratch2, elements, at);
6536 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
6537 __ Branch(done, eq, name, Operand(at));
6538 }
6539
6540 const int spill_mask =
6541 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
6542 a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
6543 ~(scratch1.bit() | scratch2.bit());
6544
6545 __ MultiPush(spill_mask);
6546 __ Move(a0, elements);
6547 __ Move(a1, name);
6548 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
6549 __ CallStub(&stub);
6550 __ mov(scratch2, a2);
6551 __ MultiPop(spill_mask);
6552
6553 __ Branch(done, ne, v0, Operand(zero_reg));
6554 __ Branch(miss, eq, v0, Operand(zero_reg));
6555}
6556
6557
6558void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6559 // Registers:
6560 //  result (v0): holds the result of the lookup.
6561 //  dictionary (a0): StringDictionary to probe.
6562 //  key (a1): the key to look up.
6563 //  index (a2): will hold the index of the entry if the lookup is
6564 //              successful; might alias with result.
6565 // Returns:
6566 //  result is zero if the lookup failed, non-zero otherwise.
6567
6568 Register result = v0;
6569 Register dictionary = a0;
6570 Register key = a1;
6571 Register index = a2;
6572 Register mask = a3;
6573 Register hash = t0;
6574 Register undefined = t1;
6575 Register entry_key = t2;
6576
6577 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
6578
6579 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
6580 __ sra(mask, mask, kSmiTagSize);
6581 __ Subu(mask, mask, Operand(1));
6582
6583 __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
6584
6585 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
6586
6587 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
6588 // Compute the masked index: (hash + i + i * i) & mask.
6589 // Capacity is smi 2^n.
6590 if (i > 0) {
6591 // Add the probe offset (i + i * i) left shifted to avoid right shifting
6592 // the hash in a separate instruction. The value hash + i + i * i is right
6593 // shifted in the And instruction that follows.
6594 ASSERT(StringDictionary::GetProbeOffset(i) <
6595 1 << (32 - String::kHashFieldOffset));
6596 __ Addu(index, hash, Operand(
6597 StringDictionary::GetProbeOffset(i) << String::kHashShift));
6598 } else {
6599 __ mov(index, hash);
6600 }
6601 __ srl(index, index, String::kHashShift);
6602 __ And(index, mask, index);
6603
6604 // Scale the index by multiplying by the entry size.
6605 ASSERT(StringDictionary::kEntrySize == 3);
6606 // index *= 3.
6607 __ mov(at, index);
6608 __ sll(index, index, 1);
6609 __ Addu(index, index, at);
6610
6611
6612 ASSERT_EQ(kSmiTagSize, 1);
6613 __ sll(index, index, 2);
6614 __ Addu(index, index, dictionary);
6615 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
6616
6617 // Having undefined at this place means the name is not contained.
6618 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
6619
6620 // Stop if found the property.
6621 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
6622
6623 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
6624 // Check if the entry name is not a symbol.
6625 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
6626 __ lbu(entry_key,
6627 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
6628 __ And(result, entry_key, Operand(kIsSymbolMask));
6629 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
6630 }
6631 }
6632
6633 __ bind(&maybe_in_dictionary);
6634 // If we are doing negative lookup then probing failure should be
6635 // treated as a lookup success. For positive lookup probing failure
6636 // should be treated as lookup failure.
6637 if (mode_ == POSITIVE_LOOKUP) {
6638 __ mov(result, zero_reg);
6639 __ Ret();
6640 }
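  // In NEGATIVE_LOOKUP mode execution simply falls through to in_dictionary
  // below and reports a non-zero result, so exhausting the probes is treated
  // as a potential hit rather than as proof that the name is absent.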
6641
6642 __ bind(&in_dictionary);
6643 __ li(result, 1);
6644 __ Ret();
6645
6646 __ bind(&not_in_dictionary);
6647 __ mov(result, zero_reg);
6648 __ Ret();
6649}
6650
6651
6652#undef __
6653
6654} } // namespace v8::internal
6655
6656#endif // V8_TARGET_ARCH_MIPS