// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in a0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(a0, &check_heap_number);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&call_builtin);
  __ push(a0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(a3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  int map_index = strict_mode_ == kStrictMode
      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
      : Context::FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
  __ Push(cp, a3, t0);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Setup the object header.
  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // Setup the fixed slots.
  __ li(a1, Operand(Smi::FromInt(0)));
  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the previous context.
  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ Pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a3, t0);
  __ lw(a3, MemOperand(t0));
  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t1));

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(a3);
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ LoadRoot(at, expected_map_index);
    __ Assert(eq, message, a3, Operand(at));
    __ pop(a3);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  // Return new object in v0.
  __ AllocateInNewSpace(size,
                        v0,
                        a1,
                        a2,
                        &slow_case,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ lw(a1, FieldMemOperand(a3, i));
      __ sw(a1, FieldMemOperand(v0, i));
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and setup the
    // elements pointer in the resulting object.
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ Addu(a2, v0, Operand(JSArray::kSize));
    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));

    // Copy the elements array.
    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
  }

  // Return and remove the on-stack parameters.
  __ Addu(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
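// Sketch of the combined allocation above (our illustration, not from the
// original comments): the JSArray header and its elements come from one
// contiguous new-space block, so the elements pointer is simply the start
// of the block plus JSArray::kSize.
//
//   v0                         v0 + JSArray::kSize
//   |                          |
//   [ JSArray header ......... ][ FixedArray elements ......... ]
//   <----- JSArray::kSize -----><------- elements_size -------->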


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
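// A worked example (ours, not from the original comments), assuming the
// usual IEEE 754 layout: converting the Smi 5 gives sign 0 and clz(5) == 29,
// so the unbiased exponent is 31 - 29 == 2 and the biased exponent is
// 1023 + 2 == 1025 (0x401). Shifting the leading 1 bit away leaves mantissa
// top bits 0x00040000, so the exponent word becomes 0x40140000 and the
// mantissa word 0x00000000, i.e. the double 0x4014000000000000 == 5.0.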
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return  result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "ConvertToDoubleStub"; }

#ifdef DEBUG
  void Print() { PrintF("ConvertToDoubleStub\n"); }
#endif
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ sra(source_, source_, kSmiTagSize);
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
  // Subtract from 0 if source was negative.
  __ subu(at, zero_reg, source_);
  __ movn(source_, at, exponent);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  // Safe to use 'at' as dest reg here.
  __ Or(at, exponent, Operand(exponent_word_for_1));
  __ movn(exponent, at, source_);  // Write exp when source not 0.
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, zero_reg);
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
  __ subu(mantissa, mantissa, zeros_);
  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
  __ Or(exponent, exponent, mantissa);

  // Shift up the source chopping the top bit off.
  __ Addu(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ sllv(source_, source_, zeros_);
  // Compute lower part of fraction (last 12 bits).
  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
  // And the top (top 20 bits).
  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
  __ or_(exponent, exponent, source_);

  __ Ret();
}


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(scratch1, a0, kSmiTagSize);
    __ mtc1(scratch1, f14);
    __ cvt_d_w(f14, f14);
    __ sra(scratch1, a1, kSmiTagSize);
    __ mtc1(scratch1, f12);
    __ cvt_d_w(f12, f12);
    if (destination == kCoreRegisters) {
      __ Move(a2, a3, f14);
      __ Move(a0, a1, f12);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from a0 to a3 and a2 in double format.
    __ mov(scratch1, a0);
    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
    __ push(ra);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
    // Write Smi from a1 to a1 and a0 in double format.
    __ mov(scratch1, a1);
    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(ra);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {
  // Load right operand (a0) to f14 or a2/a3.
  LoadNumber(masm, destination,
             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (a1) to f12 or a0/a1.
  LoadNumber(masm, destination,
             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     FPURegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  __ JumpIfSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(FPU) &&
      destination == kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    // Load the double from tagged HeapNumber to double register.

    // ARM uses a workaround here because of the unaligned HeapNumber
    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
    // point in generating even more instructions.
    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
    __ lw(dst2, FieldMemOperand(object,
                                HeapNumber::kValueOffset + kPointerSize));
  }
  __ Branch(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Convert smi to double using FPU instructions.
    __ SmiUntag(scratch1, object);
    __ mtc1(scratch1, dst);
    __ cvt_d_w(dst, dst);
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ Move(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 in double format.
    __ mov(scratch1, object);
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(ra);
    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(ra);
  }

  __ bind(&done);
}


void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               FPURegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label is_smi;
  Label done;
  Label not_in_int32_range;

  __ JumpIfSmi(object, &is_smi);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
  __ ConvertToInt32(object,
                    dst,
                    scratch1,
                    scratch2,
                    double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,
                                 scratch1,
                                 scratch2,
                                 scratch3);

  __ jmp(&done);

  __ bind(&is_smi);
  __ SmiUntag(dst, object);
  __ bind(&done);
}


void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             FPURegister double_dst,
                                             Register dst1,
                                             Register dst2,
                                             Register scratch2,
                                             FPURegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  Label done;

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(int_scratch, single_scratch);
    __ cvt_d_w(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |        dst2         |        dst1         |
    // | s |   exp   |            mantissa         |

    // Check for zero.
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);
    __ Branch(&done, eq, int_scratch, Operand(zero_reg));

    // Preload the sign of the value.
    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
    // Get the absolute value of the object (as an unsigned integer).
    Label skip_sub;
    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
    __ Subu(int_scratch, zero_reg, int_scratch);
    __ bind(&skip_sub);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ clz(dst1, int_scratch);
    __ li(scratch2, 31);
    __ Subu(dst1, scratch2, dst1);

    // Set the exponent.
    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Ins(dst2, scratch2,
           HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non null bit.
    __ li(scratch2, Operand(1));
    __ sllv(scratch2, scratch2, dst1);
    __ li(at, -1);
    __ Xor(scratch2, scratch2, at);
    __ And(int_scratch, int_scratch, scratch2);

    // Get the number of bits to set in the lower part of the mantissa.
    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
    // Set the higher 20 bits of the mantissa.
    __ srlv(at, int_scratch, scratch2);
    __ or_(dst2, dst2, at);
    __ li(at, 32);
    __ subu(scratch2, at, scratch2);
    __ sllv(dst1, int_scratch, scratch2);
    __ Branch(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ li(at, HeapNumber::kMantissaBitsInTopWord);
    __ subu(scratch2, at, dst1);
    __ sllv(scratch2, int_scratch, scratch2);
    __ Or(dst2, dst2, scratch2);
    // Set dst1 to 0.
    __ mov(dst1, zero_reg);
  }
  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  FPURegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  FPURegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);
  __ Branch(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));

    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
    // On MIPS a lot of things cannot be implemented the same way so right
    // now it makes a lot more sense to just do things manually.

    // Save FCSR.
    __ cfc1(scratch1, FCSR);
    // Disable FPU exceptions.
    __ ctc1(zero_reg, FCSR);
    __ trunc_w_d(single_scratch, double_dst);
    // Retrieve FCSR.
    __ cfc1(scratch2, FCSR);
    // Restore FCSR.
    __ ctc1(scratch1, FCSR);

    // Check for inexact conversion or exception.
    __ And(scratch2, scratch2, kFCSRFlagMask);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));

    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }

  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
    __ Or(scratch1, scratch1, Operand(dst2));
    __ Branch(&done, eq, scratch1, Operand(zero_reg));

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
  }

  __ bind(&done);
}
708
709void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
710 Register object,
711 Register dst,
712 Register heap_number_map,
713 Register scratch1,
714 Register scratch2,
715 Register scratch3,
716 FPURegister double_scratch,
717 Label* not_int32) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +0000718 ASSERT(!dst.is(object));
719 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
720 ASSERT(!scratch1.is(scratch2) &&
721 !scratch1.is(scratch3) &&
722 !scratch2.is(scratch3));
723
724 Label done;
725
726 // Untag the object into the destination register.
727 __ SmiUntag(dst, object);
728 // Just return if the object is a smi.
729 __ JumpIfSmi(object, &done);
730
731 if (FLAG_debug_code) {
732 __ AbortIfNotRootValue(heap_number_map,
733 Heap::kHeapNumberMapRootIndex,
734 "HeapNumberMap register clobbered.");
735 }
736 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
737
738 // Object is a heap number.
739 // Convert the floating point value to a 32-bit integer.
740 if (CpuFeatures::IsSupported(FPU)) {
741 CpuFeatures::Scope scope(FPU);
742 // Load the double value.
743 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
744
745 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
746 // On MIPS a lot of things cannot be implemented the same way so right
747 // now it makes a lot more sense to just do things manually.
748
749 // Save FCSR.
750 __ cfc1(scratch1, FCSR);
751 // Disable FPU exceptions.
752 __ ctc1(zero_reg, FCSR);
753 __ trunc_w_d(double_scratch, double_scratch);
754 // Retrieve FCSR.
755 __ cfc1(scratch2, FCSR);
756 // Restore FCSR.
757 __ ctc1(scratch1, FCSR);
758
erik.corry@gmail.comd6076d92011-06-06 09:39:18 +0000759 // Check for inexact conversion or exception.
760 __ And(scratch2, scratch2, kFCSRFlagMask);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +0000761
762 // Jump to not_int32 if the operation did not succeed.
763 __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
764 // Get the result in the destination register.
765 __ mfc1(dst, double_scratch);
766
767 } else {
768 // Load the double value in the destination registers.
769 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
770 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
771
772 // Check for 0 and -0.
773 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
774 __ Or(dst, scratch2, Operand(dst));
775 __ Branch(&done, eq, dst, Operand(zero_reg));
776
777 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
778
779 // Registers state after DoubleIs32BitInteger.
780 // dst: mantissa[51:20].
781 // scratch2: 1
782
783 // Shift back the higher bits of the mantissa.
784 __ srlv(dst, dst, scratch3);
785 // Set the implicit first bit.
786 __ li(at, 32);
787 __ subu(scratch3, at, scratch3);
788 __ sllv(scratch2, scratch2, scratch3);
789 __ Or(dst, dst, scratch2);
790 // Set the sign.
791 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
792 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
793 Label skip_sub;
794 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
795 __ Subu(dst, zero_reg, dst);
796 __ bind(&skip_sub);
797 }
798
799 __ bind(&done);
karlklose@chromium.org83a47282011-05-11 11:54:09 +0000800}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ext(scratch,
         src1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
  Register tmp = dst;
  __ srl(at, src1, 31);
  __ subu(tmp, scratch, at);
  __ Branch(not_int32, gt, tmp, Operand(30));
  // Bits [21:0] in the mantissa must be null: with an exponent of at most
  // 30 they would be shifted out of any int32.
  __ And(tmp, src2, 0x3fffff);
  __ Branch(not_int32, ne, tmp, Operand(zero_reg));

  // Otherwise the exponent needs to be big enough to shift left all the
  // non zero bits left. So we need the (30 - exponent) last bits of the
  // 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ext(dst,
         src2,
         HeapNumber::kMantissaBitsInTopWord,
         32 - HeapNumber::kMantissaBitsInTopWord);
  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
  __ or_(dst, dst, at);

  // Create the mask and test the lower bits (of the higher bits).
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ li(src2, 1);
  __ sllv(src1, src2, scratch);
  __ Subu(src1, src1, Operand(1));
  __ And(src1, dst, src1);
  __ Branch(not_int32, ne, src1, Operand(zero_reg));
}
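// A few worked examples for the checks above (ours, assuming the standard
// IEEE 754 encoding): 2^30 has unbiased exponent 30 and sign 0, so
// (exponent - signbit) == 30 and it passes; 2^31 (0x41E0000000000000) has
// exponent 31 and sign 0, so 31 - 0 > 30 and it is rejected; -2^31
// (0xC1E0000000000000) has exponent 31 but sign 1, so 31 - 1 == 30 and it
// survives the fast check, as it should since it is a valid int32; 0.5 has
// a negative unbiased exponent and is rejected immediately.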


void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // a0: Left value (least significant part of mantissa).
  // a1: Left value (sign, exponent, top of mantissa).
  // a2: Right value (least significant part of mantissa).
  // a3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is saved.
  // We currently always use s0 to pass it.
  ASSERT(heap_number_result.is(s0));

  // Push the current return address before the C call.
  __ push(ra);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // We are not using MIPS FPU instructions, and the parameters for the
    // runtime function call are prepared in the a0-a3 registers, but the
    // function we are calling is compiled with the hard-float flag and
    // expects the hard-float ABI (parameters in f12/f14 registers). We need
    // to copy the parameters from a0-a3 to the f12/f14 register pairs.
    __ Move(f12, a0, a1);
    __ Move(f14, a2, a3);
  }
  // Call C routine that may not cause GC or other trouble.
  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
                   4);
  // Store answer in the overwritable heap number.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // Double returned in register f0.
    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    // Double returned in registers v0 and v1.
    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
  }
  // Place heap_number_result in v0 and return to the pushed return address.
  __ mov(v0, heap_number_result);
  __ pop(ra);
  __ Ret();
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign_, the_int_, Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));

  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch_, scratch_, sign_);
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int_);
  __ movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int_, shift_distance);
  __ or_(scratch_, scratch_, at);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kExponentOffset));
  __ sll(scratch_, the_int_, 32 - shift_distance);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(scratch_, zero_reg);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}
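// A worked example (ours, assuming kNonMantissaBitsInTopWord == 12): for
// the_int_ == 0x7FFFFFFF the exponent word starts as (1023 + 30) << 20 ==
// 0x41D00000; shifting the integer right by 10 gives 0x001FFFFF, whose top
// bit (the implicit leading 1) merges into the low exponent bit, yielding
// 0x41DFFFFF. The mantissa word is 0x7FFFFFFF << 22 == 0xFFC00000, so the
// stored double is 0x41DFFFFFFFC00000 == 2147483647.0. The special case
// 0x80000000 is stored directly as sign | ((1023 + 31) << 20) == 0xC1E00000
// with a zero mantissa word, i.e. -2^31.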


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cc != eq || !never_nan_nan) {
    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are both equal and they are not both Smis so both of them are not
    // Smis. If it's not a heap number, then return equal.
    if (cc == less || cc == greater) {
      __ GetObjectType(a0, t4, t4);
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
    } else {
      __ GetObjectType(a0, t4, t4);
      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
      // Comparing JS objects with <=, >= is complicated.
      if (cc != eq) {
        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false! See ECMAScript 11.8.5.
        if (cc == less_equal || cc == greater_equal) {
          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
          __ Branch(&return_equal, ne, a0, Operand(t2));
          if (cc == le) {
            // undefined <= undefined should fail.
            __ li(v0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ li(v0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cc != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless. For the others here is some code to check
    // for NaN.
    if (cc != lt && cc != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
      // Read top bits of double representation (second word of value).
      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ And(t3, t2, Operand(exp_mask_reg));
      // If all bits not set (ne cond), then not a NaN, objects are equal.
      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
      // Or with all low-bits of mantissa.
      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
      __ Or(v0, t3, Operand(t2));
      // For equal we already have the right value in v0: Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN). For <= and >= we need to load v0 with the failing
      // value if it's a NaN.
      if (cc != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq, v0, Operand(zero_reg));
        if (cc == le) {
          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}
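// An illustrative bit pattern for the NaN test above (ours, standard IEEE
// 754): +Infinity has exponent word 0x7FF00000 and mantissa word 0, so all
// exponent bits are set and all mantissa bits are clear; the OR above then
// yields zero and the object compares equal to itself. A quiet NaN such as
// 0x7FF80000:00000000 also has all exponent bits set, but the OR of its
// shifted mantissa bits is non-zero, so it compares not-equal to itself.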


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ And(t0, lhs, Operand(kSmiTagMask));
  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ mov(v0, lhs);
    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, rhs, kSmiTagSize);
    __ mtc1(at, f14);
    __ cvt_d_w(f14, f14);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  } else {
    // Load lhs to a double in a2, a3.
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));

    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
    __ mov(t6, rhs);
    ConvertToDoubleStub stub1(a1, a0, t6, t5);
    __ push(ra);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);

    __ pop(ra);
  }

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi. Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ li(v0, Operand(1));
    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, lhs, kSmiTagSize);
    __ mtc1(at, f12);
    __ cvt_d_w(f12, f12);
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    // Convert lhs to a double format. t5 is scratch.
    __ mov(t6, lhs);
    ConvertToDoubleStub stub2(a3, a2, t6, t5);
    __ push(ra);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(ra);
    // Load rhs to a double in a1, a0.
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  // Fall through to both_loaded_as_doubles.
}


void EmitNanCheck(MacroAssembler* masm, Condition cc) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Lhs and rhs are already loaded to f12 and f14 register pairs.
    __ Move(t0, t1, f14);
    __ Move(t2, t3, f12);
  } else {
    // Lhs and rhs are already loaded to GP registers.
    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
  }
  Register rhs_exponent = exp_first ? t0 : t1;
  Register lhs_exponent = exp_first ? t2 : t3;
  Register rhs_mantissa = exp_first ? t1 : t0;
  Register lhs_mantissa = exp_first ? t3 : t2;
  Label one_is_nan, neither_is_nan;
  Label lhs_not_nan_exp_mask_is_loaded;

  Register exp_mask_reg = t4;
  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ and_(t5, lhs_exponent, exp_mask_reg);
  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));

  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ bind(&lhs_not_nan_exp_mask_is_loaded);
  __ and_(t5, rhs_exponent, exp_mask_reg);

  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }
  __ Ret();  // Return.

  __ bind(&neither_is_nan);
}


static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
  // f12 and f14 have the two doubles. Neither is a NaN.
  // Call a native function to do a comparison between two non-NaNs.
  // Call C routine that may not cause GC or other trouble.
  // We call the C routine and return manually because the argument slots
  // need to be freed.

  Label return_result_not_equal, return_result_equal;
  if (cc == eq) {
    // Doubles are not equal unless they have the same bit pattern.
    // Exception: 0 and -0.
    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      // Lhs and rhs are already loaded to f12 and f14 register pairs.
      __ Move(t0, t1, f14);
      __ Move(t2, t3, f12);
    } else {
      // Lhs and rhs are already loaded to GP registers.
      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
    }
    Register rhs_exponent = exp_first ? t0 : t1;
    Register lhs_exponent = exp_first ? t2 : t3;
    Register rhs_mantissa = exp_first ? t1 : t0;
    Register lhs_mantissa = exp_first ? t3 : t2;

    __ xor_(v0, rhs_mantissa, lhs_mantissa);
    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));

    __ subu(v0, rhs_exponent, lhs_exponent);
    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
    // 0, -0 case.
    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
    __ or_(t4, rhs_exponent, lhs_exponent);
    __ or_(t4, t4, rhs_mantissa);

    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));

    __ bind(&return_result_equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();
  }

  __ bind(&return_result_not_equal);

  if (!CpuFeatures::IsSupported(FPU)) {
    __ push(ra);
    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
    if (!IsMipsSoftFloatABI) {
      // We are not using MIPS FPU instructions, and the parameters for the
      // runtime function call are prepared in the a0-a3 registers, but the
      // function we are calling is compiled with the hard-float flag and
      // expects the hard-float ABI (parameters in f12/f14 registers). We
      // need to copy the parameters from a0-a3 to the f12/f14 register pairs.
      __ Move(f12, a0, a1);
      __ Move(f14, a2, a3);
    }
    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
    __ pop(ra);  // Because this function returns int, result is in v0.
    __ Ret();
  } else {
    CpuFeatures::Scope scope(FPU);
    Label equal, less_than;
    __ c(EQ, D, f12, f14);
    __ bc1t(&equal);
    __ nop();

    __ c(OLT, D, f12, f14);
    __ bc1t(&less_than);
    __ nop();

    // Not equal, not less, not NaN, must be greater.
    __ li(v0, Operand(GREATER));
    __ Ret();

    __ bind(&equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();

    __ bind(&less_than);
    __ li(v0, Operand(LESS));
    __ Ret();
  }
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ li(v0, Operand(1));
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(t2, a2, Operand(a3));
  __ And(t0, t2, Operand(kIsSymbolMask));
  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
}
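// A brief note on the symbol-symbol shortcut above (our summary): the
// STATIC_ASSERTs guarantee that only string instance types can carry
// kIsSymbolMask, so ANDing the two type bytes leaves the bit set only when
// both operands are symbols. Two distinct symbols can never be equal, so
// returning not-equal is safe without examining any characters.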


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
                                         Register lhs,
                                         Register rhs,
                                         Label* possible_strings,
                                         Label* not_both_strings) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of lhs.
  // Ensure that no non-strings have the symbol bit set.
  Label object_test;
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsSymbolMask));
  __ Branch(possible_strings, eq, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsSymbolMask));
  __ Branch(possible_strings, eq, at, Operand(zero_reg));

  // Both are symbols. We already checked they weren't the same pointer
  // so they are not equal.
  __ li(v0, Operand(1));  // Non-zero indicates not equal.
  __ Ret();

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ and_(a0, a2, a3);
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret();
}
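// How the final And/Xor pair works (our reading): if both maps have the
// kIsUndetectable bit set, the AND of the two bit fields keeps the bit, the
// masked value equals 1 << Map::kIsUndetectable, and the XOR produces 0 in
// v0, which callers interpret as "equal". In every other case the XOR
// leaves a non-zero value, i.e. "not equal".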


void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         bool object_is_smi,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  __ sra(mask, mask, kSmiTagSize + 1);
  __ Addu(mask, mask, -1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
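  // E.g. a cache whose FixedArray length is 128 holds 64 (number, string)
  // pairs, so mask == 63; a heap number with raw words hi and lo then lands
  // at entry ((hi ^ lo) & 63), whose first field is the number key and
  // whose second field is the cached string.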
  Isolate* isolate = masm->isolate();
  Label is_smi;
  Label load_result_from_cache;
  if (!object_is_smi) {
    __ JumpIfSmi(object, &is_smi);
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      __ CheckMap(object,
                  scratch1,
                  Heap::kHeapNumberMapRootIndex,
                  not_found,
                  DONT_DO_SMI_CHECK);

      STATIC_ASSERT(8 == kDoubleSize);
      __ Addu(scratch1,
              object,
              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
      __ lw(scratch2, MemOperand(scratch1, kPointerSize));
      __ lw(scratch1, MemOperand(scratch1, 0));
      __ Xor(scratch1, scratch1, Operand(scratch2));
      __ And(scratch1, scratch1, Operand(mask));

      // Calculate address of entry in string cache: each entry consists
      // of two pointer sized fields.
      __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
      __ Addu(scratch1, number_string_cache, scratch1);

      Register probe = mask;
      __ lw(probe,
            FieldMemOperand(scratch1, FixedArray::kHeaderSize));
      __ JumpIfSmi(probe, not_found);
      __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
      __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
      __ c(EQ, D, f12, f14);
      __ bc1t(&load_result_from_cache);
      __ nop();  // bc1t() requires explicit fill of branch delay slot.
      __ Branch(not_found);
    } else {
      // Note that there is no cache check for the non-FPU case, even though
      // it seems there could be. This may be a tiny optimization opportunity
      // for non-FPU cores.
      __ Branch(not_found);
    }
  }

  __ bind(&is_smi);
  Register scratch = scratch1;
  __ sra(scratch, object, 1);  // Shift away the tag.
  __ And(scratch, mask, Operand(scratch));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
  __ Addu(scratch, number_string_cache, scratch);

  // Check if the entry is the smi we are looking for.
  Register probe = mask;
  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  __ Branch(not_found, ne, object, Operand(probe));

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ lw(result,
        FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));

  __ IncrementCounter(isolate->counters()->number_to_string_native(),
                      1,
                      scratch1,
                      scratch2);
}


void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  __ lw(a1, MemOperand(sp, 0));

  // Generate code to lookup number in the number string cache.
  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
  __ Addu(sp, sp, Operand(1 * kPointerSize));
  __ Ret();

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
}


// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
// On exit, v0 is 0, positive, or negative (smi) to indicate the result
// of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  if (include_smi_compare_) {
    Label not_two_smis, smi_done;
    __ Or(a2, a1, a0);
    __ JumpIfNotSmi(a2, &not_two_smis);
    __ sra(a1, a1, 1);
    __ sra(a0, a0, 1);
    __ Subu(v0, a1, a0);
    __ Ret();
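    // Only the sign and zero-ness of the untagged difference matter to the
    // caller, so it need not be a canonical smi. E.g. for lhs == Smi 3 and
    // rhs == Smi 5 this computes 3 - 5 == -2, and a negative v0 reads as
    // LESS.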
    __ bind(&not_two_smis);
  } else if (FLAG_debug_code) {
    __ Or(a2, a1, a0);
    __ And(a2, a2, kSmiTagMask);
    __ Assert(ne, "CompareStub: unexpected smi operands.",
              a2, Operand(zero_reg));
  }

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the
  // answer or goes to slow. Only falls through if the objects were not
  // identical.
  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  __ And(t2, lhs_, Operand(rhs_));
  __ JumpIfNotSmi(t2, &not_smis, t0);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
                          &both_loaded_as_doubles, &slow, strict_);

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.

  Isolate* isolate = masm->isolate();
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    Label nan;
    __ li(t0, Operand(LESS));
    __ li(t1, Operand(GREATER));
    __ li(t2, Operand(EQUAL));

    // Check if either rhs or lhs is NaN.
    __ c(UN, D, f12, f14);
    __ bc1t(&nan);
    __ nop();

    // Check if the LESS condition is satisfied. If true, conditionally move
    // the result to v0.
    __ c(OLT, D, f12, f14);
    __ movt(v0, t0);
    // Use the previous check to conditionally move the opposite result
    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
    // next check.
    __ movf(v0, t1);
    // Check if the EQUAL condition is satisfied. If true, conditionally move
    // the result to v0.
    __ c(EQ, D, f12, f14);
    __ movt(v0, t2);
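    // Each c() sets the FPU condition flag that movt/movf then test: movt
    // moves when the flag is set, movf when it is clear. E.g. for 1.0 < 2.0
    // the OLT compare sets the flag, so movt writes LESS to v0 and the final
    // EQ compare leaves v0 untouched.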

    __ Ret();

    __ bind(&nan);
    // NaN comparisons always fail.
    // Load whatever we need in v0 to make the comparison fail.
    if (cc_ == lt || cc_ == le) {
      __ li(v0, Operand(GREATER));
    } else {
      __ li(v0, Operand(LESS));
    }
    __ Ret();
  } else {
    // Checks for NaN in the doubles we have loaded. Can return the answer or
    // fall through if neither is a NaN. Also binds rhs_not_nan.
    EmitNanCheck(masm, cc_);

    // Compares two doubles that are not NaNs. Returns the answer.
    // Never falls through.
    EmitTwoNonNanDoubleComparison(masm, cc_);
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict_) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
  }

  Label check_for_symbols;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to check_for_symbols.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs_,
                             rhs_,
                             &both_loaded_as_doubles,
                             &check_for_symbols,
                             &flat_string_check);

  __ bind(&check_for_symbols);
  if (cc_ == eq && !strict_) {
    // Returns an answer for two symbols or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);

  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
  if (cc_ == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     lhs_,
                                                     rhs_,
                                                     a2,
                                                     a3,
                                                     t0);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       lhs_,
                                                       rhs_,
                                                       a2,
                                                       a3,
                                                       t0,
                                                       t1);
  }
  // Never falls through to here.

  __ bind(&slow);
  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
  // a1 (rhs) second.
  __ Push(lhs_, rhs_);
  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript native;
  if (cc_ == eq) {
    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result.
    if (cc_ == lt || cc_ == le) {
      ncr = GREATER;
    } else {
      ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);
}


// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  // This stub uses FPU instructions.
  CpuFeatures::Scope scope(FPU);

  Label false_result;
  Label not_heap_number;
  Register scratch0 = t5.is(tos_) ? t3 : t5;

  // undefined -> false.
  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
  __ Branch(&false_result, eq, tos_, Operand(scratch0));

  // Boolean -> its value.
  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
  __ Branch(&false_result, eq, tos_, Operand(scratch0));
  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
  // "tos_" is a register and contains a non-zero value. Hence we implicitly
  // return true if the equal condition is satisfied.
  __ Ret(eq, tos_, Operand(scratch0));

  // Smis: 0 -> false, all others -> true.
  __ And(scratch0, tos_, tos_);
  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
  __ And(scratch0, tos_, Operand(kSmiTagMask));
  // "tos_" is a register and contains a non-zero value. Hence we implicitly
  // return true if the equal condition is satisfied.
  __ Ret(eq, scratch0, Operand(zero_reg));

  // 'null' -> false.
  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, tos_, Operand(scratch0));

  // HeapNumber => false if +0, -0, or NaN.
  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&not_heap_number, ne, scratch0, Operand(at));

  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
  __ fcmp(f12, 0.0, UEQ);
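  // UEQ is "unordered or equal": the FPU condition flag is set when f12
  // compares equal to 0.0 (true for both +0 and -0) or when f12 is NaN, so
  // the movt below clears tos_ exactly for the falsy heap numbers.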

  // "tos_" is a register, and contains a non zero value by default.
  // Hence we only need to overwrite "tos_" with zero to return false for
  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
  __ movt(tos_, zero_reg);
  __ Ret();

  __ bind(&not_heap_number);

  // It can be an undetectable object.
  // Undetectable => false.
  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));

  // JavaScript object => true.
  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));

  // "tos_" is a register and contains a non-zero value.
  // Hence we implicitly return true if the greater than
  // condition is satisfied.
  __ Ret(gt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Check for string.
  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
  // "tos_" is a register and contains a non-zero value.
  // Hence we implicitly return true if the greater than
  // condition is satisfied.
  __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE));

  // String value => false iff empty, i.e., length is zero.
  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
  // If length is zero, "tos_" contains zero ==> false.
  // If length is not zero, "tos_" contains a non-zero value ==> true.
  __ Ret();

  // Return 0 in "tos_" for false.
  __ bind(&false_result);
  __ mov(tos_, zero_reg);
  __ Ret();
}


const char* UnaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
      kMaxNameLength);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
  switch (mode_) {
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "UnaryOpStub_%s_%s_%s",
               op_name,
               overwrite_name,
               UnaryOpIC::GetName(operand_type_));
  return name_;
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::Generate(MacroAssembler* masm) {
  switch (operand_type_) {
    case UnaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case UnaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case UnaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case UnaryOpIC::GENERIC:
      GenerateGenericStub(masm);
      break;
  }
}


void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  // Argument is in a0 and v0 at this point, so we can overwrite a0.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ li(a2, Operand(Smi::FromInt(MinorKey())));
  __ li(a1, Operand(Smi::FromInt(op_)));
  __ li(a0, Operand(Smi::FromInt(operand_type_)));

  __ Push(v0, a2, a1, a0);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
                        masm->isolate()),
      4,
      1);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateSmiStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateSmiStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  __ bind(&non_smi);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  Label non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label* non_smi,
                                     Label* slow) {
  __ JumpIfNotSmi(a0, non_smi);

  // The result of negating zero or the smallest negative smi is not a smi.
  __ And(t0, a0, ~0x80000000);
  __ Branch(slow, eq, t0, Operand(zero_reg));
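  // Masking out the sign bit leaves zero only for tagged 0 (0x00000000) and
  // the smallest smi, -2^30 (tagged 0x80000000): negating the former yields
  // -0 and negating the latter yields 2^30, and neither value is a smi.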

  // Return '0 - value'.
  __ Subu(v0, zero_reg, a0);
  __ Ret();
}


void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
                                        Label* non_smi) {
  __ JumpIfNotSmi(a0, non_smi);

  // Flip bits and revert inverted smi-tag.
  __ Neg(v0, a0);
  __ And(v0, v0, ~kSmiTagMask);
  __ Ret();
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateHeapNumberStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateHeapNumberStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                            Label* slow) {
  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
  // a0 is a heap number. Get a new heap number in a1.
  if (mode_ == UNARY_OVERWRITE) {
    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
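    // Negating an IEEE 754 double only requires flipping its sign bit. E.g.
    // 1.0 has exponent word 0x3ff00000; xoring with kSignMask (0x80000000)
    // gives 0xbff00000, the exponent word of -1.0.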
    __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
  } else {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    __ EnterInternalFrame();
    __ push(a0);
    __ CallRuntime(Runtime::kNumberAlloc, 0);
    __ mov(a1, v0);
    __ pop(a0);
    __ LeaveInternalFrame();

    __ bind(&heapnumber_allocated);
    __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
    __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
    __ mov(v0, a1);
  }
  __ Ret();
}


void UnaryOpStub::GenerateHeapNumberCodeBitNot(
    MacroAssembler* masm,
    Label* slow) {
  Label impossible;

  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
  // Convert the heap number in a0 to an untagged integer in a1.
  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);

  // Do the bitwise operation and check if the result fits in a smi.
  Label try_float;
  __ Neg(a1, a1);
  __ Addu(a2, a1, Operand(0x40000000));
  __ Branch(&try_float, lt, a2, Operand(zero_reg));
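  // Adding 0x40000000 turns the sign bit into a range check: the sum is
  // negative exactly when a1 lies outside the smi range [-2^30, 2^30).
  // E.g. a1 == 0x40000000 (2^30) gives 0x80000000, which is negative, so
  // the value cannot be tagged and we branch to try_float.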

  // Tag the result as a smi and we're done.
  __ SmiTag(v0, a1);
  __ Ret();

  // Try to store the result in a heap number.
  __ bind(&try_float);
  if (mode_ == UNARY_NO_OVERWRITE) {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    // Allocate a new heap number without zapping v0, which we need if it
    // fails.
    __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    __ EnterInternalFrame();
    __ push(v0);  // Push the heap number, not the untagged int32.
    __ CallRuntime(Runtime::kNumberAlloc, 0);
    __ mov(a2, v0);  // Move the new heap number into a2.
    // Get the heap number into v0, now that the new heap number is in a2.
    __ pop(v0);
    __ LeaveInternalFrame();

    // Convert the heap number in v0 to an untagged integer in a1.
    // This can't go slow-case because it's the same number we already
    // converted once before.
    __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
    // Flip the bits of the result.
    __ Xor(a1, a1, -1);

    __ bind(&heapnumber_allocated);
    __ mov(v0, a2);  // Move newly allocated heap number to v0.
  }

  if (CpuFeatures::IsSupported(FPU)) {
    // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
    CpuFeatures::Scope scope(FPU);
    __ mtc1(a1, f0);
    __ cvt_d_w(f0, f0);
    __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
    __ Ret();
  } else {
    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
    // have to set up a frame.
    WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
  }

  __ bind(&impossible);
  if (FLAG_debug_code) {
    __ stop("Incorrect assumption in bit-not stub");
  }
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateGenericStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateGenericStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericCodeFallback(
    MacroAssembler* masm) {
  // Handle the slow case by jumping to the JavaScript builtin.
  __ push(a0);
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  Label get_result;

  __ Push(a1, a0);

  __ li(a2, Operand(Smi::FromInt(MinorKey())));
  __ li(a1, Operand(Smi::FromInt(op_)));
  __ li(a0, Operand(Smi::FromInt(operands_type_)));
  __ Push(a2, a1, a0);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
    MacroAssembler* masm) {
  UNIMPLEMENTED();
}


void BinaryOpStub::Generate(MacroAssembler* masm) {
  switch (operands_type_) {
    case BinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case BinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case BinaryOpIC::INT32:
      GenerateInt32Stub(masm);
      break;
    case BinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case BinaryOpIC::ODDBALL:
      GenerateOddballStub(masm);
      break;
    case BinaryOpIC::BOTH_STRING:
      GenerateBothStringStub(masm);
      break;
    case BinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case BinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


const char* BinaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
      kMaxNameLength);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "BinaryOpStub_%s_%s_%s",
               op_name,
               overwrite_name,
               BinaryOpIC::GetName(operands_type_));
  return name_;
}


void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
  Register left = a1;
  Register right = a0;

  Register scratch1 = t0;
  Register scratch2 = t1;

  ASSERT(right.is(a0));
  STATIC_ASSERT(kSmiTag == 0);

  Label not_smi_result;
  switch (op_) {
    case Token::ADD:
      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
      __ RetOnNoOverflow(scratch1);
      // No need to revert anything - right and left are intact.
      break;
    case Token::SUB:
      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
      __ RetOnNoOverflow(scratch1);
      // No need to revert anything - right and left are intact.
      break;
    case Token::MUL: {
      // Remove tag from one of the operands. This way the multiplication
      // result will be a smi if it fits the smi range.
      __ SmiUntag(scratch1, right);
      // Do multiplication.
      // lo = lower 32 bits of scratch1 * left.
      // hi = higher 32 bits of scratch1 * left.
      __ Mult(left, scratch1);
      // Check for overflowing the smi range - no overflow if higher 33 bits
      // of the result are identical.
      __ mflo(scratch1);
      __ mfhi(scratch2);
      __ sra(scratch1, scratch1, 31);
      __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
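      // The 64-bit product fits in 32 bits only if 'hi' equals the sign
      // extension of 'lo'. E.g. smi 2^28 (tagged 0x20000000) times untagged
      // 8 gives lo == 0x00000000 and hi == 0x00000001; sign(lo) == 0 != hi,
      // so the product overflows the smi range and we go slow.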
      // Go slow on zero result to handle -0.
      __ mflo(v0);
      __ Ret(ne, v0, Operand(zero_reg));
      // We need -0 if we were multiplying a negative number with 0 to get 0.
      // We know one of them was zero.
      __ Addu(scratch2, right, left);
      Label skip;
      // ARM uses the 'pl' condition, which is 'ge'.
      // Negating it results in 'lt'.
      __ Branch(&skip, lt, scratch2, Operand(zero_reg));
      ASSERT(Smi::FromInt(0) == 0);
      __ mov(v0, zero_reg);
      __ Ret();  // Return smi 0 if the non-zero one was positive.
      __ bind(&skip);
      // We fall through here if we multiplied a negative number with 0,
      // because that would mean we should produce -0.
    }
    break;
    case Token::DIV: {
      Label done;
      __ SmiUntag(scratch2, right);
      __ SmiUntag(scratch1, left);
      __ Div(scratch1, scratch2);
      // A minor optimization: div may be calculated asynchronously, so we
      // check for division by zero before getting the result.
      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
      // If the result is 0, we need to make sure the divisor (right) is
      // positive, otherwise it is a -0 case.
      // Quotient is in 'lo', remainder is in 'hi'.
      // Check for no remainder first.
      __ mfhi(scratch1);
      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
      __ mflo(scratch1);
      __ Branch(&done, ne, scratch1, Operand(zero_reg));
      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
      __ bind(&done);
      // Check that the signed result fits in a Smi.
      __ Addu(scratch2, scratch1, Operand(0x40000000));
      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
      __ SmiTag(v0, scratch1);
      __ Ret();
    }
    break;
    case Token::MOD: {
      Label done;
      __ SmiUntag(scratch2, right);
      __ SmiUntag(scratch1, left);
      __ Div(scratch1, scratch2);
      // A minor optimization: div may be calculated asynchronously, so we
      // check for division by 0 before calling mfhi.
      // Check for zero on the right hand side.
      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
      // If the result is 0, we need to make sure the dividend (left) is
      // positive (or 0), otherwise it is a -0 case.
      // Remainder is in 'hi'.
      __ mfhi(scratch2);
      __ Branch(&done, ne, scratch2, Operand(zero_reg));
      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
      __ bind(&done);
      // Check that the signed result fits in a Smi.
      __ Addu(scratch1, scratch2, Operand(0x40000000));
      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
      __ SmiTag(v0, scratch2);
      __ Ret();
    }
    break;
    case Token::BIT_OR:
      __ Or(v0, left, Operand(right));
      __ Ret();
      break;
    case Token::BIT_AND:
      __ And(v0, left, Operand(right));
      __ Ret();
      break;
    case Token::BIT_XOR:
      __ Xor(v0, left, Operand(right));
      __ Ret();
      break;
    case Token::SAR:
      // Remove tags from right operand.
      __ GetLeastBitsFromSmi(scratch1, right, 5);
      __ srav(scratch1, left, scratch1);
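      // Arithmetically shifting the tagged value shifts the payload but can
      // leave garbage in the tag bit; clearing that bit below retags the
      // result. E.g. smi -1 (tagged 0xfffffffe) shifted right by 1 gives
      // 0xffffffff, and clearing the tag bit yields 0xfffffffe == smi -1,
      // as required for -1 >> 1.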
      // Smi tag result.
      __ And(v0, scratch1, Operand(~kSmiTagMask));
      __ Ret();
      break;
    case Token::SHR:
      // Remove tags from operands. We can't do this on a 31 bit number
      // because then the 0s get shifted into bit 30 instead of bit 31.
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ srlv(v0, scratch1, scratch2);
      // Unsigned shift is not allowed to produce a negative number, so
      // check the sign bit and the sign bit after Smi tagging.
      __ And(scratch1, v0, Operand(0xc0000000));
      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
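      // Testing 0xc0000000 rejects results that are negative as an int32
      // (bit 31 set) or too large to retag (bit 30 set). E.g. for smi -1,
      // -1 >>> 0 produces 0xffffffff, which fails the test; in JavaScript
      // that value is 4294967295 and needs a heap number.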
      // Smi tag result.
      __ SmiTag(v0);
      __ Ret();
      break;
    case Token::SHL:
      // Remove tags from operands.
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ sllv(scratch1, scratch1, scratch2);
      // Check that the signed result fits in a Smi.
      __ Addu(scratch2, scratch1, Operand(0x40000000));
      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
      __ SmiTag(v0, scratch1);
      __ Ret();
      break;
    default:
      UNREACHABLE();
  }
  __ bind(&not_smi_result);
}


void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
                                       bool smi_operands,
                                       Label* not_numbers,
                                       Label* gc_required) {
  Register left = a1;
  Register right = a0;
  Register scratch1 = t3;
  Register scratch2 = t5;
  Register scratch3 = t0;

  ASSERT(smi_operands || (not_numbers != NULL));
  if (smi_operands && FLAG_debug_code) {
    __ AbortIfNotSmi(left);
    __ AbortIfNotSmi(right);
  }

  Register heap_number_map = t2;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
      // depending on whether FPU is available or not.
      FloatingPointHelper::Destination destination =
          CpuFeatures::IsSupported(FPU) &&
          op_ != Token::MOD ?
              FloatingPointHelper::kFPURegisters :
              FloatingPointHelper::kCoreRegisters;

      // Allocate new heap number for result.
      Register result = s0;
      GenerateHeapResultAllocation(
          masm, result, heap_number_map, scratch1, scratch2, gc_required);

      // Load the operands.
      if (smi_operands) {
        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
      } else {
        FloatingPointHelper::LoadOperands(masm,
                                          destination,
                                          heap_number_map,
                                          scratch1,
                                          scratch2,
                                          not_numbers);
      }

      // Calculate the result.
      if (destination == FloatingPointHelper::kFPURegisters) {
        // Using FPU registers:
        // f12: Left value.
        // f14: Right value.
        CpuFeatures::Scope scope(FPU);
        switch (op_) {
          case Token::ADD:
            __ add_d(f10, f12, f14);
            break;
          case Token::SUB:
            __ sub_d(f10, f12, f14);
            break;
          case Token::MUL:
            __ mul_d(f10, f12, f14);
            break;
          case Token::DIV:
            __ div_d(f10, f12, f14);
            break;
          default:
            UNREACHABLE();
        }

        // ARM uses a workaround here because of the unaligned HeapNumber
        // kValueOffset. On MIPS this workaround is built into sdc1 so
        // there's no point in generating even more instructions.
        __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
        __ mov(v0, result);
        __ Ret();
      } else {
        // Call the C function to handle the double operation.
        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
                                                         op_,
                                                         result,
                                                         scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }
      }
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      if (smi_operands) {
        __ SmiUntag(a3, left);
        __ SmiUntag(a2, right);
      } else {
        // Convert operands to 32-bit integers. Right in a2 and left in a3.
        FloatingPointHelper::ConvertNumberToInt32(masm,
                                                  left,
                                                  a3,
                                                  heap_number_map,
                                                  scratch1,
                                                  scratch2,
                                                  scratch3,
                                                  f0,
                                                  not_numbers);
        FloatingPointHelper::ConvertNumberToInt32(masm,
                                                  right,
                                                  a2,
                                                  heap_number_map,
                                                  scratch1,
                                                  scratch2,
                                                  scratch3,
                                                  f0,
                                                  not_numbers);
      }
      Label result_not_a_smi;
      switch (op_) {
        case Token::BIT_OR:
          __ Or(a2, a3, Operand(a2));
          break;
        case Token::BIT_XOR:
          __ Xor(a2, a3, Operand(a2));
          break;
        case Token::BIT_AND:
          __ And(a2, a3, Operand(a2));
          break;
        case Token::SAR:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(a2, a2, 5);
          __ srav(a2, a3, a2);
          break;
        case Token::SHR:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(a2, a2, 5);
          __ srlv(a2, a3, a2);
          // SHR is special because it is required to produce a positive
          // answer. The code below for writing into heap numbers isn't
          // capable of writing the register as an unsigned int so we go to
          // slow case if we hit this case.
          if (CpuFeatures::IsSupported(FPU)) {
            __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
          } else {
            __ Branch(not_numbers, lt, a2, Operand(zero_reg));
          }
          break;
        case Token::SHL:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(a2, a2, 5);
          __ sllv(a2, a3, a2);
          break;
        default:
          UNREACHABLE();
      }
      // Check that the *signed* result fits in a smi.
      __ Addu(a3, a2, Operand(0x40000000));
      __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
      __ SmiTag(v0, a2);
      __ Ret();

      // Allocate new heap number for result.
      __ bind(&result_not_a_smi);
      Register result = t1;
      if (smi_operands) {
        __ AllocateHeapNumber(
            result, scratch1, scratch2, heap_number_map, gc_required);
      } else {
        GenerateHeapResultAllocation(
            masm, result, heap_number_map, scratch1, scratch2, gc_required);
      }

      // a2: Answer as signed int32.
      // t1: Heap number to write answer into.

      // Nothing can go wrong now, so move the heap number to v0, which is
      // the result.
      __ mov(v0, t1);

      if (CpuFeatures::IsSupported(FPU)) {
        // Convert the int32 in a2 to the heap number in v0. As
        // mentioned above, SHR needs to always produce a positive result.
        CpuFeatures::Scope scope(FPU);
        __ mtc1(a2, f0);
        if (op_ == Token::SHR) {
          __ Cvt_d_uw(f0, f0);
        } else {
          __ cvt_d_w(f0, f0);
        }
        // ARM uses a workaround here because of the unaligned HeapNumber
        // kValueOffset. On MIPS this workaround is built into sdc1 so
        // there's no point in generating even more instructions.
        __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
        __ Ret();
      } else {
        // Tail call that writes the int32 in a2 to the heap number in v0,
        // using a3 and a0 as scratch. v0 is preserved and returned.
        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
        __ TailCallStub(&stub);
      }
      break;
    }
    default:
      UNREACHABLE();
  }
}


// Generate the smi code. If the operation on smis is successful, a return is
// generated. If the result is not a smi and heap number allocation is not
// requested, the code falls through. If number allocation is requested but a
// heap number cannot be allocated, the code jumps to the label gc_required.
void BinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* use_runtime,
    Label* gc_required,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
  Label not_smis;

  Register left = a1;
  Register right = a0;
  Register scratch1 = t3;
  Register scratch2 = t5;

  // Perform combined smi check on both operands.
  __ Or(scratch1, left, Operand(right));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(scratch1, &not_smis);

  // If the smi-smi operation results in a smi, a return is generated.
  GenerateSmiSmiOperation(masm);

  // If heap number results are possible, generate the result in an allocated
  // heap number.
  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
    GenerateFPOperation(masm, true, use_runtime, gc_required);
  }
  __ bind(&not_smis);
}


void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label not_smis, call_runtime;

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    // Only allow smi results.
    GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    GenerateSmiCode(masm,
                    &call_runtime,
                    &call_runtime,
                    ALLOW_HEAPNUMBER_RESULTS);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  GenerateTypeTransition(masm);

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::STRING);
  // Try to add arguments as strings, otherwise, transition to the generic
  // BinaryOpIC type.
  GenerateAddStrings(masm);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime);
  __ GetObjectType(left, a2, a2);
  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime);
  __ GetObjectType(right, a2, a2);
  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));

  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::INT32);

  Register left = a1;
  Register right = a0;
  Register scratch1 = t3;
  Register scratch2 = t5;
  FPURegister double_scratch = f0;
  FPURegister single_scratch = f6;

  Register heap_number_result = no_reg;
  Register heap_number_map = t2;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  Label call_runtime;
  // Label for the type transition, used for wrong input or output types.
  // Conceptually there are two transition causes (bad input and bad output),
  // but both are currently bound to the same position.
  Label transition;

  // Smi-smi fast case.
  Label skip;
  __ Or(scratch1, left, right);
  __ JumpIfNotSmi(scratch1, &skip);
  GenerateSmiSmiOperation(masm);
  // Fall through if the result is not a smi.
  __ bind(&skip);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      // Load both operands and check that they are 32-bit integers.
      // Jump to type transition if they are not. The registers a0 and a1
      // (right and left) are preserved for the runtime call.
      FloatingPointHelper::Destination destination =
          CpuFeatures::IsSupported(FPU) &&
          op_ != Token::MOD ?
              FloatingPointHelper::kFPURegisters :
              FloatingPointHelper::kCoreRegisters;

      FloatingPointHelper::LoadNumberAsInt32Double(masm,
                                                   right,
                                                   destination,
                                                   f14,
                                                   a2,
                                                   a3,
                                                   heap_number_map,
                                                   scratch1,
                                                   scratch2,
                                                   f2,
                                                   &transition);
      FloatingPointHelper::LoadNumberAsInt32Double(masm,
                                                   left,
                                                   destination,
                                                   f12,
                                                   t0,
                                                   t1,
                                                   heap_number_map,
                                                   scratch1,
                                                   scratch2,
                                                   f2,
                                                   &transition);

      if (destination == FloatingPointHelper::kFPURegisters) {
        CpuFeatures::Scope scope(FPU);
        Label return_heap_number;
        switch (op_) {
          case Token::ADD:
            __ add_d(f10, f12, f14);
            break;
          case Token::SUB:
            __ sub_d(f10, f12, f14);
            break;
          case Token::MUL:
            __ mul_d(f10, f12, f14);
            break;
          case Token::DIV:
            __ div_d(f10, f12, f14);
            break;
          default:
            UNREACHABLE();
        }

        if (op_ != Token::DIV) {
          // These operations produce an integer result.
          // Try to return a smi if we can.
          // Otherwise return a heap number if allowed, or jump to type
          // transition.

          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
          // On MIPS a lot of things cannot be implemented the same way so
          // right now it makes a lot more sense to just do things manually.

          // Save FCSR.
          __ cfc1(scratch1, FCSR);
          // Disable FPU exceptions.
          __ ctc1(zero_reg, FCSR);
          __ trunc_w_d(single_scratch, f10);
          // Retrieve FCSR.
          __ cfc1(scratch2, FCSR);
          // Restore FCSR.
          __ ctc1(scratch1, FCSR);

          // Check for inexact conversion or exception.
          __ And(scratch2, scratch2, kFCSRFlagMask);
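          // trunc_w_d sets FCSR cause/flag bits when the conversion is
          // inexact or invalid, i.e. when f10 was not exactly a 32-bit
          // integer. E.g. truncating 2.5 yields 2 with the inexact flag
          // set, so scratch2 ends up non-zero.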

          if (result_type_ <= BinaryOpIC::INT32) {
            // If scratch2 != 0, result does not fit in a 32-bit integer.
            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
          }

          // Check if the result fits in a smi.
          __ mfc1(scratch1, single_scratch);
          __ Addu(scratch2, scratch1, Operand(0x40000000));
          // If not try to return a heap number.
          __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
          // Check for minus zero. Return heap number for minus zero.
          Label not_zero;
          __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
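          // The truncated result is zero; distinguish +0 from -0 via the
          // sign bit of the double. f11 holds the high half of f10, and for
          // -0.0 that word is 0x80000000, which forces the heap number path
          // so the sign is not lost in a smi 0.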
          __ mfc1(scratch2, f11);
          __ And(scratch2, scratch2, HeapNumber::kSignMask);
          __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
          __ bind(&not_zero);

          // Tag the result and return.
          __ SmiTag(v0, scratch1);
          __ Ret();
        } else {
          // DIV just falls through to allocating a heap number.
        }

        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
                                                 : BinaryOpIC::INT32)) {
          __ bind(&return_heap_number);
          // We are using FPU registers so s0 is available.
          heap_number_result = s0;
          GenerateHeapResultAllocation(masm,
                                       heap_number_result,
                                       heap_number_map,
                                       scratch1,
                                       scratch2,
                                       &call_runtime);
          __ mov(v0, heap_number_result);
          __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
          __ Ret();
        }

        // A DIV operation expecting an integer result falls through
        // to type transition.

      } else {
        // We preserved a0 and a1 to be able to call runtime.
        // Save the left value on the stack.
        __ Push(t1, t0);

        Label pop_and_call_runtime;

        // Allocate a heap number to store the result.
        heap_number_result = s0;
        GenerateHeapResultAllocation(masm,
                                     heap_number_result,
                                     heap_number_map,
                                     scratch1,
                                     scratch2,
                                     &pop_and_call_runtime);

        // Load the left value from the value saved on the stack.
        __ Pop(a1, a0);

        // Call the C function to handle the double operation.
        FloatingPointHelper::CallCCodeForDoubleOperation(
            masm, op_, heap_number_result, scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }

        __ bind(&pop_and_call_runtime);
        __ Drop(2);
        __ Branch(&call_runtime);
      }

      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      Label return_heap_number;
      Register scratch3 = t1;
      // Convert operands to 32-bit integers. Right in a2 and left in a3. The
      // registers a0 and a1 (right and left) are preserved for the runtime
      // call.
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             left,
                                             a3,
                                             heap_number_map,
                                             scratch1,
                                             scratch2,
                                             scratch3,
                                             f0,
                                             &transition);
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             right,
                                             a2,
                                             heap_number_map,
                                             scratch1,
                                             scratch2,
                                             scratch3,
                                             f0,
                                             &transition);

      // The ECMA-262 standard specifies that, for shift operations, only the
      // 5 least significant bits of the shift value should be used.
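      // In JavaScript terms, x << 33 behaves like x << 1, because only
      // (33 & 0x1f) == 1 is used as the shift count.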
      switch (op_) {
        case Token::BIT_OR:
          __ Or(a2, a3, Operand(a2));
          break;
        case Token::BIT_XOR:
          __ Xor(a2, a3, Operand(a2));
          break;
        case Token::BIT_AND:
          __ And(a2, a3, Operand(a2));
          break;
        case Token::SAR:
          __ And(a2, a2, Operand(0x1f));
          __ srav(a2, a3, a2);
          break;
        case Token::SHR:
          __ And(a2, a2, Operand(0x1f));
          __ srlv(a2, a3, a2);
          // SHR is special because it is required to produce a positive
          // answer. We only get a negative result if the shift value (a2)
          // is 0; such a result cannot be represented as a signed 32-bit
          // integer, so try to return a heap number if we can.
          // The non-FPU code does not support this special case, so jump
          // to the runtime if FPU is not supported.
          if (CpuFeatures::IsSupported(FPU)) {
            __ Branch((result_type_ <= BinaryOpIC::INT32)
                          ? &transition
                          : &return_heap_number,
                      lt,
                      a2,
                      Operand(zero_reg));
          } else {
            __ Branch((result_type_ <= BinaryOpIC::INT32)
                          ? &transition
                          : &call_runtime,
                      lt,
                      a2,
                      Operand(zero_reg));
          }
          break;
        case Token::SHL:
          __ And(a2, a2, Operand(0x1f));
          __ sllv(a2, a3, a2);
          break;
        default:
          UNREACHABLE();
      }

2925 // Check if the result fits in a smi.
2926 __ Addu(scratch1, a2, Operand(0x40000000));
2927 // If not try to return a heap number. (We know the result is an int32.)
2928 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
2929 // Tag the result and return.
2930 __ SmiTag(v0, a2);
2931 __ Ret();
2932
2933 __ bind(&return_heap_number);
2934 heap_number_result = t1;
2935 GenerateHeapResultAllocation(masm,
2936 heap_number_result,
2937 heap_number_map,
2938 scratch1,
2939 scratch2,
2940 &call_runtime);
2941
2942 if (CpuFeatures::IsSupported(FPU)) {
2943 CpuFeatures::Scope scope(FPU);
2944
2945 if (op_ != Token::SHR) {
2946 // Convert the result to a floating point value.
2947 __ mtc1(a2, double_scratch);
2948 __ cvt_d_w(double_scratch, double_scratch);
2949 } else {
2950 // The result must be interpreted as an unsigned 32-bit integer.
2951 __ mtc1(a2, double_scratch);
2952 __ Cvt_d_uw(double_scratch, double_scratch);
2953 }
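        // The two conversions above differ only in how the bits in a2 are
        // read; a plain C++ sketch of the distinction:
        //
        //   static double FromSigned(int32_t bits) { return static_cast<double>(bits); }
        //   static double FromUnsigned(uint32_t bits) { return static_cast<double>(bits); }
        //
        // For bits 0x80000000, FromSigned yields -2147483648.0 while
        // FromUnsigned yields 2147483648.0; the SHR result needs the
        // unsigned reading.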
2954
2955 // Store the result.
2956 __ mov(v0, heap_number_result);
2957 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
2958 __ Ret();
2959 } else {
2960 // Tail call that writes the int32 in a2 to the heap number in v0, using
2961 // a3 and a1 as scratch. v0 is preserved and returned.
2962 __ mov(a0, t1);
2963 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
2964 __ TailCallStub(&stub);
2965 }
2966
2967 break;
2968 }
2969
2970 default:
2971 UNREACHABLE();
2972 }
2973
2974 if (transition.is_linked()) {
2975 __ bind(&transition);
2976 GenerateTypeTransition(masm);
2977 }
2978
2979 __ bind(&call_runtime);
2980 GenerateCallRuntime(masm);
2981}
2982
2983
danno@chromium.org40cb8782011-05-25 07:58:50 +00002984void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00002985 Label call_runtime;
2986
2987 if (op_ == Token::ADD) {
2988 // Handle string addition here, because it is the only operation
2989 // that does not do a ToNumber conversion on the operands.
2990 GenerateAddStrings(masm);
2991 }
2992
2993 // Convert oddball arguments to numbers.
2994 Label check, done;
2995 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
2996 __ Branch(&check, ne, a1, Operand(t0));
2997 if (Token::IsBitOp(op_)) {
2998 __ li(a1, Operand(Smi::FromInt(0)));
2999 } else {
3000 __ LoadRoot(a1, Heap::kNanValueRootIndex);
3001 }
3002 __ jmp(&done);
3003 __ bind(&check);
3004 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3005 __ Branch(&done, ne, a0, Operand(t0));
3006 if (Token::IsBitOp(op_)) {
3007 __ li(a0, Operand(Smi::FromInt(0)));
3008 } else {
3009 __ LoadRoot(a0, Heap::kNanValueRootIndex);
3010 }
3011 __ bind(&done);
3012
3013 GenerateHeapNumberStub(masm);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003014}
3015
3016
danno@chromium.org40cb8782011-05-25 07:58:50 +00003017void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003018 Label call_runtime;
3019 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3020
3021 __ bind(&call_runtime);
3022 GenerateCallRuntime(masm);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003023}
3024
3025
danno@chromium.org40cb8782011-05-25 07:58:50 +00003026void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003027 Label call_runtime, call_string_add_or_runtime;
3028
3029 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3030
3031 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3032
3033 __ bind(&call_string_add_or_runtime);
3034 if (op_ == Token::ADD) {
3035 GenerateAddStrings(masm);
3036 }
3037
3038 __ bind(&call_runtime);
3039 GenerateCallRuntime(masm);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003040}
3041
3042
danno@chromium.org40cb8782011-05-25 07:58:50 +00003043void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003044 ASSERT(op_ == Token::ADD);
3045 Label left_not_string, call_runtime;
3046
3047 Register left = a1;
3048 Register right = a0;
3049
3050 // Check if left argument is a string.
3051 __ JumpIfSmi(left, &left_not_string);
3052 __ GetObjectType(left, a2, a2);
3053 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3054
3055 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3056 GenerateRegisterArgsPush(masm);
3057 __ TailCallStub(&string_add_left_stub);
3058
3059 // Left operand is not a string, test right.
3060 __ bind(&left_not_string);
3061 __ JumpIfSmi(right, &call_runtime);
3062 __ GetObjectType(right, a2, a2);
3063 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3064
3065 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3066 GenerateRegisterArgsPush(masm);
3067 __ TailCallStub(&string_add_right_stub);
3068
3069 // At least one argument is not a string.
3070 __ bind(&call_runtime);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003071}
3072
3073
danno@chromium.org40cb8782011-05-25 07:58:50 +00003074void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003075 GenerateRegisterArgsPush(masm);
3076 switch (op_) {
3077 case Token::ADD:
3078 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3079 break;
3080 case Token::SUB:
3081 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3082 break;
3083 case Token::MUL:
3084 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3085 break;
3086 case Token::DIV:
3087 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3088 break;
3089 case Token::MOD:
3090 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3091 break;
3092 case Token::BIT_OR:
3093 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3094 break;
3095 case Token::BIT_AND:
3096 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3097 break;
3098 case Token::BIT_XOR:
3099 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3100 break;
3101 case Token::SAR:
3102 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3103 break;
3104 case Token::SHR:
3105 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3106 break;
3107 case Token::SHL:
3108 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3109 break;
3110 default:
3111 UNREACHABLE();
3112 }
lrn@chromium.org7516f052011-03-30 08:52:27 +00003113}
3114
3115
danno@chromium.org40cb8782011-05-25 07:58:50 +00003116void BinaryOpStub::GenerateHeapResultAllocation(
lrn@chromium.org7516f052011-03-30 08:52:27 +00003117 MacroAssembler* masm,
3118 Register result,
3119 Register heap_number_map,
3120 Register scratch1,
3121 Register scratch2,
3122 Label* gc_required) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003123
3124  // The code below will clobber result if allocation fails. To keep both
3125  // arguments intact for the runtime call, result cannot be one of them.
3126 ASSERT(!result.is(a0) && !result.is(a1));
3127
3128 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3129 Label skip_allocation, allocated;
3130 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3131 // If the overwritable operand is already an object, we skip the
3132 // allocation of a heap number.
3133 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3134 // Allocate a heap number for the result.
3135 __ AllocateHeapNumber(
3136 result, scratch1, scratch2, heap_number_map, gc_required);
3137 __ Branch(&allocated);
3138 __ bind(&skip_allocation);
3139 // Use object holding the overwritable operand for result.
3140 __ mov(result, overwritable_operand);
3141 __ bind(&allocated);
3142 } else {
3143 ASSERT(mode_ == NO_OVERWRITE);
3144 __ AllocateHeapNumber(
3145 result, scratch1, scratch2, heap_number_map, gc_required);
3146 }
lrn@chromium.org7516f052011-03-30 08:52:27 +00003147}
3148
3149
danno@chromium.org40cb8782011-05-25 07:58:50 +00003150void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003151 __ Push(a1, a0);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003152}
3153
3154
3155
3156void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003157 // Untagged case: double input in f4, double result goes
3158 // into f4.
3159 // Tagged case: tagged input on top of stack and in a0,
3160 // tagged result (heap number) goes into v0.
3161
3162 Label input_not_smi;
3163 Label loaded;
3164 Label calculate;
3165 Label invalid_cache;
3166 const Register scratch0 = t5;
3167 const Register scratch1 = t3;
3168 const Register cache_entry = a0;
3169 const bool tagged = (argument_type_ == TAGGED);
3170
3171 if (CpuFeatures::IsSupported(FPU)) {
3172 CpuFeatures::Scope scope(FPU);
3173
3174 if (tagged) {
3175      // Argument is a number and is on the stack and in a0.
3176 // Load argument and check if it is a smi.
3177 __ JumpIfNotSmi(a0, &input_not_smi);
3178
3179 // Input is a smi. Convert to double and load the low and high words
3180 // of the double into a2, a3.
3181 __ sra(t0, a0, kSmiTagSize);
3182 __ mtc1(t0, f4);
3183 __ cvt_d_w(f4, f4);
danno@chromium.org40cb8782011-05-25 07:58:50 +00003184 __ Move(a2, a3, f4);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003185 __ Branch(&loaded);
3186
3187 __ bind(&input_not_smi);
3188 // Check if input is a HeapNumber.
3189 __ CheckMap(a0,
3190 a1,
3191 Heap::kHeapNumberMapRootIndex,
3192 &calculate,
danno@chromium.org40cb8782011-05-25 07:58:50 +00003193 DONT_DO_SMI_CHECK);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003194 // Input is a HeapNumber. Store the
3195 // low and high words into a2, a3.
3196 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3197 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3198 } else {
3199 // Input is untagged double in f4. Output goes to f4.
danno@chromium.org40cb8782011-05-25 07:58:50 +00003200 __ Move(a2, a3, f4);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003201 }
3202 __ bind(&loaded);
3203 // a2 = low 32 bits of double value.
3204 // a3 = high 32 bits of double value.
3205 // Compute hash (the shifts are arithmetic):
3206 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3207 __ Xor(a1, a2, a3);
3208 __ sra(t0, a1, 16);
3209 __ Xor(a1, a1, t0);
3210 __ sra(t0, a1, 8);
3211 __ Xor(a1, a1, t0);
3212 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3213 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
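    // The same hash written out in C++ (a sketch; note the arithmetic
    // shifts, matching the sra instructions above):
    //
    //   static int CacheHash(uint32_t lo, uint32_t hi) {
    //     int32_t h = static_cast<int32_t>(lo ^ hi);
    //     h ^= h >> 16;
    //     h ^= h >> 8;
    //     return h & (TranscendentalCache::SubCache::kCacheSize - 1);
    //   }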
3214
3215 // a2 = low 32 bits of double value.
3216 // a3 = high 32 bits of double value.
3217 // a1 = TranscendentalCache::hash(double value).
3218 __ li(cache_entry, Operand(
3219 ExternalReference::transcendental_cache_array_address(
3220 masm->isolate())));
3221 // a0 points to cache array.
3222 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3223 Isolate::Current()->transcendental_cache()->caches_[0])));
3224 // a0 points to the cache for the type type_.
3225 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3226 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3227
3228#ifdef DEBUG
3229    // Check that the layout of cache elements matches expectations.
3230 { TranscendentalCache::SubCache::Element test_elem[2];
3231 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3232 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3233 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3234 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3235 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3236 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3237 CHECK_EQ(0, elem_in0 - elem_start);
3238 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3239 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3240 }
3241#endif
3242
3243    // Find the address of the a1'th entry in the cache, i.e., &a0[a1*12].
3244 __ sll(t0, a1, 1);
3245 __ Addu(a1, a1, t0);
3246 __ sll(t0, a1, 2);
3247 __ Addu(cache_entry, cache_entry, t0);
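    // Each element is 12 bytes (two uint32_t inputs plus a result pointer,
    // per the layout check above), so the target address is base + hash * 12;
    // the shift/add pair computes hash * 12 as ((hash + (hash << 1)) << 2).
    // In C++ terms, as a sketch:
    //
    //   char* EntryAddress(char* base, int hash) {
    //     return base + hash * 12;  // hash * 3, then * 4.
    //   }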
3248
3249 // Check if cache matches: Double value is stored in uint32_t[2] array.
3250 __ lw(t0, MemOperand(cache_entry, 0));
3251 __ lw(t1, MemOperand(cache_entry, 4));
3252 __ lw(t2, MemOperand(cache_entry, 8));
3253 __ Addu(cache_entry, cache_entry, 12);
3254 __ Branch(&calculate, ne, a2, Operand(t0));
3255 __ Branch(&calculate, ne, a3, Operand(t1));
3256 // Cache hit. Load result, cleanup and return.
3257 if (tagged) {
3258 // Pop input value from stack and load result into v0.
3259 __ Drop(1);
3260 __ mov(v0, t2);
3261 } else {
3262 // Load result into f4.
3263 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3264 }
3265 __ Ret();
3266 } // if (CpuFeatures::IsSupported(FPU))
3267
3268 __ bind(&calculate);
3269 if (tagged) {
3270 __ bind(&invalid_cache);
3271 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3272 masm->isolate()),
3273 1,
3274 1);
3275 } else {
3276 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3277 CpuFeatures::Scope scope(FPU);
3278
3279 Label no_update;
3280 Label skip_cache;
3281 const Register heap_number_map = t2;
3282
3283 // Call C function to calculate the result and update the cache.
3284 // Register a0 holds precalculated cache entry address; preserve
3285 // it on the stack and pop it into register cache_entry after the
3286 // call.
3287 __ push(cache_entry);
3288 GenerateCallCFunction(masm, scratch0);
3289 __ GetCFunctionDoubleResult(f4);
3290
3291 // Try to update the cache. If we cannot allocate a
3292 // heap number, we return the result without updating.
3293 __ pop(cache_entry);
3294 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3295 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3296 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3297
3298 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3299 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3300 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3301
3302 __ mov(v0, cache_entry);
3303 __ Ret();
3304
3305 __ bind(&invalid_cache);
3306 // The cache is invalid. Call runtime which will recreate the
3307 // cache.
3308 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3309 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3310 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3311 __ EnterInternalFrame();
3312 __ push(a0);
3313 __ CallRuntime(RuntimeFunction(), 1);
3314 __ LeaveInternalFrame();
3315 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3316 __ Ret();
3317
3318 __ bind(&skip_cache);
3319 // Call C function to calculate the result and answer directly
3320 // without updating the cache.
3321 GenerateCallCFunction(masm, scratch0);
3322 __ GetCFunctionDoubleResult(f4);
3323 __ bind(&no_update);
3324
3325 // We return the value in f4 without adding it to the cache, but
3326 // we cause a scavenging GC so that future allocations will succeed.
3327 __ EnterInternalFrame();
3328
3329 // Allocate an aligned object larger than a HeapNumber.
3330 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3331 __ li(scratch0, Operand(4 * kPointerSize));
3332 __ push(scratch0);
3333 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3334 __ LeaveInternalFrame();
3335 __ Ret();
3336 }
3337}
3338
3339
3340void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3341 Register scratch) {
3342 __ push(ra);
3343 __ PrepareCallCFunction(2, scratch);
danno@chromium.org40cb8782011-05-25 07:58:50 +00003344 if (IsMipsSoftFloatABI) {
3345 __ Move(v0, v1, f4);
3346 } else {
3347 __ mov_d(f12, f4);
3348 }
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003349 switch (type_) {
3350 case TranscendentalCache::SIN:
3351 __ CallCFunction(
3352 ExternalReference::math_sin_double_function(masm->isolate()), 2);
3353 break;
3354 case TranscendentalCache::COS:
3355 __ CallCFunction(
3356 ExternalReference::math_cos_double_function(masm->isolate()), 2);
3357 break;
3358 case TranscendentalCache::LOG:
3359 __ CallCFunction(
3360 ExternalReference::math_log_double_function(masm->isolate()), 2);
3361 break;
3362 default:
3363 UNIMPLEMENTED();
3364 break;
3365 }
3366 __ pop(ra);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003367}
3368
3369
3370Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003371 switch (type_) {
3372 // Add more cases when necessary.
3373 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3374 case TranscendentalCache::COS: return Runtime::kMath_cos;
3375 case TranscendentalCache::LOG: return Runtime::kMath_log;
3376 default:
3377 UNIMPLEMENTED();
3378 return Runtime::kAbort;
3379 }
lrn@chromium.org7516f052011-03-30 08:52:27 +00003380}
3381
3382
3383void StackCheckStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003384 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003385}
3386
3387
karlklose@chromium.org83a47282011-05-11 11:54:09 +00003388void MathPowStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003389 Label call_runtime;
3390
3391 if (CpuFeatures::IsSupported(FPU)) {
3392 CpuFeatures::Scope scope(FPU);
3393
3394 Label base_not_smi;
3395 Label exponent_not_smi;
3396 Label convert_exponent;
3397
3398 const Register base = a0;
3399 const Register exponent = a2;
3400 const Register heapnumbermap = t1;
3401 const Register heapnumber = s0; // Callee-saved register.
3402 const Register scratch = t2;
3403 const Register scratch2 = t3;
3404
3405    // Allocate FP values in the ABI-parameter-passing regs.
3406 const DoubleRegister double_base = f12;
3407 const DoubleRegister double_exponent = f14;
3408 const DoubleRegister double_result = f0;
3409 const DoubleRegister double_scratch = f2;
3410
3411 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3412 __ lw(base, MemOperand(sp, 1 * kPointerSize));
3413 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3414
3415 // Convert base to double value and store it in f0.
3416 __ JumpIfNotSmi(base, &base_not_smi);
3417 // Base is a Smi. Untag and convert it.
3418 __ SmiUntag(base);
3419 __ mtc1(base, double_scratch);
3420 __ cvt_d_w(double_base, double_scratch);
3421 __ Branch(&convert_exponent);
3422
3423 __ bind(&base_not_smi);
3424 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3425 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3426 // Base is a heapnumber. Load it into double register.
3427 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3428
3429 __ bind(&convert_exponent);
3430 __ JumpIfNotSmi(exponent, &exponent_not_smi);
3431 __ SmiUntag(exponent);
3432
3433 // The base is in a double register and the exponent is
3434 // an untagged smi. Allocate a heap number and call a
3435 // C function for integer exponents. The register containing
3436 // the heap number is callee-saved.
3437 __ AllocateHeapNumber(heapnumber,
3438 scratch,
3439 scratch2,
3440 heapnumbermap,
3441 &call_runtime);
3442 __ push(ra);
3443 __ PrepareCallCFunction(3, scratch);
danno@chromium.org40cb8782011-05-25 07:58:50 +00003444 __ SetCallCDoubleArguments(double_base, exponent);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003445 __ CallCFunction(
3446 ExternalReference::power_double_int_function(masm->isolate()), 3);
3447 __ pop(ra);
3448 __ GetCFunctionDoubleResult(double_result);
3449 __ sdc1(double_result,
3450 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3451 __ mov(v0, heapnumber);
3452 __ DropAndRet(2 * kPointerSize);
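    // The integer-exponent helper called here presumably reduces to repeated
    // squaring; a sketch of that classic algorithm (an assumption for
    // illustration, not a quote of the runtime's actual implementation):
    //
    //   static double PowerDoubleInt(double base, int exponent) {
    //     unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
    //                               : static_cast<unsigned>(exponent);
    //     double result = 1.0;
    //     for (double b = base; e != 0; e >>= 1, b *= b) {
    //       if (e & 1) result *= b;
    //     }
    //     return exponent < 0 ? 1.0 / result : result;
    //   }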
3453
3454 __ bind(&exponent_not_smi);
3455 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3456 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3457 // Exponent is a heapnumber. Load it into double register.
3458 __ ldc1(double_exponent,
3459 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3460
3461 // The base and the exponent are in double registers.
3462 // Allocate a heap number and call a C function for
3463 // double exponents. The register containing
3464 // the heap number is callee-saved.
3465 __ AllocateHeapNumber(heapnumber,
3466 scratch,
3467 scratch2,
3468 heapnumbermap,
3469 &call_runtime);
3470 __ push(ra);
3471 __ PrepareCallCFunction(4, scratch);
3472 // ABI (o32) for func(double a, double b): a in f12, b in f14.
3473 ASSERT(double_base.is(f12));
3474 ASSERT(double_exponent.is(f14));
danno@chromium.org40cb8782011-05-25 07:58:50 +00003475 __ SetCallCDoubleArguments(double_base, double_exponent);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003476 __ CallCFunction(
3477 ExternalReference::power_double_double_function(masm->isolate()), 4);
3478 __ pop(ra);
3479 __ GetCFunctionDoubleResult(double_result);
3480 __ sdc1(double_result,
3481 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3482 __ mov(v0, heapnumber);
3483 __ DropAndRet(2 * kPointerSize);
3484 }
3485
3486 __ bind(&call_runtime);
3487 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
karlklose@chromium.org83a47282011-05-11 11:54:09 +00003488}
3489
3490
lrn@chromium.org7516f052011-03-30 08:52:27 +00003491bool CEntryStub::NeedsImmovableCode() {
3492 return true;
3493}
3494
3495
3496void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003497 __ Throw(v0);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003498}
3499
3500
3501void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3502 UncatchableExceptionType type) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003503 __ ThrowUncatchable(type, v0);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003504}
3505
3506
3507void CEntryStub::GenerateCore(MacroAssembler* masm,
3508 Label* throw_normal_exception,
3509 Label* throw_termination_exception,
3510 Label* throw_out_of_memory_exception,
3511 bool do_gc,
3512 bool always_allocate) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003513 // v0: result parameter for PerformGC, if any
3514 // s0: number of arguments including receiver (C callee-saved)
3515 // s1: pointer to the first argument (C callee-saved)
3516 // s2: pointer to builtin function (C callee-saved)
3517
3518 if (do_gc) {
3519 // Move result passed in v0 into a0 to call PerformGC.
3520 __ mov(a0, v0);
3521 __ PrepareCallCFunction(1, a1);
3522 __ CallCFunction(
3523 ExternalReference::perform_gc_function(masm->isolate()), 1);
3524 }
3525
3526 ExternalReference scope_depth =
3527 ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
3528 if (always_allocate) {
3529 __ li(a0, Operand(scope_depth));
3530 __ lw(a1, MemOperand(a0));
3531 __ Addu(a1, a1, Operand(1));
3532 __ sw(a1, MemOperand(a0));
3533 }
3534
3535 // Prepare arguments for C routine: a0 = argc, a1 = argv
3536 __ mov(a0, s0);
3537 __ mov(a1, s1);
3538
3539 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3540 // also need to reserve the 4 argument slots on the stack.
3541
3542 __ AssertStackIsAligned();
3543
3544 __ li(a2, Operand(ExternalReference::isolate_address()));
3545
3546 // From arm version of this function:
3547 // TODO(1242173): To let the GC traverse the return address of the exit
3548 // frames, we need to know where the return address is. Right now,
3549 // we push it on the stack to be able to find it again, but we never
3550 // restore from it in case of changes, which makes it impossible to
3551 // support moving the C entry code stub. This should be fixed, but currently
3552 // this is OK because the CEntryStub gets generated so early in the V8 boot
3553 // sequence that it is not moving ever.
3554
3555 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3556 // This branch-and-link sequence is needed to find the current PC on mips,
3557 // saved to the ra register.
3558 // Use masm-> here instead of the double-underscore macro since extra
3559 // coverage code can interfere with the proper calculation of ra.
3560 Label find_ra;
3561 masm->bal(&find_ra); // bal exposes branch delay slot.
3562 masm->nop(); // Branch delay slot nop.
3563 masm->bind(&find_ra);
3564
3565 // Adjust the value in ra to point to the correct return location, 2nd
3566 // instruction past the real call into C code (the jalr(t9)), and push it.
3567 // This is the return address of the exit frame.
3568 const int kNumInstructionsToJump = 6;
3569 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3570 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
3571 masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
3572 // Stack is still aligned.
3573
3574 // Call the C routine.
3575 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3576 masm->jalr(t9);
3577 masm->nop(); // Branch delay slot nop.
3578 // Make sure the stored 'ra' points to this position.
3579 ASSERT_EQ(kNumInstructionsToJump,
3580 masm->InstructionsGeneratedSince(&find_ra));
3581 }
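  // In effect, the sequence above hand-computes the return address a normal
  // call would produce: bal leaves ra = &find_ra (pc of bal + 8), and the
  // Addu advances it past the six instructions that follow the label
  // (Addu, sw, Subu, mov, jalr, nop), so the stored value is exactly the
  // address after the jalr's delay slot. In pseudo-code:
  //
  //   ra = &find_ra;                               // Set by bal.
  //   ra += kNumInstructionsToJump * kPointerSize; // 6 * 4 bytes.
  //   *sp = ra;                                    // Exit-frame return slot.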
3582
3583 // Restore stack (remove arg slots).
3584 __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
3585
3586 if (always_allocate) {
3587 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3588 __ li(a2, Operand(scope_depth));
3589 __ lw(a3, MemOperand(a2));
3590 __ Subu(a3, a3, Operand(1));
3591 __ sw(a3, MemOperand(a2));
3592 }
3593
3594 // Check for failure result.
3595 Label failure_returned;
3596 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3597 __ addiu(a2, v0, 1);
3598 __ andi(t0, a2, kFailureTagMask);
3599 __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
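  // Failure objects carry kFailureTag in their low bits, so adding 1 clears
  // those bits exactly for failures (this is what the STATIC_ASSERT above
  // encodes). A sketch of the predicate:
  //
  //   static bool IsFailure(int32_t value) {
  //     return ((value + 1) & kFailureTagMask) == 0;
  //   }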
3600
3601 // Exit C frame and return.
3602 // v0:v1: result
3603 // sp: stack pointer
3604 // fp: frame pointer
3605 __ LeaveExitFrame(save_doubles_, s0);
3606 __ Ret();
3607
3608 // Check if we should retry or throw exception.
3609 Label retry;
3610 __ bind(&failure_returned);
3611 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3612 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3613 __ Branch(&retry, eq, t0, Operand(zero_reg));
3614
3615 // Special handling of out of memory exceptions.
3616 Failure* out_of_memory = Failure::OutOfMemoryException();
3617 __ Branch(throw_out_of_memory_exception, eq,
3618 v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3619
3620 // Retrieve the pending exception and clear the variable.
3621 __ li(t0,
3622 Operand(ExternalReference::the_hole_value_location(masm->isolate())));
3623 __ lw(a3, MemOperand(t0));
3624 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3625 masm->isolate())));
3626 __ lw(v0, MemOperand(t0));
3627 __ sw(a3, MemOperand(t0));
3628
3629 // Special handling of termination exceptions which are uncatchable
3630 // by javascript code.
3631 __ Branch(throw_termination_exception, eq,
3632 v0, Operand(masm->isolate()->factory()->termination_exception()));
3633
3634 // Handle normal exception.
3635 __ jmp(throw_normal_exception);
3636
3637 __ bind(&retry);
3638  // The last failure (v0) will be moved to (a0) as the parameter when retrying.
lrn@chromium.org7516f052011-03-30 08:52:27 +00003639}
3640
3641
3642void CEntryStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003643  // Called from JavaScript; parameters are on the stack as if calling a JS function.
3644 // a0: number of arguments including receiver
3645 // a1: pointer to builtin function
3646 // fp: frame pointer (restored after C call)
3647 // sp: stack pointer (restored as callee's sp after C call)
3648 // cp: current context (C callee-saved)
3649
3650 // NOTE: Invocations of builtins may return failure objects
3651 // instead of a proper result. The builtin entry handles
3652 // this by performing a garbage collection and retrying the
3653 // builtin once.
3654
3655 // Compute the argv pointer in a callee-saved register.
3656 __ sll(s1, a0, kPointerSizeLog2);
3657 __ Addu(s1, sp, s1);
3658 __ Subu(s1, s1, Operand(kPointerSize));
3659
3660 // Enter the exit frame that transitions from JavaScript to C++.
3661 __ EnterExitFrame(save_doubles_);
3662
3663 // Setup argc and the builtin function in callee-saved registers.
3664 __ mov(s0, a0);
3665 __ mov(s2, a1);
3666
3667 // s0: number of arguments (C callee-saved)
3668 // s1: pointer to first argument (C callee-saved)
3669 // s2: pointer to builtin function (C callee-saved)
3670
3671 Label throw_normal_exception;
3672 Label throw_termination_exception;
3673 Label throw_out_of_memory_exception;
3674
3675 // Call into the runtime system.
3676 GenerateCore(masm,
3677 &throw_normal_exception,
3678 &throw_termination_exception,
3679 &throw_out_of_memory_exception,
3680 false,
3681 false);
3682
3683 // Do space-specific GC and retry runtime call.
3684 GenerateCore(masm,
3685 &throw_normal_exception,
3686 &throw_termination_exception,
3687 &throw_out_of_memory_exception,
3688 true,
3689 false);
3690
3691 // Do full GC and retry runtime call one final time.
3692 Failure* failure = Failure::InternalError();
3693 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
3694 GenerateCore(masm,
3695 &throw_normal_exception,
3696 &throw_termination_exception,
3697 &throw_out_of_memory_exception,
3698 true,
3699 true);
3700
3701 __ bind(&throw_out_of_memory_exception);
3702 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
3703
3704 __ bind(&throw_termination_exception);
3705 GenerateThrowUncatchable(masm, TERMINATION);
3706
3707 __ bind(&throw_normal_exception);
3708 GenerateThrowTOS(masm);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003709}
3710
3711
3712void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003713 Label invoke, exit;
3714
3715 // Registers:
3716 // a0: entry address
3717 // a1: function
3718  // a2: receiver
3719 // a3: argc
3720 //
3721 // Stack:
3722 // 4 args slots
3723 // args
3724
3725 // Save callee saved registers on the stack.
3726 __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
3727
3728 // Load argv in s0 register.
3729 __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize +
3730 StandardFrameConstants::kCArgsSlotsSize));
3731
3732 // We build an EntryFrame.
3733 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3734 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3735 __ li(t2, Operand(Smi::FromInt(marker)));
3736 __ li(t1, Operand(Smi::FromInt(marker)));
3737 __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
3738 masm->isolate())));
3739 __ lw(t0, MemOperand(t0));
3740 __ Push(t3, t2, t1, t0);
3741 // Setup frame pointer for the frame to be pushed.
3742 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
3743
3744 // Registers:
3745 // a0: entry_address
3746 // a1: function
3747  // a2: receiver_pointer
3748 // a3: argc
3749 // s0: argv
3750 //
3751 // Stack:
3752 // caller fp |
3753 // function slot | entry frame
3754 // context slot |
3755 // bad fp (0xff...f) |
3756 // callee saved registers + ra
3757 // 4 args slots
3758 // args
3759
3760 #ifdef ENABLE_LOGGING_AND_PROFILING
3761 // If this is the outermost JS call, set js_entry_sp value.
danno@chromium.org40cb8782011-05-25 07:58:50 +00003762 Label non_outermost_js;
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003763 ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
3764 masm->isolate());
3765 __ li(t1, Operand(ExternalReference(js_entry_sp)));
3766 __ lw(t2, MemOperand(t1));
danno@chromium.org40cb8782011-05-25 07:58:50 +00003767 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
3768 __ sw(fp, MemOperand(t1));
3769 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3770 Label cont;
3771 __ b(&cont);
3772 __ nop(); // Branch delay slot nop.
3773 __ bind(&non_outermost_js);
3774 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
3775 __ bind(&cont);
3776 __ push(t0);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003777 #endif
3778
3779 // Call a faked try-block that does the invoke.
3780 __ bal(&invoke); // bal exposes branch delay slot.
3781 __ nop(); // Branch delay slot nop.
3782
3783 // Caught exception: Store result (exception) in the pending
3784 // exception field in the JSEnv and return a failure sentinel.
3785 // Coming in here the fp will be invalid because the PushTryHandler below
3786 // sets it to 0 to signal the existence of the JSEntry frame.
3787 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3788 masm->isolate())));
3789 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
3790 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3791 __ b(&exit); // b exposes branch delay slot.
3792 __ nop(); // Branch delay slot nop.
3793
3794 // Invoke: Link this frame into the handler chain.
3795 __ bind(&invoke);
3796 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
3797 // If an exception not caught by another handler occurs, this handler
3798 // returns control to the code after the bal(&invoke) above, which
3799 // restores all kCalleeSaved registers (including cp and fp) to their
3800 // saved values before returning a failure to C.
3801
3802 // Clear any pending exceptions.
3803 __ li(t0,
3804 Operand(ExternalReference::the_hole_value_location(masm->isolate())));
3805 __ lw(t1, MemOperand(t0));
3806 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3807 masm->isolate())));
3808 __ sw(t1, MemOperand(t0));
3809
3810 // Invoke the function by calling through JS entry trampoline builtin.
3811 // Notice that we cannot store a reference to the trampoline code directly in
3812 // this stub, because runtime stubs are not traversed when doing GC.
3813
3814 // Registers:
3815 // a0: entry_address
3816 // a1: function
3817  // a2: receiver_pointer
3818 // a3: argc
3819 // s0: argv
3820 //
3821 // Stack:
3822 // handler frame
3823 // entry frame
3824 // callee saved registers + ra
3825 // 4 args slots
3826 // args
3827
3828 if (is_construct) {
3829 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
3830 masm->isolate());
3831 __ li(t0, Operand(construct_entry));
3832 } else {
3833 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
3834 __ li(t0, Operand(entry));
3835 }
3836 __ lw(t9, MemOperand(t0)); // Deref address.
3837
3838 // Call JSEntryTrampoline.
3839 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
3840 __ Call(t9);
3841
danno@chromium.org40cb8782011-05-25 07:58:50 +00003842 // Unlink this frame from the handler chain.
3843 __ PopTryHandler();
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003844
danno@chromium.org40cb8782011-05-25 07:58:50 +00003845 __ bind(&exit); // v0 holds result
3846 #ifdef ENABLE_LOGGING_AND_PROFILING
3847 // Check if the current stack frame is marked as the outermost JS frame.
3848 Label non_outermost_js_2;
3849 __ pop(t1);
3850 __ Branch(&non_outermost_js_2, ne, t1,
3851 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3852 __ li(t1, Operand(ExternalReference(js_entry_sp)));
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003853 __ sw(zero_reg, MemOperand(t1));
danno@chromium.org40cb8782011-05-25 07:58:50 +00003854 __ bind(&non_outermost_js_2);
3855 #endif
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003856
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003857 // Restore the top frame descriptors from the stack.
3858 __ pop(t1);
3859 __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
3860 masm->isolate())));
3861 __ sw(t1, MemOperand(t0));
3862
3863 // Reset the stack to the callee saved registers.
3864 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
3865
3866 // Restore callee saved registers from the stack.
3867 __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
3868 // Return.
3869 __ Jump(ra);
lrn@chromium.org7516f052011-03-30 08:52:27 +00003870}
3871
3872
danno@chromium.org40cb8782011-05-25 07:58:50 +00003873// Uses registers a0 to t0.
3874// Expected input (depending on whether args are in registers or on the stack):
3875// * object: a0 or at sp + 1 * kPointerSize.
3876// * function: a1 or at sp.
3877//
3878// Inlined call site patching is a crankshaft-specific feature that is not
3879// implemented on MIPS.
lrn@chromium.org7516f052011-03-30 08:52:27 +00003880void InstanceofStub::Generate(MacroAssembler* masm) {
danno@chromium.org40cb8782011-05-25 07:58:50 +00003881 // This is a crankshaft-specific feature that has not been implemented yet.
3882 ASSERT(!HasCallSiteInlineCheck());
3883 // Call site inlining and patching implies arguments in registers.
3884 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
3885 // ReturnTrueFalse is only implemented for inlined call sites.
3886 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
3887
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003888 // Fixed register usage throughout the stub:
3889 const Register object = a0; // Object (lhs).
danno@chromium.org40cb8782011-05-25 07:58:50 +00003890 Register map = a3; // Map of the object.
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003891 const Register function = a1; // Function (rhs).
3892 const Register prototype = t0; // Prototype of the function.
danno@chromium.org40cb8782011-05-25 07:58:50 +00003893 const Register inline_site = t5;
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003894 const Register scratch = a2;
danno@chromium.org40cb8782011-05-25 07:58:50 +00003895
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003896 Label slow, loop, is_instance, is_not_instance, not_js_object;
danno@chromium.org40cb8782011-05-25 07:58:50 +00003897
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003898 if (!HasArgsInRegisters()) {
3899 __ lw(object, MemOperand(sp, 1 * kPointerSize));
3900 __ lw(function, MemOperand(sp, 0));
3901 }
3902
3903 // Check that the left hand is a JS object and load map.
3904 __ JumpIfSmi(object, &not_js_object);
3905 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
3906
danno@chromium.org40cb8782011-05-25 07:58:50 +00003907  // If there is a call site cache, don't look in the global cache, but do the
3908  // real lookup and update the call site cache.
3909 if (!HasCallSiteInlineCheck()) {
3910 Label miss;
3911 __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
3912 __ Branch(&miss, ne, function, Operand(t1));
3913 __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
3914 __ Branch(&miss, ne, map, Operand(t1));
3915 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3916 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003917
danno@chromium.org40cb8782011-05-25 07:58:50 +00003918 __ bind(&miss);
3919 }
3920
3921 // Get the prototype of the function.
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003922 __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
3923
3924 // Check that the function prototype is a JS object.
3925 __ JumpIfSmi(prototype, &slow);
3926 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
3927
danno@chromium.org40cb8782011-05-25 07:58:50 +00003928 // Update the global instanceof or call site inlined cache with the current
3929 // map and function. The cached answer will be set when it is known below.
3930 if (!HasCallSiteInlineCheck()) {
3931 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
3932 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
3933 } else {
3934 UNIMPLEMENTED_MIPS();
3935 }
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003936
3937 // Register mapping: a3 is object map and t0 is function prototype.
3938 // Get prototype of object into a2.
3939 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
3940
danno@chromium.org40cb8782011-05-25 07:58:50 +00003941 // We don't need map any more. Use it as a scratch register.
3942 Register scratch2 = map;
3943 map = no_reg;
3944
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003945 // Loop through the prototype chain looking for the function prototype.
danno@chromium.org40cb8782011-05-25 07:58:50 +00003946 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003947 __ bind(&loop);
3948 __ Branch(&is_instance, eq, scratch, Operand(prototype));
danno@chromium.org40cb8782011-05-25 07:58:50 +00003949 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003950 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
3951 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
3952 __ Branch(&loop);
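  // The loop above is the standard prototype-chain walk; in C++-flavoured
  // pseudo-code (the accessors are illustrative, not the real object API):
  //
  //   for (Object* p = object_prototype; ; p = p->map()->prototype()) {
  //     if (p == function_prototype) return kIsInstance;
  //     if (p == null_value) return kIsNotInstance;
  //   }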
3953
3954 __ bind(&is_instance);
3955 ASSERT(Smi::FromInt(0) == 0);
danno@chromium.org40cb8782011-05-25 07:58:50 +00003956 if (!HasCallSiteInlineCheck()) {
3957 __ mov(v0, zero_reg);
3958 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3959 } else {
3960 UNIMPLEMENTED_MIPS();
3961 }
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003962 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3963
3964 __ bind(&is_not_instance);
danno@chromium.org40cb8782011-05-25 07:58:50 +00003965 if (!HasCallSiteInlineCheck()) {
3966 __ li(v0, Operand(Smi::FromInt(1)));
3967 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3968 } else {
3969 UNIMPLEMENTED_MIPS();
3970 }
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003971 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3972
3973 Label object_not_null, object_not_null_or_smi;
3974 __ bind(&not_js_object);
3975  // Before null, smi and string value checks, check that the rhs is a function,
3976  // since for a non-function rhs an exception needs to be thrown.
3977 __ JumpIfSmi(function, &slow);
danno@chromium.org40cb8782011-05-25 07:58:50 +00003978 __ GetObjectType(function, scratch2, scratch);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00003979 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
3980
3981 // Null is not instance of anything.
3982 __ Branch(&object_not_null, ne, scratch,
3983 Operand(masm->isolate()->factory()->null_value()));
3984 __ li(v0, Operand(Smi::FromInt(1)));
3985 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3986
3987 __ bind(&object_not_null);
3988 // Smi values are not instances of anything.
3989 __ JumpIfNotSmi(object, &object_not_null_or_smi);
3990 __ li(v0, Operand(Smi::FromInt(1)));
3991 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3992
3993 __ bind(&object_not_null_or_smi);
3994 // String values are not instances of anything.
3995 __ IsObjectJSStringType(object, scratch, &slow);
3996 __ li(v0, Operand(Smi::FromInt(1)));
3997 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3998
3999 // Slow-case. Tail call builtin.
4000 __ bind(&slow);
danno@chromium.org40cb8782011-05-25 07:58:50 +00004001 if (!ReturnTrueFalseObject()) {
4002 if (HasArgsInRegisters()) {
4003 __ Push(a0, a1);
4004 }
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004005 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
danno@chromium.org40cb8782011-05-25 07:58:50 +00004006 } else {
4007 __ EnterInternalFrame();
4008 __ Push(a0, a1);
4009 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4010 __ LeaveInternalFrame();
4011 __ mov(a0, v0);
4012 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4013 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4014 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4015 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4016 }
lrn@chromium.org7516f052011-03-30 08:52:27 +00004017}
4018
4019
danno@chromium.org40cb8782011-05-25 07:58:50 +00004020Register InstanceofStub::left() { return a0; }
4021
4022
4023Register InstanceofStub::right() { return a1; }
4024
4025
lrn@chromium.org7516f052011-03-30 08:52:27 +00004026void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004027 // The displacement is the offset of the last parameter (if any)
4028 // relative to the frame pointer.
4029 static const int kDisplacement =
4030 StandardFrameConstants::kCallerSPOffset - kPointerSize;
4031
4032  // Check that the key is a smi.
4033 Label slow;
4034 __ JumpIfNotSmi(a1, &slow);
4035
4036 // Check if the calling frame is an arguments adaptor frame.
4037 Label adaptor;
4038 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4039 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4040 __ Branch(&adaptor,
4041 eq,
4042 a3,
4043 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4044
4045 // Check index (a1) against formal parameters count limit passed in
4046 // through register a0. Use unsigned comparison to get negative
4047 // check for free.
4048 __ Branch(&slow, hs, a1, Operand(a0));
4049
4050 // Read the argument from the stack and return it.
4051 __ subu(a3, a0, a1);
4052 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4053 __ Addu(a3, fp, Operand(t3));
4054 __ lw(v0, MemOperand(a3, kDisplacement));
4055 __ Ret();
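  // Address arithmetic, restated: a0 and a1 are smis (value << 1), so
  // shifting their difference left by kPointerSizeLog2 - kSmiTagSize (= 1)
  // scales it straight to bytes. With untagged integers, as a sketch:
  //
  //   char* ArgumentAddress(char* fp, int argc, int index) {
  //     return fp + (argc - index) * kPointerSize + kDisplacement;
  //   }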
4056
4057 // Arguments adaptor case: Check index (a1) against actual arguments
4058 // limit found in the arguments adaptor frame. Use unsigned
4059 // comparison to get negative check for free.
4060 __ bind(&adaptor);
4061 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4062 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4063
4064 // Read the argument from the adaptor frame and return it.
4065 __ subu(a3, a0, a1);
4066 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4067 __ Addu(a3, a2, Operand(t3));
4068 __ lw(v0, MemOperand(a3, kDisplacement));
4069 __ Ret();
4070
4071 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4072 // by calling the runtime system.
4073 __ bind(&slow);
4074 __ push(a1);
4075 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004076}
4077
4078
4079void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004080 // sp[0] : number of parameters
4081 // sp[4] : receiver displacement
4082 // sp[8] : function
4083
4084 // Check if the calling frame is an arguments adaptor frame.
4085 Label adaptor_frame, try_allocate, runtime;
4086 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4087 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4088 __ Branch(&adaptor_frame,
4089 eq,
4090 a3,
4091 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4092
4093 // Get the length from the frame.
4094 __ lw(a1, MemOperand(sp, 0));
4095 __ Branch(&try_allocate);
4096
4097 // Patch the arguments.length and the parameters pointer.
4098 __ bind(&adaptor_frame);
4099 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4100 __ sw(a1, MemOperand(sp, 0));
4101 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4102 __ Addu(a3, a2, Operand(at));
4103
4104 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4105 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4106
4107 // Try the new space allocation. Start out with computing the size
4108 // of the arguments object and the elements array in words.
4109 Label add_arguments_object;
4110 __ bind(&try_allocate);
4111 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4112 __ srl(a1, a1, kSmiTagSize);
4113
4114 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4115 __ bind(&add_arguments_object);
4116 __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize));
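  // The allocation size, restated as a sketch: the elements FixedArray is
  // only needed when there is at least one argument.
  //
  //   int SizeInWords(int length) {  // length = number of arguments.
  //     int elements = (length == 0)
  //         ? 0
  //         : length + FixedArray::kHeaderSize / kPointerSize;
  //     return elements + GetArgumentsObjectSize() / kPointerSize;
  //   }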
4117
4118 // Do the allocation of both objects in one go.
4119 __ AllocateInNewSpace(
4120 a1,
4121 v0,
4122 a2,
4123 a3,
4124 &runtime,
4125 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
4126
4127 // Get the arguments boilerplate from the current (global) context.
4128 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4129 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4130 __ lw(t0, MemOperand(t0,
4131 Context::SlotOffset(GetArgumentsBoilerplateIndex())));
4132
4133 // Copy the JS object part.
4134 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4135
4136 if (type_ == NEW_NON_STRICT) {
4137 // Setup the callee in-object property.
4138 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4139 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4140 const int kCalleeOffset = JSObject::kHeaderSize +
4141 Heap::kArgumentsCalleeIndex * kPointerSize;
4142 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4143 }
4144
4145 // Get the length (smi tagged) and set that as an in-object property too.
4146 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4147 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4148 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4149 Heap::kArgumentsLengthIndex * kPointerSize));
4150
4151 Label done;
4152 __ Branch(&done, eq, a1, Operand(zero_reg));
4153
4154 // Get the parameters pointer from the stack.
4155 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4156
4157 // Setup the elements pointer in the allocated arguments object and
4158 // initialize the header in the elements fixed array.
4159 __ Addu(t0, v0, Operand(GetArgumentsObjectSize()));
4160 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4161 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4162 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4163 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4164 __ srl(a1, a1, kSmiTagSize); // Untag the length for the loop.
4165
4166 // Copy the fixed array slots.
4167 Label loop;
4168 // Setup t0 to point to the first array slot.
4169 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4170 __ bind(&loop);
4171 // Pre-decrement a2 with kPointerSize on each iteration.
4172 // Pre-decrement in order to skip receiver.
4173 __ Addu(a2, a2, Operand(-kPointerSize));
4174 __ lw(a3, MemOperand(a2));
4175 // Post-increment t0 with kPointerSize on each iteration.
4176 __ sw(a3, MemOperand(t0));
4177 __ Addu(t0, t0, Operand(kPointerSize));
4178 __ Subu(a1, a1, Operand(1));
4179 __ Branch(&loop, ne, a1, Operand(zero_reg));
4180
4181 // Return and remove the on-stack parameters.
4182 __ bind(&done);
4183 __ Addu(sp, sp, Operand(3 * kPointerSize));
4184 __ Ret();
4185
4186 // Do the runtime call to allocate the arguments object.
4187 __ bind(&runtime);
4188 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004189}
4190
4191
4192void RegExpExecStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004193 // Just jump directly to runtime if native RegExp is not selected at compile
4194 // time or if regexp entry in generated code is turned off runtime switch or
4195 // at compilation.
4196#ifdef V8_INTERPRETED_REGEXP
4197 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4198#else // V8_INTERPRETED_REGEXP
4199 if (!FLAG_regexp_entry_native) {
4200 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4201 return;
4202 }
4203
4204 // Stack frame on entry.
4205 // sp[0]: last_match_info (expected JSArray)
4206 // sp[4]: previous index
4207 // sp[8]: subject string
4208 // sp[12]: JSRegExp object
4209
4210 static const int kLastMatchInfoOffset = 0 * kPointerSize;
4211 static const int kPreviousIndexOffset = 1 * kPointerSize;
4212 static const int kSubjectOffset = 2 * kPointerSize;
4213 static const int kJSRegExpOffset = 3 * kPointerSize;
4214
4215 Label runtime, invoke_regexp;
4216
4217  // Allocation of registers for this function. These are in callee-saved
4218  // registers and will be preserved by the call to the native RegExp code, as
4219  // this code is called using the normal C calling convention. When calling
4220  // directly from generated code the native RegExp code will not do a GC and
4221  // therefore the contents of these registers are safe to use after the call.
4222 // MIPS - using s0..s2, since we are not using CEntry Stub.
4223 Register subject = s0;
4224 Register regexp_data = s1;
4225 Register last_match_info_elements = s2;
4226
4227 // Ensure that a RegExp stack is allocated.
4228 ExternalReference address_of_regexp_stack_memory_address =
4229 ExternalReference::address_of_regexp_stack_memory_address(
4230 masm->isolate());
4231 ExternalReference address_of_regexp_stack_memory_size =
4232 ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
4233 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4234 __ lw(a0, MemOperand(a0, 0));
4235 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4236
4237 // Check that the first argument is a JSRegExp object.
4238 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4239 STATIC_ASSERT(kSmiTag == 0);
4240 __ JumpIfSmi(a0, &runtime);
4241 __ GetObjectType(a0, a1, a1);
4242 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4243
4244 // Check that the RegExp has been compiled (data contains a fixed array).
4245 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4246 if (FLAG_debug_code) {
4247 __ And(t0, regexp_data, Operand(kSmiTagMask));
4248 __ Check(nz,
4249 "Unexpected type for RegExp data, FixedArray expected",
4250 t0,
4251 Operand(zero_reg));
4252 __ GetObjectType(regexp_data, a0, a0);
4253 __ Check(eq,
4254 "Unexpected type for RegExp data, FixedArray expected",
4255 a0,
4256 Operand(FIXED_ARRAY_TYPE));
4257 }
4258
4259 // regexp_data: RegExp data (FixedArray)
4260 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4261 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4262 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4263
4264 // regexp_data: RegExp data (FixedArray)
4265 // Check that the number of captures fit in the static offsets vector buffer.
4266 __ lw(a2,
4267 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4268 // Calculate number of capture registers (number_of_captures + 1) * 2. This
4269  // uses the assumption that smis are 2 * their untagged value.
4270 STATIC_ASSERT(kSmiTag == 0);
4271 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4272 __ Addu(a2, a2, Operand(2)); // a2 was a smi.
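  // (number_of_captures + 1) * 2 registers: one (start, end) pair per
  // capture plus one pair for the whole match. Since a smi is twice its
  // value, a single Addu of 2 on the tagged count yields exactly this:
  //
  //   int NumRegisters(int number_of_captures) {
  //     return (number_of_captures + 1) * 2;
  //   }
  //   // On tagged values: smi(n) + 2 == 2 * n + 2 == NumRegisters(n).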
4273 // Check that the static offsets vector buffer is large enough.
4274 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4275
4276 // a2: Number of capture registers
4277 // regexp_data: RegExp data (FixedArray)
4278 // Check that the second argument is a string.
4279 __ lw(subject, MemOperand(sp, kSubjectOffset));
4280 __ JumpIfSmi(subject, &runtime);
4281 __ GetObjectType(subject, a0, a0);
4282 __ And(a0, a0, Operand(kIsNotStringMask));
4283 STATIC_ASSERT(kStringTag == 0);
4284 __ Branch(&runtime, ne, a0, Operand(zero_reg));
4285
4286 // Get the length of the string to r3.
4287 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4288
4289 // a2: Number of capture registers
4290 // a3: Length of subject string as a smi
4291 // subject: Subject string
4292 // regexp_data: RegExp data (FixedArray)
4293 // Check that the third argument is a positive smi less than the subject
4294 // string length. A negative value will be greater (unsigned comparison).
4295 __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4296 __ And(at, a0, Operand(kSmiTagMask));
4297 __ Branch(&runtime, ne, at, Operand(zero_reg));
4298 __ Branch(&runtime, ls, a3, Operand(a0));
4299
4300 // a2: Number of capture registers
4301 // subject: Subject string
4302 // regexp_data: RegExp data (FixedArray)
4303 // Check that the fourth object is a JSArray object.
4304 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4305 __ JumpIfSmi(a0, &runtime);
4306 __ GetObjectType(a0, a1, a1);
4307 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4308 // Check that the JSArray is in fast case.
4309 __ lw(last_match_info_elements,
4310 FieldMemOperand(a0, JSArray::kElementsOffset));
4311 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4312 __ Branch(&runtime, ne, a0, Operand(
4313 masm->isolate()->factory()->fixed_array_map()));
4314 // Check that the last match info has space for the capture registers and the
4315 // additional information.
4316 __ lw(a0,
4317 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4318 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4319 __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4320 __ Branch(&runtime, gt, a2, Operand(at));
4321 // subject: Subject string
4322 // regexp_data: RegExp data (FixedArray)
4323 // Check the representation and encoding of the subject string.
4324 Label seq_string;
4325 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4326 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4327 // First check for flat string.
4328 __ And(at, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
4329 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4330 __ Branch(&seq_string, eq, at, Operand(zero_reg));
4331
4332 // subject: Subject string
4333  // a0: instance type of subject string
4334 // regexp_data: RegExp data (FixedArray)
4335 // Check for flat cons string.
4336 // A flat cons string is a cons string where the second part is the empty
4337 // string. In that case the subject string is just the first part of the cons
4338 // string. Also in this case the first part of the cons string is known to be
4339 // a sequential string or an external string.
4340 STATIC_ASSERT(kExternalStringTag != 0);
4341 STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
4342 __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag));
4343 __ Branch(&runtime, ne, at, Operand(zero_reg));
4344 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4345 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4346 __ Branch(&runtime, ne, a0, Operand(a1));
4347 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
4348 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4349 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4350 // Is first part a flat string?
4351 STATIC_ASSERT(kSeqStringTag == 0);
4352 __ And(at, a0, Operand(kStringRepresentationMask));
4353 __ Branch(&runtime, ne, at, Operand(zero_reg));
4354
4355 __ bind(&seq_string);
4356 // subject: Subject string
4357 // regexp_data: RegExp data (FixedArray)
4358 // a0: Instance type of subject string
4359 STATIC_ASSERT(kStringEncodingMask == 4);
4360 STATIC_ASSERT(kAsciiStringTag == 4);
4361 STATIC_ASSERT(kTwoByteStringTag == 0);
4362 // Find the code object based on the assumptions above.
4363 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
4364 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
 4365  __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (used below).
 4366  __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
 4367  __ movz(t9, t0, a0);  // If UC16 (a0 is 0), replace t9 with the UC16 code.
4368
4369 // Check that the irregexp code has been generated for the actual string
 4370  // encoding. If it has, the field contains a code object; otherwise it
4371 // contains the hole.
4372 __ GetObjectType(t9, a0, a0);
4373 __ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
4374
4375 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4376 // t9: code
4377 // subject: Subject string
4378 // regexp_data: RegExp data (FixedArray)
4379 // Load used arguments before starting to push arguments for call to native
4380 // RegExp code to avoid handling changing stack height.
4381 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
4382 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
4383
4384 // a1: previous index
4385 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4386 // t9: code
4387 // subject: Subject string
4388 // regexp_data: RegExp data (FixedArray)
4389 // All checks done. Now push arguments for native regexp code.
4390 __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
4391 1, a0, a2);
4392
4393 // Isolates: note we add an additional parameter here (isolate pointer).
4394 static const int kRegExpExecuteArguments = 8;
4395 static const int kParameterRegisters = 4;
4396 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4397
4398 // Stack pointer now points to cell where return address is to be written.
4399 // Arguments are before that on the stack or in registers, meaning we
4400 // treat the return address as argument 5. Thus every argument after that
4401 // needs to be shifted back by 1. Since DirectCEntryStub will handle
 4402  // allocating space for the C argument slots, we don't need to calculate
4403 // that into the argument positions on the stack. This is how the stack will
4404 // look (sp meaning the value of sp at this moment):
 4405  // [sp + 4 * kPointerSize] - Argument 8
 4406  // [sp + 3 * kPointerSize] - Argument 7
 4407  // [sp + 2 * kPointerSize] - Argument 6
 4408  // [sp + 1 * kPointerSize] - Argument 5
 4409  // [sp + 0]                - saved ra
4410
4411 // Argument 8: Pass current isolate address.
4412 // CFunctionArgumentOperand handles MIPS stack argument slots.
4413 __ li(a0, Operand(ExternalReference::isolate_address()));
4414 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
4415
4416 // Argument 7: Indicate that this is a direct call from JavaScript.
4417 __ li(a0, Operand(1));
4418 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
4419
4420 // Argument 6: Start (high end) of backtracking stack memory area.
4421 __ li(a0, Operand(address_of_regexp_stack_memory_address));
4422 __ lw(a0, MemOperand(a0, 0));
4423 __ li(a2, Operand(address_of_regexp_stack_memory_size));
4424 __ lw(a2, MemOperand(a2, 0));
4425 __ addu(a0, a0, a2);
4426 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
4427
4428 // Argument 5: static offsets vector buffer.
4429 __ li(a0, Operand(
4430 ExternalReference::address_of_static_offsets_vector(masm->isolate())));
4431 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
4432
4433 // For arguments 4 and 3 get string length, calculate start of string data
4434 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
4435 __ lw(a0, FieldMemOperand(subject, String::kLengthOffset));
4436 __ sra(a0, a0, kSmiTagSize);
4437 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4438 __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
4439 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
4440 // Argument 4 (a3): End of string data
4441 // Argument 3 (a2): Start of string data
4442 __ sllv(t1, a1, a3);
4443 __ addu(a2, t0, t1);
4444 __ sllv(t1, a0, a3);
4445 __ addu(a3, t0, t1);
4446
4447 // Argument 2 (a1): Previous index.
4448 // Already there
4449
4450 // Argument 1 (a0): Subject string.
4451 __ mov(a0, subject);
4452
4453 // Locate the code entry and call it.
4454 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
4455 DirectCEntryStub stub;
4456 stub.GenerateCall(masm, t9);
4457
4458 __ LeaveExitFrame(false, no_reg);
4459
4460 // v0: result
4461 // subject: subject string (callee saved)
4462 // regexp_data: RegExp data (callee saved)
4463 // last_match_info_elements: Last match info elements (callee saved)
4464
4465 // Check the result.
4466
4467 Label success;
4468 __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
4469 Label failure;
4470 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
4471 // If not exception it can only be retry. Handle that in the runtime system.
4472 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
 4473  // The result must now be exception. If there is no pending exception yet,
 4474  // a stack overflow (on the backtrack stack) was detected in RegExp code,
 4475  // but the exception has not been created yet. Handle that in the runtime
 4476  // system.
4476 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4477 __ li(a1, Operand(
4478 ExternalReference::the_hole_value_location(masm->isolate())));
4479 __ lw(a1, MemOperand(a1, 0));
4480 __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
4481 masm->isolate())));
4482 __ lw(v0, MemOperand(a2, 0));
4483 __ Branch(&runtime, eq, v0, Operand(a1));
4484
4485 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
4486
4487 // Check if the exception is a termination. If so, throw as uncatchable.
4488 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
4489 Label termination_exception;
4490 __ Branch(&termination_exception, eq, v0, Operand(a0));
4491
4492 __ Throw(a0); // Expects thrown value in v0.
4493
4494 __ bind(&termination_exception);
4495 __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
4496
4497 __ bind(&failure);
4498 // For failure and exception return null.
4499 __ li(v0, Operand(masm->isolate()->factory()->null_value()));
4500 __ Addu(sp, sp, Operand(4 * kPointerSize));
4501 __ Ret();
4502
4503 // Process the result from the native regexp code.
4504 __ bind(&success);
4505 __ lw(a1,
4506 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4507 // Calculate number of capture registers (number_of_captures + 1) * 2.
4508 STATIC_ASSERT(kSmiTag == 0);
4509 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4510 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
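  // (Editor's note: a smi is the value shifted left one bit, so a1 already
  // holds number_of_captures * 2; adding 2 yields the untagged value
  // (number_of_captures + 1) * 2 directly, e.g. 3 captures -> smi 6 -> 8.)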
4511
4512 // a1: number of capture registers
4513 // subject: subject string
4514 // Store the capture count.
4515 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
4516 __ sw(a2, FieldMemOperand(last_match_info_elements,
4517 RegExpImpl::kLastCaptureCountOffset));
4518 // Store last subject and last input.
4519 __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
4520 __ sw(subject,
4521 FieldMemOperand(last_match_info_elements,
4522 RegExpImpl::kLastSubjectOffset));
4523 __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
4524 __ sw(subject,
4525 FieldMemOperand(last_match_info_elements,
4526 RegExpImpl::kLastInputOffset));
4527 __ mov(a3, last_match_info_elements);
4528 __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
4529
4530 // Get the static offsets vector filled by the native regexp code.
4531 ExternalReference address_of_static_offsets_vector =
4532 ExternalReference::address_of_static_offsets_vector(masm->isolate());
4533 __ li(a2, Operand(address_of_static_offsets_vector));
4534
4535 // a1: number of capture registers
4536 // a2: offsets vector
4537 Label next_capture, done;
4538 // Capture register counter starts from number of capture registers and
4539 // counts down until wrapping after zero.
4540 __ Addu(a0,
4541 last_match_info_elements,
4542 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
4543 __ bind(&next_capture);
4544 __ Subu(a1, a1, Operand(1));
4545 __ Branch(&done, lt, a1, Operand(zero_reg));
4546 // Read the value from the static offsets vector buffer.
4547 __ lw(a3, MemOperand(a2, 0));
4548 __ addiu(a2, a2, kPointerSize);
4549 // Store the smi value in the last match info.
4550 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
4551 __ sw(a3, MemOperand(a0, 0));
4552 __ Branch(&next_capture, USE_DELAY_SLOT);
4553 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
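  // (Editor's note: on MIPS the instruction after a branch always executes
  // before the branch takes effect; USE_DELAY_SLOT lets the macro assembler
  // put the pointer increment in that slot instead of padding it with a nop.)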
4554
4555 __ bind(&done);
4556
4557 // Return last match info.
4558 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
4559 __ Addu(sp, sp, Operand(4 * kPointerSize));
4560 __ Ret();
4561
4562 // Do the runtime call to execute the regexp.
4563 __ bind(&runtime);
4564 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4565#endif // V8_INTERPRETED_REGEXP
lrn@chromium.org7516f052011-03-30 08:52:27 +00004566}
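
// Editor's sketch: a C analogue of two tricks used in the stub above,
// assuming 32-bit values. It shows the single unsigned compare that rejects
// both negative and too-large index values, and the encoding-dependent
// address arithmetic behind arguments 3 and 4. Illustration only, not V8 code.
static bool SketchRegExpStringArgs(const unsigned char* char_data,
                                   int previous_index, int length,
                                   bool is_ascii,
                                   const unsigned char** start,
                                   const unsigned char** end) {
  // A negative index reinterpreted as unsigned is huge, so one branch
  // covers index < 0 and index >= length alike.
  if (static_cast<unsigned>(previous_index) >=
      static_cast<unsigned>(length)) {
    return false;  // the stub bails out to the runtime in this case
  }
  int shift = is_ascii ? 0 : 1;  // matches the XOR-ed encoding bit above
  *start = char_data + (previous_index << shift);
  *end = char_data + (length << shift);
  return true;
}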
4567
4568
4569void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004570 const int kMaxInlineLength = 100;
4571 Label slowcase;
4572 Label done;
4573 __ lw(a1, MemOperand(sp, kPointerSize * 2));
4574 STATIC_ASSERT(kSmiTag == 0);
4575 STATIC_ASSERT(kSmiTagSize == 1);
4576 __ JumpIfNotSmi(a1, &slowcase);
4577 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
4578 // Smi-tagging is equivalent to multiplying by 2.
 4579  // Allocate RegExpResult followed by FixedArray with size in a2.
4580 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4581 // Elements: [Map][Length][..elements..]
4582 // Size of JSArray with two in-object properties and the header of a
4583 // FixedArray.
4584 int objects_size =
4585 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
4586 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
4587 __ Addu(a2, t1, Operand(objects_size));
4588 __ AllocateInNewSpace(
4589 a2, // In: Size, in words.
4590 v0, // Out: Start of allocation (tagged).
4591 a3, // Scratch register.
4592 t0, // Scratch register.
4593 &slowcase,
4594 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
4595 // v0: Start of allocated area, object-tagged.
4596 // a1: Number of elements in array, as smi.
4597 // t1: Number of elements, untagged.
4598
4599 // Set JSArray map to global.regexp_result_map().
4600 // Set empty properties FixedArray.
4601 // Set elements to point to FixedArray allocated right after the JSArray.
4602 // Interleave operations for better latency.
4603 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
4604 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
4605 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
4606 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
4607 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
4608 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
4609 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
4610 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
4611
4612 // Set input, index and length fields from arguments.
4613 __ lw(a1, MemOperand(sp, kPointerSize * 0));
4614 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
4615 __ lw(a1, MemOperand(sp, kPointerSize * 1));
4616 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
4617 __ lw(a1, MemOperand(sp, kPointerSize * 2));
4618 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
4619
4620 // Fill out the elements FixedArray.
4621 // v0: JSArray, tagged.
4622 // a3: FixedArray, tagged.
4623 // t1: Number of elements in array, untagged.
4624
4625 // Set map.
4626 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
4627 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
4628 // Set FixedArray length.
4629 __ sll(t2, t1, kSmiTagSize);
4630 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4631 // Fill contents of fixed-array with the-hole.
4632 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
4633 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4634 // Fill fixed array elements with hole.
4635 // v0: JSArray, tagged.
4636 // a2: the hole.
4637 // a3: Start of elements in FixedArray.
4638 // t1: Number of elements to fill.
4639 Label loop;
4640 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
4641 __ addu(t1, t1, a3); // Point past last element to store.
4642 __ bind(&loop);
 4643  __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 is past the end.
4644 __ sw(a2, MemOperand(a3));
4645 __ Branch(&loop, USE_DELAY_SLOT);
4646 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
4647
4648 __ bind(&done);
4649 __ Addu(sp, sp, Operand(3 * kPointerSize));
4650 __ Ret();
4651
4652 __ bind(&slowcase);
4653 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004654}
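
// Editor's sketch of the sizing logic above: the JSRegExpResult and its
// elements FixedArray come from one new-space allocation measured in words.
// The byte constants below are illustrative stand-ins, not V8's definitions.
static int SketchResultSizeInWords(int num_elements) {
  const int kPointerSizeBytes = 4;               // 32-bit MIPS
  const int kJSRegExpResultSizeBytes = 6 * 4;    // assumed object layout
  const int kFixedArrayHeaderSizeBytes = 2 * 4;  // assumed map + length
  int fixed_part = (kJSRegExpResultSizeBytes + kFixedArrayHeaderSizeBytes) /
                   kPointerSizeBytes;
  return fixed_part + num_elements;  // one word per element slot
}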
4655
4656
4657void CallFunctionStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004658 Label slow;
4659
danno@chromium.org40cb8782011-05-25 07:58:50 +00004660 // The receiver might implicitly be the global object. This is
4661 // indicated by passing the hole as the receiver to the call
4662 // function stub.
4663 if (ReceiverMightBeImplicit()) {
4664 Label call;
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004665 // Get the receiver from the stack.
4666 // function, receiver [, arguments]
danno@chromium.org40cb8782011-05-25 07:58:50 +00004667 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
4668 // Call as function is indicated with the hole.
4669 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4670 __ Branch(&call, ne, t0, Operand(at));
4671 // Patch the receiver on the stack with the global receiver object.
4672 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4673 __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
4674 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
4675 __ bind(&call);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004676 }
4677
4678 // Get the function to call from the stack.
4679 // function, receiver [, arguments]
4680 __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
4681
4682 // Check that the function is really a JavaScript function.
4683 // a1: pushed function (to be verified)
4684 __ JumpIfSmi(a1, &slow);
4685 // Get the map of the function object.
4686 __ GetObjectType(a1, a2, a2);
4687 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
4688
4689 // Fast-case: Invoke the function now.
4690 // a1: pushed function
4691 ParameterCount actual(argc_);
danno@chromium.org40cb8782011-05-25 07:58:50 +00004692
4693 if (ReceiverMightBeImplicit()) {
4694 Label call_as_function;
4695 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4696 __ Branch(&call_as_function, eq, t0, Operand(at));
erik.corry@gmail.comd6076d92011-06-06 09:39:18 +00004697 __ InvokeFunction(a1,
4698 actual,
4699 JUMP_FUNCTION,
4700 NullCallWrapper(),
4701 CALL_AS_METHOD);
danno@chromium.org40cb8782011-05-25 07:58:50 +00004702 __ bind(&call_as_function);
4703 }
4704 __ InvokeFunction(a1,
4705 actual,
4706 JUMP_FUNCTION,
4707 NullCallWrapper(),
4708 CALL_AS_FUNCTION);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004709
4710 // Slow-case: Non-function called.
4711 __ bind(&slow);
4712 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4713 // of the original receiver from the call site).
4714 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
4715 __ li(a0, Operand(argc_)); // Setup the number of arguments.
4716 __ mov(a2, zero_reg);
4717 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
4718 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
4719 RelocInfo::CODE_TARGET);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004720}
4721
4722
4723// Unfortunately you have to run without snapshots to see most of these
4724// names in the profile since most compare stubs end up in the snapshot.
4725const char* CompareStub::GetName() {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004726 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
4727 (lhs_.is(a1) && rhs_.is(a0)));
4728
4729 if (name_ != NULL) return name_;
4730 const int kMaxNameLength = 100;
4731 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
4732 kMaxNameLength);
4733 if (name_ == NULL) return "OOM";
4734
4735 const char* cc_name;
4736 switch (cc_) {
4737 case lt: cc_name = "LT"; break;
4738 case gt: cc_name = "GT"; break;
4739 case le: cc_name = "LE"; break;
4740 case ge: cc_name = "GE"; break;
4741 case eq: cc_name = "EQ"; break;
4742 case ne: cc_name = "NE"; break;
4743 default: cc_name = "UnknownCondition"; break;
4744 }
4745
4746 const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
4747 const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
4748
4749 const char* strict_name = "";
4750 if (strict_ && (cc_ == eq || cc_ == ne)) {
4751 strict_name = "_STRICT";
4752 }
4753
4754 const char* never_nan_nan_name = "";
4755 if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
4756 never_nan_nan_name = "_NO_NAN";
4757 }
4758
4759 const char* include_number_compare_name = "";
4760 if (!include_number_compare_) {
4761 include_number_compare_name = "_NO_NUMBER";
4762 }
4763
4764 const char* include_smi_compare_name = "";
4765 if (!include_smi_compare_) {
4766 include_smi_compare_name = "_NO_SMI";
4767 }
4768
4769 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
4770 "CompareStub_%s%s%s%s%s%s",
4771 cc_name,
4772 lhs_name,
4773 rhs_name,
4774 strict_name,
4775 never_nan_nan_name,
4776 include_number_compare_name,
4777 include_smi_compare_name);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004778 return name_;
4779}
4780
4781
4782int CompareStub::MinorKey() {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004783  // Encode the parameters in a unique 16-bit value.
4784 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
4785 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
4786 (lhs_.is(a1) && rhs_.is(a0)));
4787 return ConditionField::encode(static_cast<unsigned>(cc_))
4788 | RegisterField::encode(lhs_.is(a0))
4789 | StrictField::encode(strict_)
4790 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
4791 | IncludeSmiCompareField::encode(include_smi_compare_);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004792}
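
// Editor's sketch of the kind of bit packing MinorKey performs, with
// hypothetical field positions; V8's real widths come from its BitField
// templates, so treat this purely as an illustration of the idea.
static int SketchMinorKey(unsigned condition, bool lhs_is_a0, bool strict,
                          bool never_nan_nan, bool include_smi_compare) {
  // Giving each flag its own bit below the condition field makes every
  // distinct stub configuration map to a distinct key.
  return static_cast<int>((condition << 4) |
                          (static_cast<unsigned>(lhs_is_a0) << 3) |
                          (static_cast<unsigned>(strict) << 2) |
                          (static_cast<unsigned>(never_nan_nan) << 1) |
                          (static_cast<unsigned>(include_smi_compare)));
}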
4793
4794
karlklose@chromium.org83a47282011-05-11 11:54:09 +00004795// StringCharCodeAtGenerator.
lrn@chromium.org7516f052011-03-30 08:52:27 +00004796void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004797 Label flat_string;
4798 Label ascii_string;
4799 Label got_char_code;
4800
4801 ASSERT(!t0.is(scratch_));
4802 ASSERT(!t0.is(index_));
4803 ASSERT(!t0.is(result_));
4804 ASSERT(!t0.is(object_));
4805
4806 // If the receiver is a smi trigger the non-string case.
4807 __ JumpIfSmi(object_, receiver_not_string_);
4808
4809 // Fetch the instance type of the receiver into result register.
4810 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
4811 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
4812 // If the receiver is not a string trigger the non-string case.
4813 __ And(t0, result_, Operand(kIsNotStringMask));
4814 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
4815
4816 // If the index is non-smi trigger the non-smi case.
4817 __ JumpIfNotSmi(index_, &index_not_smi_);
4818
4819 // Put smi-tagged index into scratch register.
4820 __ mov(scratch_, index_);
4821 __ bind(&got_smi_index_);
4822
4823 // Check for index out of range.
4824 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
4825 __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
4826
4827 // We need special handling for non-flat strings.
4828 STATIC_ASSERT(kSeqStringTag == 0);
4829 __ And(t0, result_, Operand(kStringRepresentationMask));
4830 __ Branch(&flat_string, eq, t0, Operand(zero_reg));
4831
4832 // Handle non-flat strings.
4833 __ And(t0, result_, Operand(kIsConsStringMask));
4834 __ Branch(&call_runtime_, eq, t0, Operand(zero_reg));
4835
4836 // ConsString.
4837 // Check whether the right hand side is the empty string (i.e. if
4838 // this is really a flat string in a cons string). If that is not
4839 // the case we would rather go to the runtime system now to flatten
4840 // the string.
4841 __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
4842 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
4843 __ Branch(&call_runtime_, ne, result_, Operand(t0));
4844
4845 // Get the first of the two strings and load its instance type.
4846 __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
4847 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
4848 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
4849 // If the first cons component is also non-flat, then go to runtime.
4850 STATIC_ASSERT(kSeqStringTag == 0);
4851
4852 __ And(t0, result_, Operand(kStringRepresentationMask));
4853 __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
4854
4855 // Check for 1-byte or 2-byte string.
4856 __ bind(&flat_string);
4857 STATIC_ASSERT(kAsciiStringTag != 0);
4858 __ And(t0, result_, Operand(kStringEncodingMask));
4859 __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
4860
4861 // 2-byte string.
4862 // Load the 2-byte character code into the result register. We can
4863 // add without shifting since the smi tag size is the log2 of the
4864 // number of bytes in a two-byte character.
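  // (Editor's example: index 3 is stored as the smi 6, and character 3 of a
  // two-byte string lives 6 bytes past the header, so the smi-tagged index
  // is itself the correct byte offset.)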
4865 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
4866 __ Addu(scratch_, object_, Operand(scratch_));
4867 __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
4868 __ Branch(&got_char_code);
4869
4870 // ASCII string.
4871 // Load the byte into the result register.
4872 __ bind(&ascii_string);
4873
4874 __ srl(t0, scratch_, kSmiTagSize);
4875 __ Addu(scratch_, object_, t0);
4876
4877 __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
4878
4879 __ bind(&got_char_code);
4880 __ sll(result_, result_, kSmiTagSize);
4881 __ bind(&exit_);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004882}
4883
4884
4885void StringCharCodeAtGenerator::GenerateSlow(
4886 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004887 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
4888
4889 // Index is not a smi.
4890 __ bind(&index_not_smi_);
4891 // If index is a heap number, try converting it to an integer.
4892 __ CheckMap(index_,
4893 scratch_,
4894 Heap::kHeapNumberMapRootIndex,
4895 index_not_number_,
danno@chromium.org40cb8782011-05-25 07:58:50 +00004896 DONT_DO_SMI_CHECK);
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004897 call_helper.BeforeCall(masm);
4898 // Consumed by runtime conversion function:
4899 __ Push(object_, index_, index_);
4900 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
4901 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
4902 } else {
4903 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
4904 // NumberToSmi discards numbers that are not exact integers.
4905 __ CallRuntime(Runtime::kNumberToSmi, 1);
4906 }
4907
4908 // Save the conversion result before the pop instructions below
4909 // have a chance to overwrite it.
4910
4911 __ Move(scratch_, v0);
4912
4913 __ pop(index_);
4914 __ pop(object_);
4915 // Reload the instance type.
4916 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
4917 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
4918 call_helper.AfterCall(masm);
4919 // If index is still not a smi, it must be out of range.
4920 __ JumpIfNotSmi(scratch_, index_out_of_range_);
4921 // Otherwise, return to the fast path.
4922 __ Branch(&got_smi_index_);
4923
4924 // Call runtime. We get here when the receiver is a string and the
4925 // index is a number, but the code of getting the actual character
4926 // is too complex (e.g., when the string needs to be flattened).
4927 __ bind(&call_runtime_);
4928 call_helper.BeforeCall(masm);
4929 __ Push(object_, index_);
4930 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
4931
4932 __ Move(result_, v0);
4933
4934 call_helper.AfterCall(masm);
4935 __ jmp(&exit_);
4936
4937 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
lrn@chromium.org7516f052011-03-30 08:52:27 +00004938}
4939
4940
4941// -------------------------------------------------------------------------
4942// StringCharFromCodeGenerator
4943
4944void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004945 // Fast case of Heap::LookupSingleCharacterStringFromCode.
4946
4947 ASSERT(!t0.is(result_));
4948 ASSERT(!t0.is(code_));
4949
4950 STATIC_ASSERT(kSmiTag == 0);
4951 STATIC_ASSERT(kSmiShiftSize == 0);
4952 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
4953 __ And(t0,
4954 code_,
4955 Operand(kSmiTagMask |
4956 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
4957 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
4958
4959 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
4960 // At this point code register contains smi tagged ASCII char code.
4961 STATIC_ASSERT(kSmiTag == 0);
4962 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
4963 __ Addu(result_, result_, t0);
4964 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
4965 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
4966 __ Branch(&slow_case_, eq, result_, Operand(t0));
4967 __ bind(&exit_);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004968}
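
// Editor's sketch of the combined check above: one AND tests both "code_ is
// a smi" and "the char code fits the one-character cache", because any tag
// bit or any bit above kMaxAsciiCharCode survives the mask.
static bool SketchIsCacheableCharCode(int tagged_code) {
  const int kMaxAsciiCharCode = 0x7F;  // as in V8's String
  const int kMask = 1 /* kSmiTagMask */ | ((~kMaxAsciiCharCode) << 1);
  return (tagged_code & kMask) == 0;
}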
4969
4970
4971void StringCharFromCodeGenerator::GenerateSlow(
4972 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004973 __ Abort("Unexpected fallthrough to CharFromCode slow case");
4974
4975 __ bind(&slow_case_);
4976 call_helper.BeforeCall(masm);
4977 __ push(code_);
4978 __ CallRuntime(Runtime::kCharFromCode, 1);
4979 __ Move(result_, v0);
4980
4981 call_helper.AfterCall(masm);
4982 __ Branch(&exit_);
4983
4984 __ Abort("Unexpected fallthrough from CharFromCode slow case");
lrn@chromium.org7516f052011-03-30 08:52:27 +00004985}
4986
4987
4988// -------------------------------------------------------------------------
4989// StringCharAtGenerator
4990
4991void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004992 char_code_at_generator_.GenerateFast(masm);
4993 char_from_code_generator_.GenerateFast(masm);
lrn@chromium.org7516f052011-03-30 08:52:27 +00004994}
4995
4996
4997void StringCharAtGenerator::GenerateSlow(
4998 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00004999 char_code_at_generator_.GenerateSlow(masm, call_helper);
5000 char_from_code_generator_.GenerateSlow(masm, call_helper);
lrn@chromium.org7516f052011-03-30 08:52:27 +00005001}
5002
5003
5004class StringHelper : public AllStatic {
5005 public:
5006 // Generate code for copying characters using a simple loop. This should only
5007 // be used in places where the number of characters is small and the
5008 // additional setup and checking in GenerateCopyCharactersLong adds too much
5009 // overhead. Copying of overlapping regions is not supported.
5010 // Dest register ends at the position after the last character written.
5011 static void GenerateCopyCharacters(MacroAssembler* masm,
5012 Register dest,
5013 Register src,
5014 Register count,
5015 Register scratch,
5016 bool ascii);
5017
5018 // Generate code for copying a large number of characters. This function
5019 // is allowed to spend extra time setting up conditions to make copying
5020 // faster. Copying of overlapping regions is not supported.
5021 // Dest register ends at the position after the last character written.
5022 static void GenerateCopyCharactersLong(MacroAssembler* masm,
5023 Register dest,
5024 Register src,
5025 Register count,
5026 Register scratch1,
5027 Register scratch2,
5028 Register scratch3,
5029 Register scratch4,
5030 Register scratch5,
5031 int flags);
5032
5033
5034 // Probe the symbol table for a two character string. If the string is
5035 // not found by probing a jump to the label not_found is performed. This jump
5036 // does not guarantee that the string is not in the symbol table. If the
 5037  // string is found the code falls through with the string in register v0.
 5038  // Contents of both c1 and c2 registers are modified. At the exit c1 is
 5039  // guaranteed to contain a halfword with low and high bytes equal to the
 5040  // initial contents of c1 and c2 respectively.
5041 static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5042 Register c1,
5043 Register c2,
5044 Register scratch1,
5045 Register scratch2,
5046 Register scratch3,
5047 Register scratch4,
5048 Register scratch5,
5049 Label* not_found);
5050
5051 // Generate string hash.
5052 static void GenerateHashInit(MacroAssembler* masm,
5053 Register hash,
5054 Register character);
5055
5056 static void GenerateHashAddCharacter(MacroAssembler* masm,
5057 Register hash,
5058 Register character);
5059
5060 static void GenerateHashGetHash(MacroAssembler* masm,
5061 Register hash);
5062
5063 private:
5064 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
5065};
5066
5067
5068void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5069 Register dest,
5070 Register src,
5071 Register count,
5072 Register scratch,
5073 bool ascii) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005074 Label loop;
5075 Label done;
5076 // This loop just copies one character at a time, as it is only used for
5077 // very short strings.
5078 if (!ascii) {
5079 __ addu(count, count, count);
5080 }
5081 __ Branch(&done, eq, count, Operand(zero_reg));
5082 __ addu(count, dest, count); // Count now points to the last dest byte.
5083
5084 __ bind(&loop);
5085 __ lbu(scratch, MemOperand(src));
5086 __ addiu(src, src, 1);
5087 __ sb(scratch, MemOperand(dest));
5088 __ addiu(dest, dest, 1);
5089 __ Branch(&loop, lt, dest, Operand(count));
5090
5091 __ bind(&done);
lrn@chromium.org7516f052011-03-30 08:52:27 +00005092}
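
// Editor's C sketch of the short-copy loop above: counts are doubled for
// two-byte strings so the loop can copy plain bytes either way, and dest
// ends just past the last byte written, as the contract requires.
static void SketchCopyCharacters(unsigned char* dest,
                                 const unsigned char* src,
                                 int count, bool ascii) {
  if (!ascii) count += count;  // two bytes per character
  const unsigned char* limit = dest + count;
  while (dest < limit) {
    *dest++ = *src++;
  }
}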
5093
5094
5095enum CopyCharactersFlags {
5096 COPY_ASCII = 1,
5097 DEST_ALWAYS_ALIGNED = 2
5098};
5099
5100
5101void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5102 Register dest,
5103 Register src,
5104 Register count,
5105 Register scratch1,
5106 Register scratch2,
5107 Register scratch3,
5108 Register scratch4,
5109 Register scratch5,
5110 int flags) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005111 bool ascii = (flags & COPY_ASCII) != 0;
5112 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5113
5114 if (dest_always_aligned && FLAG_debug_code) {
5115 // Check that destination is actually word aligned if the flag says
5116 // that it is.
5117 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5118 __ Check(eq,
5119 "Destination of copy not aligned.",
5120 scratch4,
5121 Operand(zero_reg));
5122 }
5123
5124 const int kReadAlignment = 4;
5125 const int kReadAlignmentMask = kReadAlignment - 1;
5126 // Ensure that reading an entire aligned word containing the last character
5127 // of a string will not read outside the allocated area (because we pad up
5128 // to kObjectAlignment).
5129 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5130 // Assumes word reads and writes are little endian.
5131 // Nothing to do for zero characters.
5132 Label done;
5133
5134 if (!ascii) {
5135 __ addu(count, count, count);
5136 }
5137 __ Branch(&done, eq, count, Operand(zero_reg));
5138
5139 Label byte_loop;
5140 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5141 __ Subu(scratch1, count, Operand(8));
5142 __ Addu(count, dest, Operand(count));
 5143  Register limit = count;  // Copy until dest reaches this.
5144 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5145
5146 if (!dest_always_aligned) {
5147 // Align dest by byte copying. Copies between zero and three bytes.
5148 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5149 Label dest_aligned;
5150 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5151 Label aligned_loop;
5152 __ bind(&aligned_loop);
5153 __ lbu(scratch1, MemOperand(src));
5154 __ addiu(src, src, 1);
5155 __ sb(scratch1, MemOperand(dest));
5156 __ addiu(dest, dest, 1);
5157 __ addiu(scratch4, scratch4, 1);
5158 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5159 __ bind(&dest_aligned);
5160 }
5161
5162 Label simple_loop;
5163
5164 __ And(scratch4, src, Operand(kReadAlignmentMask));
5165 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5166
5167 // Loop for src/dst that are not aligned the same way.
5168 // This loop uses lwl and lwr instructions. These instructions
5169 // depend on the endianness, and the implementation assumes little-endian.
5170 {
5171 Label loop;
5172 __ bind(&loop);
5173 __ lwr(scratch1, MemOperand(src));
5174 __ Addu(src, src, Operand(kReadAlignment));
5175 __ lwl(scratch1, MemOperand(src, -1));
5176 __ sw(scratch1, MemOperand(dest));
5177 __ Addu(dest, dest, Operand(kReadAlignment));
5178 __ Subu(scratch2, limit, dest);
5179 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5180 }
5181
5182 __ Branch(&byte_loop);
5183
5184 // Simple loop.
5185 // Copy words from src to dest, until less than four bytes left.
5186 // Both src and dest are word aligned.
5187 __ bind(&simple_loop);
5188 {
5189 Label loop;
5190 __ bind(&loop);
5191 __ lw(scratch1, MemOperand(src));
5192 __ Addu(src, src, Operand(kReadAlignment));
5193 __ sw(scratch1, MemOperand(dest));
5194 __ Addu(dest, dest, Operand(kReadAlignment));
5195 __ Subu(scratch2, limit, dest);
5196 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5197 }
5198
5199 // Copy bytes from src to dest until dest hits limit.
5200 __ bind(&byte_loop);
5201 // Test if dest has already reached the limit.
5202 __ Branch(&done, ge, dest, Operand(limit));
5203 __ lbu(scratch1, MemOperand(src));
5204 __ addiu(src, src, 1);
5205 __ sb(scratch1, MemOperand(dest));
5206 __ addiu(dest, dest, 1);
5207 __ Branch(&byte_loop);
5208
5209 __ bind(&done);
lrn@chromium.org7516f052011-03-30 08:52:27 +00005210}
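
// Editor's sketch of what the lwr/lwl pair in the misaligned loop computes:
// lwr fills the low-order bytes of the register from the unaligned address
// up to the next word boundary, and lwl fills the remaining high-order
// bytes ending at address + 3. On little-endian MIPS the net effect is one
// unaligned 32-bit load, whose portable equivalent is:
static unsigned SketchUnalignedLoadLE(const unsigned char* p) {
  return static_cast<unsigned>(p[0]) |
         (static_cast<unsigned>(p[1]) << 8) |
         (static_cast<unsigned>(p[2]) << 16) |
         (static_cast<unsigned>(p[3]) << 24);
}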
5211
5212
5213void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5214 Register c1,
5215 Register c2,
5216 Register scratch1,
5217 Register scratch2,
5218 Register scratch3,
5219 Register scratch4,
5220 Register scratch5,
5221 Label* not_found) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005222 // Register scratch3 is the general scratch register in this function.
5223 Register scratch = scratch3;
5224
 5225  // Make sure that both characters are not digits, as such strings have a
5226 // different hash algorithm. Don't try to look for these in the symbol table.
5227 Label not_array_index;
5228 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5229 __ Branch(&not_array_index,
5230 Ugreater,
5231 scratch,
5232 Operand(static_cast<int>('9' - '0')));
5233 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5234
 5235  // If the check failed, combine both characters into a single halfword.
 5236  // This is required by the contract of the method: code at the
 5237  // not_found branch expects this combination in the c1 register.
5238 Label tmp;
5239 __ sll(scratch1, c2, kBitsPerByte);
5240 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5241 __ Or(c1, c1, scratch1);
5242 __ bind(&tmp);
5243 __ Branch(not_found,
5244 Uless_equal,
5245 scratch,
5246 Operand(static_cast<int>('9' - '0')));
5247
5248 __ bind(&not_array_index);
5249 // Calculate the two character string hash.
5250 Register hash = scratch1;
5251 StringHelper::GenerateHashInit(masm, hash, c1);
5252 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5253 StringHelper::GenerateHashGetHash(masm, hash);
5254
5255 // Collect the two characters in a register.
5256 Register chars = c1;
5257 __ sll(scratch, c2, kBitsPerByte);
5258 __ Or(chars, chars, scratch);
5259
5260 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5261 // hash: hash of two character string.
5262
5263 // Load symbol table.
5264 // Load address of first element of the symbol table.
5265 Register symbol_table = c2;
5266 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5267
5268 Register undefined = scratch4;
5269 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5270
5271 // Calculate capacity mask from the symbol table capacity.
5272 Register mask = scratch2;
5273 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5274 __ sra(mask, mask, 1);
5275 __ Addu(mask, mask, -1);
5276
5277 // Calculate untagged address of the first element of the symbol table.
5278 Register first_symbol_table_element = symbol_table;
5279 __ Addu(first_symbol_table_element, symbol_table,
5280 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5281
5282 // Registers.
5283 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5284 // hash: hash of two character string
5285 // mask: capacity mask
5286 // first_symbol_table_element: address of the first element of
5287 // the symbol table
5288 // undefined: the undefined object
5289 // scratch: -
5290
5291 // Perform a number of probes in the symbol table.
5292 static const int kProbes = 4;
5293 Label found_in_symbol_table;
5294 Label next_probe[kProbes];
5295 Register candidate = scratch5; // Scratch register contains candidate.
5296 for (int i = 0; i < kProbes; i++) {
5297 // Calculate entry in symbol table.
5298 if (i > 0) {
5299 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5300 } else {
5301 __ mov(candidate, hash);
5302 }
5303
5304 __ And(candidate, candidate, Operand(mask));
5305
 5306  // Load the entry from the symbol table.
5307 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5308 __ sll(scratch, candidate, kPointerSizeLog2);
5309 __ Addu(scratch, scratch, first_symbol_table_element);
5310 __ lw(candidate, MemOperand(scratch));
5311
5312 // If entry is undefined no string with this hash can be found.
5313 Label is_string;
5314 __ GetObjectType(candidate, scratch, scratch);
5315 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5316
5317 __ Branch(not_found, eq, undefined, Operand(candidate));
5318 // Must be null (deleted entry).
5319 if (FLAG_debug_code) {
5320 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
5321 __ Assert(eq, "oddball in symbol table is not undefined or null",
5322 scratch, Operand(candidate));
5323 }
5324 __ jmp(&next_probe[i]);
5325
5326 __ bind(&is_string);
5327
5328 // Check that the candidate is a non-external ASCII string. The instance
5329 // type is still in the scratch register from the CompareObjectType
5330 // operation.
5331 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5332
5333 // If length is not 2 the string is not a candidate.
5334 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5335 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5336
5337 // Check if the two characters match.
5338 // Assumes that word load is little endian.
5339 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5340 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5341 __ bind(&next_probe[i]);
5342 }
5343
5344 // No matching 2 character string found by probing.
5345 __ jmp(not_found);
5346
5347 // Scratch register contains result when we fall through to here.
5348 Register result = candidate;
5349 __ bind(&found_in_symbol_table);
5350 __ mov(v0, result);
lrn@chromium.org7516f052011-03-30 08:52:27 +00005351}
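
// Editor's toy sketch of the probe loop above, using plain integers instead
// of V8's table types: open addressing over a power-of-two table, with one
// sentinel for never-used slots (stop probing) and another for deleted
// slots (keep probing). The triangular probe offsets are an assumption
// mirroring SymbolTable::GetProbeOffset.
static int SketchProbeTwoCharSymbol(const unsigned* table, unsigned capacity,
                                    unsigned hash, unsigned chars) {
  const unsigned kEmpty = 0xFFFFFFFFu;    // toy stand-in for undefined
  const unsigned kDeleted = 0xFFFFFFFEu;  // toy stand-in for null
  unsigned mask = capacity - 1;           // capacity is a power of two
  for (unsigned i = 0; i < 4; i++) {      // kProbes == 4 above
    unsigned entry = (hash + (i + i * i) / 2) & mask;
    unsigned candidate = table[entry];
    if (candidate == kEmpty) return -1;   // hash definitely not present
    if (candidate == kDeleted) continue;  // probe past the deletion
    if (candidate == chars) return static_cast<int>(entry);
  }
  return -1;  // give up; the caller creates a new string instead
}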
5352
5353
5354void StringHelper::GenerateHashInit(MacroAssembler* masm,
5355 Register hash,
5356 Register character) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005357 // hash = character + (character << 10);
5358 __ sll(hash, character, 10);
5359 __ addu(hash, hash, character);
5360 // hash ^= hash >> 6;
5361 __ sra(at, hash, 6);
5362 __ xor_(hash, hash, at);
lrn@chromium.org7516f052011-03-30 08:52:27 +00005363}
5364
5365
5366void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5367 Register hash,
5368 Register character) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005369 // hash += character;
5370 __ addu(hash, hash, character);
5371 // hash += hash << 10;
5372 __ sll(at, hash, 10);
5373 __ addu(hash, hash, at);
5374 // hash ^= hash >> 6;
5375 __ sra(at, hash, 6);
5376 __ xor_(hash, hash, at);
lrn@chromium.org7516f052011-03-30 08:52:27 +00005377}
5378
5379
5380void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5381 Register hash) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005382 // hash += hash << 3;
5383 __ sll(at, hash, 3);
5384 __ addu(hash, hash, at);
5385 // hash ^= hash >> 11;
5386 __ sra(at, hash, 11);
5387 __ xor_(hash, hash, at);
5388 // hash += hash << 15;
5389 __ sll(at, hash, 15);
5390 __ addu(hash, hash, at);
5391
5392 // if (hash == 0) hash = 27;
5393 __ ori(at, zero_reg, 27);
5394 __ movz(hash, at, hash);
lrn@chromium.org7516f052011-03-30 08:52:27 +00005395}
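
// Editor's C sketch of the complete hash computed by the three helpers
// above (init, add-character, finalize). The stub shifts with sra while the
// portable hasher below uses unsigned shifts; the two agree as long as the
// hash stays below the sign bit.
static unsigned SketchStringHash(const unsigned char* chars, int length) {
  unsigned hash = 0;
  for (int i = 0; i < length; i++) {
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;  // the movz above maps a zero hash to 27
}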
5396
5397
5398void SubStringStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005399 Label sub_string_runtime;
5400 // Stack frame on entry.
5401 // ra: return address
5402 // sp[0]: to
5403 // sp[4]: from
5404 // sp[8]: string
5405
5406 // This stub is called from the native-call %_SubString(...), so
5407 // nothing can be assumed about the arguments. It is tested that:
5408 // "string" is a sequential string,
5409 // both "from" and "to" are smis, and
5410 // 0 <= from <= to <= string.length.
5411 // If any of these assumptions fail, we call the runtime system.
5412
5413 static const int kToOffset = 0 * kPointerSize;
5414 static const int kFromOffset = 1 * kPointerSize;
5415 static const int kStringOffset = 2 * kPointerSize;
5416
5417 Register to = t2;
5418 Register from = t3;
5419
5420 // Check bounds and smi-ness.
5421 __ lw(to, MemOperand(sp, kToOffset));
5422 __ lw(from, MemOperand(sp, kFromOffset));
5423 STATIC_ASSERT(kFromOffset == kToOffset + 4);
5424 STATIC_ASSERT(kSmiTag == 0);
5425 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5426
5427 __ JumpIfNotSmi(from, &sub_string_runtime);
5428 __ JumpIfNotSmi(to, &sub_string_runtime);
5429
5430 __ sra(a3, from, kSmiTagSize); // Remove smi tag.
5431 __ sra(t5, to, kSmiTagSize); // Remove smi tag.
5432
5433 // a3: from index (untagged smi)
5434 // t5: to index (untagged smi)
5435
5436 __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
5437
5438 __ subu(a2, t5, a3);
5439 __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
5440
5441 // Special handling of sub-strings of length 1 and 2. One character strings
5442 // are handled in the runtime system (looked up in the single character
 5443  // cache). Two character strings are looked up in the symbol table.
5444 __ Branch(&sub_string_runtime, lt, a2, Operand(2));
5445
5446 // Both to and from are smis.
5447
5448 // a2: result string length
5449 // a3: from index (untagged smi)
5450 // t2: (a.k.a. to): to (smi)
5451 // t3: (a.k.a. from): from offset (smi)
5452 // t5: to index (untagged smi)
5453
5454 // Make sure first argument is a sequential (or flat) string.
5455 __ lw(t1, MemOperand(sp, kStringOffset));
 5456  __ JumpIfSmi(t1, &sub_string_runtime);
5457
5458 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
5459 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
5460 __ And(t4, a1, Operand(kIsNotStringMask));
5461
5462 __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
5463
5464 // a1: instance type
5465 // a2: result string length
5466 // a3: from index (untagged smi)
5467 // t1: string
5468 // t2: (a.k.a. to): to (smi)
5469 // t3: (a.k.a. from): from offset (smi)
5470 // t5: to index (untagged smi)
5471
5472 Label seq_string;
5473 __ And(t0, a1, Operand(kStringRepresentationMask));
5474 STATIC_ASSERT(kSeqStringTag < kConsStringTag);
5475 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5476
5477 // External strings go to runtime.
5478 __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
5479
5480 // Sequential strings are handled directly.
5481 __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
5482
5483 // Cons string. Try to recurse (once) on the first substring.
5484 // (This adds a little more generality than necessary to handle flattened
5485 // cons strings, but not much).
5486 __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset));
5487 __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset));
5488 __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5489 STATIC_ASSERT(kSeqStringTag == 0);
5490 // Cons and External strings go to runtime.
5491 __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
5492
 5493  // Definitely a sequential string.
5494 __ bind(&seq_string);
5495
5496 // a1: instance type
5497 // a2: result string length
5498 // a3: from index (untagged smi)
5499 // t1: string
5500 // t2: (a.k.a. to): to (smi)
5501 // t3: (a.k.a. from): from offset (smi)
5502 // t5: to index (untagged smi)
5503
5504 __ lw(t0, FieldMemOperand(t1, String::kLengthOffset));
5505 __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
5506 to = no_reg;
5507
5508 // a1: instance type
5509 // a2: result string length
5510 // a3: from index (untagged smi)
5511 // t1: string
5512 // t3: (a.k.a. from): from offset (smi)
5513 // t5: to index (untagged smi)
5514
5515 // Check for flat ASCII string.
5516 Label non_ascii_flat;
5517 STATIC_ASSERT(kTwoByteStringTag == 0);
5518
5519 __ And(t4, a1, Operand(kStringEncodingMask));
5520 __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
5521
5522 Label result_longer_than_two;
5523 __ Branch(&result_longer_than_two, gt, a2, Operand(2));
5524
5525 // Sub string of length 2 requested.
5526 // Get the two characters forming the sub string.
5527 __ Addu(t1, t1, Operand(a3));
5528 __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize));
5529 __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1));
5530
 5531  // Try to look up the two-character string in the symbol table.
5532 Label make_two_character_string;
5533 StringHelper::GenerateTwoCharacterSymbolTableProbe(
5534 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
5535 Counters* counters = masm->isolate()->counters();
5536 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5537 __ Addu(sp, sp, Operand(3 * kPointerSize));
5538 __ Ret();
5539
5540
5541 // a2: result string length.
5542 // a3: two characters combined into halfword in little endian byte order.
5543 __ bind(&make_two_character_string);
5544 __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
5545 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
5546 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5547 __ Addu(sp, sp, Operand(3 * kPointerSize));
5548 __ Ret();
5549
5550 __ bind(&result_longer_than_two);
5551
5552 // Allocate the result.
5553 __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
5554
5555 // v0: result string.
5556 // a2: result string length.
5557 // a3: from index (untagged smi)
5558 // t1: string.
5559 // t3: (a.k.a. from): from offset (smi)
5560 // Locate first character of result.
5561 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5562 // Locate 'from' character of string.
5563 __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5564 __ Addu(t1, t1, Operand(a3));
5565
5566 // v0: result string.
5567 // a1: first character of result string.
5568 // a2: result string length.
5569 // t1: first character of sub string to copy.
5570 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
5571 StringHelper::GenerateCopyCharactersLong(
5572 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
5573 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5574 __ Addu(sp, sp, Operand(3 * kPointerSize));
5575 __ Ret();
5576
5577 __ bind(&non_ascii_flat);
5578 // a2: result string length.
5579 // t1: string.
5580 // t3: (a.k.a. from): from offset (smi)
5581 // Check for flat two byte string.
5582
5583 // Allocate the result.
5584 __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
5585
5586 // v0: result string.
5587 // a2: result string length.
5588 // t1: string.
5589 // Locate first character of result.
5590 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5591 // Locate 'from' character of string.
5592 __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5593 // As "from" is a smi it is 2 times the value which matches the size of a two
5594 // byte character.
5595 __ Addu(t1, t1, Operand(from));
5596 from = no_reg;
5597
5598 // v0: result string.
5599 // a1: first character of result.
5600 // a2: result length.
5601 // t1: first character of string to copy.
5602 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5603 StringHelper::GenerateCopyCharactersLong(
5604 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
5605 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5606 __ Addu(sp, sp, Operand(3 * kPointerSize));
5607 __ Ret();
5608
5609 // Just jump to runtime to create the sub string.
5610 __ bind(&sub_string_runtime);
5611 __ TailCallRuntime(Runtime::kSubString, 3, 1);
5612}
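
// Editor's sketch of the fast-path admission checks above; any failure
// falls through to Runtime::kSubString. The order differs slightly from
// the stub, which defers the upper-bound check until after the type checks.
static bool SketchSubStringFastPathOk(int from, int to, int string_length) {
  if (from < 0) return false;            // negative start index
  if (from > to) return false;           // inverted range
  if (to - from < 2) return false;       // lengths 0 and 1 use the runtime
  if (to > string_length) return false;  // out of bounds
  return true;  // sequential-string representation checks still follow
}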
5613
5614
5615void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
5616 Register left,
5617 Register right,
5618 Register scratch1,
5619 Register scratch2,
5620 Register scratch3) {
5621 Register length = scratch1;
5622
5623 // Compare lengths.
5624 Label strings_not_equal, check_zero_length;
5625 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
5626 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
5627 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
5628 __ bind(&strings_not_equal);
5629 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
5630 __ Ret();
5631
5632 // Check if the length is zero.
5633 Label compare_chars;
5634 __ bind(&check_zero_length);
5635 STATIC_ASSERT(kSmiTag == 0);
5636 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
5637 __ li(v0, Operand(Smi::FromInt(EQUAL)));
5638 __ Ret();
5639
5640 // Compare characters.
5641 __ bind(&compare_chars);
5642
5643 GenerateAsciiCharsCompareLoop(masm,
5644 left, right, length, scratch2, scratch3, v0,
5645 &strings_not_equal);
5646
5647 // Characters are equal.
5648 __ li(v0, Operand(Smi::FromInt(EQUAL)));
5649 __ Ret();
lrn@chromium.org7516f052011-03-30 08:52:27 +00005650}
5651
5652
5653void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
lrn@chromium.org7516f052011-03-30 08:52:27 +00005654 Register left,
karlklose@chromium.org83a47282011-05-11 11:54:09 +00005655 Register right,
lrn@chromium.org7516f052011-03-30 08:52:27 +00005656 Register scratch1,
5657 Register scratch2,
5658 Register scratch3,
5659 Register scratch4) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005660 Label result_not_equal, compare_lengths;
5661 // Find minimum length and length difference.
5662 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
5663 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
5664 __ Subu(scratch3, scratch1, Operand(scratch2));
5665 Register length_delta = scratch3;
5666 __ slt(scratch4, scratch2, scratch1);
5667 __ movn(scratch1, scratch2, scratch4);
5668 Register min_length = scratch1;
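  // (Editor's note: the slt/movn pair is a branchless min: scratch4 is set
  // when the right length is smaller, and movn then overwrites scratch1
  // with scratch2 only in that case.)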
5669 STATIC_ASSERT(kSmiTag == 0);
5670 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
5671
5672 // Compare loop.
5673 GenerateAsciiCharsCompareLoop(masm,
5674 left, right, min_length, scratch2, scratch4, v0,
5675 &result_not_equal);
5676
5677 // Compare lengths - strings up to min-length are equal.
5678 __ bind(&compare_lengths);
5679 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
5680 // Use length_delta as result if it's zero.
5681 __ mov(scratch2, length_delta);
5682 __ mov(scratch4, zero_reg);
5683 __ mov(v0, zero_reg);
5684
5685 __ bind(&result_not_equal);
 5686  // Conditionally update the result based on either length_delta or
 5687  // the last comparison performed in the loop above.
5688 Label ret;
5689 __ Branch(&ret, eq, scratch2, Operand(scratch4));
5690 __ li(v0, Operand(Smi::FromInt(GREATER)));
5691 __ Branch(&ret, gt, scratch2, Operand(scratch4));
5692 __ li(v0, Operand(Smi::FromInt(LESS)));
5693 __ bind(&ret);
5694 __ Ret();
5695}
5696
5697
5698void StringCompareStub::GenerateAsciiCharsCompareLoop(
5699 MacroAssembler* masm,
5700 Register left,
5701 Register right,
5702 Register length,
5703 Register scratch1,
5704 Register scratch2,
5705 Register scratch3,
5706 Label* chars_not_equal) {
5707 // Change index to run from -length to -1 by adding length to string
5708 // start. This means that loop ends when index reaches zero, which
5709 // doesn't need an additional compare.
5710 __ SmiUntag(length);
5711 __ Addu(scratch1, length,
5712 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5713 __ Addu(left, left, Operand(scratch1));
5714 __ Addu(right, right, Operand(scratch1));
5715 __ Subu(length, zero_reg, length);
5716 Register index = length; // index = -length;
5717
5718
5719 // Compare loop.
5720 Label loop;
5721 __ bind(&loop);
5722 __ Addu(scratch3, left, index);
5723 __ lbu(scratch1, MemOperand(scratch3));
5724 __ Addu(scratch3, right, index);
5725 __ lbu(scratch2, MemOperand(scratch3));
5726 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
5727 __ Addu(index, index, 1);
5728 __ Branch(&loop, ne, index, Operand(zero_reg));
lrn@chromium.org7516f052011-03-30 08:52:27 +00005729}
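
// Editor's C sketch of the negative-index trick above: both pointers are
// advanced just past the last character and the index runs from -length up
// to 0, so reaching zero ends the loop with no separate bounds compare.
// The caller has already handled the length == 0 case.
static bool SketchAsciiCharsEqual(const unsigned char* left,
                                  const unsigned char* right, int length) {
  left += length;
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;
  }
  return true;
}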
5730
5731
5732void StringCompareStub::Generate(MacroAssembler* masm) {
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00005733 Label runtime;
5734
5735 Counters* counters = masm->isolate()->counters();
5736
5737 // Stack frame on entry.
5738 // sp[0]: right string
5739 // sp[4]: left string
5740 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
5741 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
5742
5743 Label not_same;
5744 __ Branch(&not_same, ne, a0, Operand(a1));
5745 STATIC_ASSERT(EQUAL == 0);
5746 STATIC_ASSERT(kSmiTag == 0);
5747 __ li(v0, Operand(Smi::FromInt(EQUAL)));
5748 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
5749 __ Addu(sp, sp, Operand(2 * kPointerSize));
5750 __ Ret();
5751
5752 __ bind(&not_same);
5753
5754 // Check that both objects are sequential ASCII strings.
5755 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
5756
5757 // Compare flat ASCII strings natively. Remove arguments from stack first.
5758 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
5759 __ Addu(sp, sp, Operand(2 * kPointerSize));
5760 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
5761
5762 __ bind(&runtime);
5763 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
lrn@chromium.org7516f052011-03-30 08:52:27 +00005764}


void StringAddStub::Generate(MacroAssembler* masm) {
  Label string_add_runtime, call_builtin;
  Builtins::JavaScript builtin_id = Builtins::ADD;

  Counters* counters = masm->isolate()->counters();

  // Stack on entry:
  //  sp[0]: second argument (right).
  //  sp[4]: first argument (left).

  // Load the two arguments.
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));  // First argument.
  __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.

  // Make sure that both arguments are strings if not known in advance.
  if (flags_ == NO_STRING_ADD_FLAGS) {
    __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
    // Load instance types.
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
    STATIC_ASSERT(kStringTag == 0);
    // If either is not a string, go to runtime.
    __ Or(t4, t0, Operand(t1));
    __ And(t4, t4, Operand(kIsNotStringMask));
    __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
  } else {
    // Here at least one of the arguments is definitely a string.
    // We convert the one that is not known to be a string.
    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
      GenerateConvertArgument(
          masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
      builtin_id = Builtins::STRING_ADD_RIGHT;
    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
      GenerateConvertArgument(
          masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
      builtin_id = Builtins::STRING_ADD_LEFT;
    }
  }

  // Both arguments are strings.
  // a0: first string
  // a1: second string
  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  {
    Label strings_not_empty;
    // Check if either of the strings is empty. In that case return the other.
    // These tests use a zero-length check on the string-length, which is a
    // Smi. Assert that Smi::FromInt(0) is really 0.
    STATIC_ASSERT(kSmiTag == 0);
    ASSERT(Smi::FromInt(0) == 0);
    __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
    __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
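    // movz(rd, rs, rt) is a conditional move: rd <- rs if and only if
    // rt == 0. A zero Smi is the all-zero word, so the movz below selects
    // the second string exactly when the first one is empty.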
    __ mov(v0, a0);       // Assume we'll return first string (from a0).
    __ movz(v0, a1, a2);  // If first is empty, return second (from a1).
    __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
    __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
    __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
    __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));

    __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
    __ Addu(sp, sp, Operand(2 * kPointerSize));
    __ Ret();

    __ bind(&strings_not_empty);
  }

  // Untag both string-lengths.
  __ sra(a2, a2, kSmiTagSize);
  __ sra(a3, a3, kSmiTagSize);

  // Both strings are non-empty.
  // a0: first string
  // a1: second string
  // a2: length of first string
  // a3: length of second string
  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // Look at the length of the result of adding the two strings.
  Label string_add_flat_result, longer_than_two;
  // Adding two lengths can't overflow.
  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
  __ Addu(t2, a2, Operand(a3));
  // Use the symbol table when adding two one-character strings, as it
  // helps later optimizations to return a symbol here.
  __ Branch(&longer_than_two, ne, t2, Operand(2));

  // Check that both strings are non-external ASCII strings.
  if (flags_ != NO_STRING_ADD_FLAGS) {
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
  }
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
                                                  &string_add_runtime);

  // Get the two characters forming the new string.
  __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
  __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));

  // Try to look up the two-character string in the symbol table. If it is
  // not found, just allocate a new one.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&make_two_character_string);
  // The resulting string has length 2, and the first characters of the two
  // input strings have been combined into a single halfword in register a2.
  // We can therefore fill the resulting string with one halfword store
  // instead of two loops (this assumes the processor is in little-endian
  // mode).
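  // For illustration: adding "a" (0x61) and "b" (0x62) leaves a2 == 0x6261,
  // and on little-endian MIPS the halfword store below writes the low byte
  // 0x61 first, producing the sequential string "ab".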
  __ li(t2, Operand(2));
  __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
  __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&longer_than_two);
  // Check if the resulting string will be flat.
  __ Branch(&string_add_flat_result, lt, t2,
            Operand(String::kMinNonFlatLength));
  // Handle exceptionally long strings in the runtime system.
  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
  ASSERT(IsPowerOf2(String::kMaxLength + 1));
  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
  __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));

  // If the result is not supposed to be flat, allocate a cons string object.
  // If both strings are ASCII the result is an ASCII cons string.
  if (flags_ != NO_STRING_ADD_FLAGS) {
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
  }
  Label non_ascii, allocated, ascii_data;
  STATIC_ASSERT(kTwoByteStringTag == 0);
  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
  __ And(t4, t0, Operand(t1));
  __ And(t4, t4, Operand(kStringEncodingMask));
  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));

  // Allocate an ASCII cons string.
  __ bind(&ascii_data);
  __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
  __ bind(&allocated);
  // Fill the fields of the cons string.
  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
  __ mov(v0, t3);
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii);
  // At least one of the strings is two-byte. Check whether it happens
  // to contain only ASCII characters.
  // t0: first instance type.
  // t1: second instance type.
  // Branch to ascii_data if _both_ instances have the kAsciiDataHintMask
  // bit set.
  __ And(at, t0, Operand(kAsciiDataHintMask));
  __ and_(at, at, t1);
  __ Branch(&ascii_data, ne, at, Operand(zero_reg));

  __ xor_(t0, t0, t1);
  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
  __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
  __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
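  // The xor/and above leaves both bits set only when the encoding bits
  // differ and exactly one operand carries the ASCII-data hint, i.e. an
  // ASCII string combined with a two-byte string whose contents are flagged
  // as pure ASCII; that case can still use the ASCII cons string.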

  // Allocate a two-byte cons string.
  __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
  __ Branch(&allocated);

  // Handle creating a flat result. First check that both strings are
  // sequential and that they have the same encoding.
  // a0: first string
  // a1: second string
  // a2: length of first string
  // a3: length of second string
  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // t2: sum of lengths.
  __ bind(&string_add_flat_result);
  if (flags_ != NO_STRING_ADD_FLAGS) {
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
  }
  // Check that both strings are sequential: branch to the runtime if
  // either string representation tag is non-zero.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ Or(t4, t0, Operand(t1));
  __ And(t4, t4, Operand(kStringRepresentationMask));
  __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));

  // Now check if both strings have the same encoding (ASCII/two-byte).
  // a0: first string
  // a1: second string
  // a2: length of first string
  // a3: length of second string
  // t0: first string instance type
  // t1: second string instance type
  // t2: sum of lengths.
  Label non_ascii_string_add_flat_result;
  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
  __ xor_(t3, t1, t0);
  __ And(t3, t3, Operand(kStringEncodingMask));
  __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
  // And see if it's ASCII (encoding bit set) or two-byte (encoding bit
  // clear).
  __ And(t3, t0, Operand(kStringEncodingMask));
  __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));

  // Both strings are sequential ASCII strings. We also know that they are
  // short (since the sum of the lengths is less than kMinNonFlatLength).
  // t2: length of resulting flat string
  __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
  // Locate first character of result.
  __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Locate first character of first argument.
  __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // a0: first character of first string.
  // a1: second string.
  // a2: length of first string.
  // a3: length of second string.
  // t2: first character of result.
  // t3: result string.
  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);

  // Load second argument and locate first character.
  __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // a1: first character of second string.
  // a3: length of second string.
  // t2: next character of result.
  // t3: result string.
  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
  __ mov(v0, t3);
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii_string_add_flat_result);
  // Both strings are sequential two-byte strings.
  // a0: first string.
  // a1: second string.
  // a2: length of first string.
  // a3: length of second string.
  // t2: sum of the lengths of the strings.
  __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
  // a0: first string.
  // a1: second string.
  // a2: length of first string.
  // a3: length of second string.
  // t3: result string.

  // Locate first character of result.
  __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Locate first character of first argument.
  __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // a0: first character of first string.
  // a1: second string.
  // a2: length of first string.
  // a3: length of second string.
  // t2: first character of result.
  // t3: result string.
  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);

  // Locate first character of second argument.
  __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // a1: first character of second string.
  // a3: length of second string.
  // t2: next character of result (after copy of first string).
  // t3: result string.
  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);

  __ mov(v0, t3);
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Just jump to runtime to add the two strings.
  __ bind(&string_add_runtime);
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);

  if (call_builtin.is_linked()) {
    __ bind(&call_builtin);
    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
  }
}


void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                            int stack_offset,
                                            Register arg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Register scratch4,
                                            Label* slow) {
  // First check if the argument is already a string.
  Label not_string, done;
  __ JumpIfSmi(arg, &not_string);
  __ GetObjectType(arg, scratch1, scratch1);
  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
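  // All string instance types precede FIRST_NONSTRING_TYPE in the type
  // enumeration, so this single comparison classifies the object as a
  // string.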

  // Check the number to string cache.
  Label not_cached;
  __ bind(&not_string);
  // Puts the cached result into scratch1.
  NumberToStringStub::GenerateLookupNumberStringCache(masm,
                                                      arg,
                                                      scratch1,
                                                      scratch2,
                                                      scratch3,
                                                      scratch4,
                                                      false,
                                                      &not_cached);
  __ mov(arg, scratch1);
  __ sw(arg, MemOperand(sp, stack_offset));
  __ jmp(&done);

  // Check if the argument is a safe string wrapper.
  __ bind(&not_cached);
  __ JumpIfSmi(arg, slow);
  __ GetObjectType(arg, scratch1, scratch2);  // map -> scratch1.
  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
  __ And(scratch2, scratch2, scratch4);
  __ Branch(slow, ne, scratch2, Operand(scratch4));
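  // Only unwrap the JSValue directly when its map still carries the
  // "string wrapper safe for default valueOf" bit; a wrapper with a
  // user-patched valueOf must take the slow path instead.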
  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
  __ sw(arg, MemOperand(sp, stack_offset));

  __ bind(&done);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  Label miss;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ Subu(v0, a0, a1);
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(a1);
    __ SmiUntag(a0);
    __ Subu(v0, a1, a0);
  }
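  // A Smi is the value shifted left by one with a zero tag bit, so the
  // tagged difference is zero exactly when the values are equal. For
  // ordering, the operands are untagged first: subtracting two tagged Smis
  // of opposite sign could otherwise overflow the 32-bit result.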
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);

  Label generic_stub;
  Label unordered;
  Label miss;
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &generic_stub);

  __ GetObjectType(a0, a2, a2);
  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ GetObjectType(a1, a2, a2);
  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));

  // Inline the double comparison, falling back to the general compare
  // stub if NaN is involved or the FPU is unsupported.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);

    // Load left and right operand.
    __ Subu(a2, a1, Operand(kHeapObjectTag));
    __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
    __ Subu(a2, a0, Operand(kHeapObjectTag));
    __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));

    Label fpu_eq, fpu_lt, fpu_gt;
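    // c(cond, D, fs, ft) sets the FPU condition flag according to the
    // double-precision comparison; bc1t/bc1f then branch if the flag is
    // true/false. Each branch is followed by a nop to fill the MIPS branch
    // delay slot.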
    // Compare operands (test if unordered).
    __ c(UN, D, f0, f2);
    // Don't base result on status bits when a NaN is involved.
    __ bc1t(&unordered);
    __ nop();

    // Test if equal.
    __ c(EQ, D, f0, f2);
    __ bc1t(&fpu_eq);
    __ nop();

    // Test if unordered or less (unordered case is already handled).
    __ c(ULT, D, f0, f2);
    __ bc1t(&fpu_lt);
    __ nop();

    // Otherwise it's greater.
    __ bc1f(&fpu_gt);
    __ nop();

    // Return a result of -1, 0, or 1.
    __ bind(&fpu_eq);
    __ li(v0, Operand(EQUAL));
    __ Ret();

    __ bind(&fpu_lt);
    __ li(v0, Operand(LESS));
    __ Ret();

    __ bind(&fpu_gt);
    __ li(v0, Operand(GREATER));
    __ Ret();

    __ bind(&unordered);
  }

  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
  __ bind(&generic_stub);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SYMBOLS);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are symbols.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(tmp1, tmp1, Operand(tmp2));
  __ And(tmp1, tmp1, kIsSymbolMask);
  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(a0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(v0, right);
  // Symbols are compared by identity.
  __ Ret(ne, left, Operand(right));
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRINGS);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;
  Register tmp3 = t0;
  Register tmp4 = t1;
  Register tmp5 = t2;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ Or(tmp3, tmp1, tmp2);
  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
  __ Branch(&miss, ne, tmp5, Operand(zero_reg));

  // Fast check for identical strings.
  Label left_ne_right;
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
  __ mov(v0, zero_reg);  // In the delay slot.
  __ Ret();
  __ bind(&left_ne_right);

  // Handle not identical strings.

  // Check that both strings are symbols. If they are, we're done
  // because we already know they are not identical.
  ASSERT(GetCondition() == eq);
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(tmp3, tmp1, Operand(tmp2));
  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
  Label is_symbol;
  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
  __ mov(v0, a0);  // In the delay slot.
  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(a0));
  __ Ret();
  __ bind(&is_symbol);

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
                                                  &runtime);

  // Compare flat ASCII strings. Returns when done.
  StringCompareStub::GenerateFlatAsciiStringEquals(
      masm, left, right, tmp1, tmp2, tmp3);

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ Push(left, right);
  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  Label miss;
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &miss);

  __ GetObjectType(a0, a2, a2);
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
  __ GetObjectType(a1, a2, a2);
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));

  ASSERT(GetCondition() == eq);
  __ Subu(v0, a0, Operand(a1));
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  __ Push(a1, a0);
  __ push(ra);

  // Call the runtime system in a fresh internal frame.
  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
                                             masm->isolate());
  __ EnterInternalFrame();
  __ Push(a1, a0);
  __ li(t0, Operand(Smi::FromInt(op_)));
  __ push(t0);
  __ CallExternalReference(miss, 3);
  __ LeaveInternalFrame();
  // Compute the entry point of the rewritten stub.
  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
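  // The miss handler returns the rewritten stub as a tagged Code object in
  // v0; its first instruction lives kHeaderSize bytes past the start of
  // that object, hence the offset above.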
  // Restore registers.
  __ pop(ra);
  __ pop(a0);
  __ pop(a1);
  __ Jump(a2);
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // No need to pop or drop anything, LeaveExitFrame will restore the old
  // stack, thus dropping the allocated space for the return value.
  // The saved ra is after the reserved stack space for the 4 args.
  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));

  if (FLAG_debug_code && EnableSlowAsserts()) {
    // In case of an error the return address may point to a memory area
    // filled with kZapValue by the GC.
    // Dereference the address and check for this.
    __ lw(t0, MemOperand(t9));
    __ Assert(ne, "Received invalid return address.", t0,
              Operand(reinterpret_cast<uint32_t>(kZapValue)));
  }
  __ Jump(t9);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    ExternalReference function) {
  __ li(t9, Operand(function));
  this->GenerateCall(masm, t9);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  __ Move(t9, target);
  __ AssertStackIsAligned();
  // Allocate space for arg slots.
  __ Subu(sp, sp, kCArgsSlotsSize);
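  // The MIPS o32 calling convention requires the caller to reserve argument
  // slots below sp for the callee, even when the arguments are passed in
  // registers; kCArgsSlotsSize covers those four words.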

  // Block the trampoline pool through the whole function to make sure the
  // number of generated instructions is constant.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);

  // We need to get the current 'pc' value, which is not available on MIPS.
  Label find_ra;
  masm->bal(&find_ra);  // ra = pc + 8.
  masm->nop();  // Branch delay slot nop.
  masm->bind(&find_ra);

  const int kNumInstructionsToJump = 6;
  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
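  // kNumInstructionsToJump counts every instruction from find_ra up to and
  // including the jump's delay slot (addiu, sw, the li below, which is
  // presumably a fixed two-instruction lui/ori pair here, the jump, and its
  // nop), so ra ends up pointing at the instruction right after the call;
  // the ASSERT_EQ at the end verifies the count.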
  // Push return address (accessible to GC through exit frame pc).
  // This spot for ra was reserved in EnterExitFrame.
  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
  masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
                       RelocInfo::CODE_TARGET), true);
  // Call the function.
  masm->Jump(t9);
  // Make sure the stored 'ra' points to this position.
  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
}


MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
    MacroAssembler* masm,
    Label* miss,
    Label* done,
    Register receiver,
    Register properties,
    String* name,
    Register scratch0) {
  // If the names of the slots in the range from 1 to kProbes - 1 for the
  // hash value are not equal to the name, and the kProbes-th slot is unused
  // (its name is the undefined value), the hash table is guaranteed not to
  // contain the property. This holds even if some slots represent deleted
  // properties (their names are the null value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
    __ Subu(index, index, Operand(1));
    __ And(index, index, Operand(
        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
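    // GetProbeOffset(i) yields the i-th triangular number, (i + i*i) / 2;
    // probing with triangular offsets modulo a power-of-two capacity visits
    // every slot before repeating, so the probe sequence cannot get stuck
    // in a short cycle.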

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    // index *= 3.
    __ mov(at, index);
    __ sll(index, index, 1);
    __ Addu(index, index, at);
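    // That is, index * 3 is computed as (index << 1) + index, using a copy,
    // a shift, and an add rather than a multiply instruction.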

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    Register tmp = properties;

    __ sll(scratch0, index, 1);
    __ Addu(tmp, properties, scratch0);
    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    ASSERT(!tmp.is(entity_name));
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ Branch(done, eq, entity_name, Operand(tmp));

    if (i != kInlinedProbes - 1) {
      // Stop if found the property.
      __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));

      // Check if the entry name is not a symbol.
      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
      __ lbu(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
      __ And(scratch0, entity_name, Operand(kIsSymbolMask));
      __ Branch(miss, eq, scratch0, Operand(zero_reg));

      // Restore the properties.
      __ lw(properties,
            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
    }
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
       a2.bit() | a1.bit() | a0.bit());

  __ MultiPush(spill_mask);
  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ li(a1, Operand(Handle<String>(name)));
  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
  MaybeObject* result = masm->TryCallStub(&stub);
  if (result->IsFailure()) return result;
  __ MultiPop(spill_mask);

  __ Branch(done, eq, v0, Operand(zero_reg));
  __ Branch(miss, ne, v0, Operand(zero_reg));
  return result;
}


// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register elements,
                                                        Register name,
                                                        Register scratch1,
                                                        Register scratch2) {
  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  // Compute the capacity mask.
  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
  __ sra(scratch1, scratch1, kSmiTagSize);  // Convert smi to int.
  __ Subu(scratch1, scratch1, Operand(1));
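  // The capacity is always a power of two, so capacity - 1 is an all-ones
  // bit mask; And-ing a hash with it reduces the hash modulo the capacity.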

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted so the hash does not
      // need to be right shifted in a separate instruction first; the
      // combined value hash + i + i * i is right shifted by the srl below.
      ASSERT(StringDictionary::GetProbeOffset(i) <
             1 << (32 - String::kHashFieldOffset));
      __ Addu(scratch2, scratch2, Operand(
          StringDictionary::GetProbeOffset(i) << String::kHashShift));
    }
    __ srl(scratch2, scratch2, String::kHashShift);
    __ And(scratch2, scratch1, scratch2);

    // Scale the index by multiplying by the element size.
    ASSERT(StringDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.
    __ mov(at, scratch2);
    __ sll(scratch2, scratch2, 1);
    __ Addu(scratch2, scratch2, at);

    // Check if the key is identical to the name.
    __ sll(at, scratch2, 2);
    __ Addu(scratch2, elements, at);
    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
    __ Branch(done, eq, name, Operand(at));
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
       a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
      ~(scratch1.bit() | scratch2.bit());

  __ MultiPush(spill_mask);
  __ Move(a0, elements);
  __ Move(a1, name);
  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ mov(scratch2, a2);
  __ MultiPop(spill_mask);

  __ Branch(done, ne, v0, Operand(zero_reg));
  __ Branch(miss, eq, v0, Operand(zero_reg));
}


void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // Registers:
  //  a0: StringDictionary to probe.
  //  a1: key (a string).
  // Returns:
  //  v0: zero if the lookup failed, non-zero otherwise.

  Register result = v0;
  Register dictionary = a0;
  Register key = a1;
  Register index = a2;
  Register mask = a3;
  Register hash = t0;
  Register undefined = t1;
  Register entry_key = t2;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ sra(mask, mask, kSmiTagSize);
  __ Subu(mask, mask, Operand(1));

  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted so the hash does not
      // need to be right shifted in a separate instruction first; the
      // combined value hash + i + i * i is right shifted by the srl below.
      ASSERT(StringDictionary::GetProbeOffset(i) <
             1 << (32 - String::kHashFieldOffset));
      __ Addu(index, hash, Operand(
          StringDictionary::GetProbeOffset(i) << String::kHashShift));
    } else {
      __ mov(index, hash);
    }
    __ srl(index, index, String::kHashShift);
    __ And(index, mask, index);

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    // index *= 3.
    __ mov(at, index);
    __ sll(index, index, 1);
    __ Addu(index, index, at);

    ASSERT_EQ(kSmiTagSize, 1);
    __ sll(index, index, 2);
    __ Addu(index, index, dictionary);
    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));

    // Stop if found the property.
    __ Branch(&in_dictionary, eq, entry_key, Operand(key));

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a symbol.
      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ lbu(entry_key,
             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ And(result, entry_key, Operand(kIsSymbolMask));
      __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing a negative lookup then probing failure should be
  // treated as a lookup success. For a positive lookup probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ mov(result, zero_reg);
    __ Ret();
  }

  __ bind(&in_dictionary);
  __ li(result, 1);
  __ Ret();

  __ bind(&not_in_dictionary);
  __ mov(result, zero_reg);
  __ Ret();
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS