Roll V8 back to 3.6
Roll back to V8 3.6 to fix the x86 build; we don't have ucontext.h.
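
For context (not part of the reverted commits themselves): the 3.7-era
sources read the interrupted thread's register state out of the profiler
signal handler's context via ucontext.h, a header Android's x86 bionic does
not provide, so the tree no longer compiles for x86. A minimal sketch of
that dependency, with hypothetical names, assuming the usual Linux sampler
shape:

  #include <signal.h>
  #include <ucontext.h>  // not shipped by Android's x86 bionic

  // Illustrative only: the sampler recovers register state from the signal
  // context, which is what forces the ucontext.h include.
  static void ProfilerSignalHandler(int sig, siginfo_t* info, void* context) {
    ucontext_t* ucontext = static_cast<ucontext_t*>(context);
    mcontext_t& mcontext = ucontext->uc_mcontext;
    (void)sig; (void)info;
    (void)mcontext;  // a real handler would sample pc/sp/fp fields here
  }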
This reverts commits:
5d4cdbf7a67d3662fa0bee4efdb7edd8daec9b0b
c7cc028aaeedbbfa11c11d0b7b243b3d9e837ed9
592a9fc1d8ea420377a2e7efd0600e20b058be2b
Bug: 5688872
Change-Id: Ic961bb5e65b778e98bbfb71cce71d99fa949e995
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index f772db9..36450c9 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -98,9 +98,9 @@
&gc,
TAG_OBJECT);
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+ int map_index = strict_mode_ == kStrictMode
+ ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+ : Context::FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// as the map of the allocated object.
@@ -122,6 +122,7 @@
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
+
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
@@ -155,19 +156,21 @@
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
- // Set up the object header.
- __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
+ // Setup the object header.
+ __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+ // Copy the global object from the previous context.
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -186,119 +189,6 @@
}
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the global context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(r3, &after_sentinel);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmp(r3, Operand::Zero());
- __ Assert(eq, message);
- }
- __ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
- __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
- __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- Label* fail) {
- // Registers on entry:
- //
- // r3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize + elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- fail,
- TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
-}
-
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -306,6 +196,10 @@
// [sp + kPointerSize]: literal index.
// [sp + (2 * kPointerSize)]: literals array.
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
// Load boilerplate object into r3 and check if we need to create a
// boilerplate.
Label slow_case;
@@ -313,59 +207,57 @@
__ ldr(r0, MemOperand(sp, 1 * kPointerSize));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, ip);
__ b(eq, &slow_case);
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
- __ b(ne, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&check_fast_elements);
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
+ if (mode_ == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
__ push(r3);
__ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
__ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ CompareRoot(r3, expected_map_index);
+ __ LoadRoot(ip, expected_map_index);
+ __ cmp(r3, ip);
__ Assert(eq, message);
__ pop(r3);
}
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size,
+ r0,
+ r1,
+ r2,
+ &slow_case,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ add(r2, r0, Operand(JSArray::kSize));
+ __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+ }
// Return and remove the on-stack parameters.
__ add(sp, sp, Operand(3 * kPointerSize));
@@ -376,49 +268,6 @@
}
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: object literal flags.
- // [sp + kPointerSize]: constant properties.
- // [sp + (2 * kPointerSize)]: literal index.
- // [sp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
- __ cmp(r0, Operand(size >> kPointerSizeLog2));
- __ b(ne, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -480,7 +329,7 @@
__ b(gt, ¬_special);
// For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
+ static const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
__ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word.
@@ -583,9 +432,7 @@
Label is_smi, done;
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
+ __ JumpIfSmi(object, &is_smi);
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
@@ -607,6 +454,7 @@
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi to double using VFP instructions.
+ __ SmiUntag(scratch1, object);
__ vmov(dst.high(), scratch1);
__ vcvt_f64_s32(dst, dst.high());
if (destination == kCoreRegisters) {
@@ -641,10 +489,11 @@
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
+ Label is_smi;
Label done;
Label not_in_int32_range;
- __ UntagAndJumpIfSmi(dst, object, &done);
+ __ JumpIfSmi(object, &is_smi);
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
__ cmp(scratch1, heap_number_map);
__ b(ne, not_number);
@@ -664,6 +513,10 @@
scratch1,
scratch2,
scratch3);
+ __ jmp(&done);
+
+ __ bind(&is_smi);
+ __ SmiUntag(dst, object);
__ bind(&done);
}
@@ -706,7 +559,7 @@
// Get the absolute value of the object (as an unsigned integer).
__ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
- // Get mantissa[51:20].
+    // Get mantissa[51:20].
// Get the position of the first set bit.
__ CountLeadingZeros(dst1, int_scratch, scratch2);
@@ -836,7 +689,10 @@
Label done;
- __ UntagAndJumpIfSmi(dst, object, &done);
+ // Untag the object into the destination register.
+ __ SmiUntag(dst, object);
+ // Just return if the object is a smi.
+ __ JumpIfSmi(object, &done);
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
@@ -937,7 +793,7 @@
// non zero bits left. So we need the (30 - exponent) last bits of the
// 31 higher bits of the mantissa to be null.
// Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
// Get the 32 higher bits of the mantissa in dst.
__ Ubfx(dst,
@@ -982,11 +838,9 @@
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+ 0, 2);
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
@@ -1003,29 +857,6 @@
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
- return true;
- }
- if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
- WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
- WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
-}
-
-
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1366,8 +1197,6 @@
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
-
- AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
0, 2);
__ pop(pc); // Return.
@@ -1385,7 +1214,7 @@
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1777,8 +1606,6 @@
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
@@ -1886,41 +1713,6 @@
}
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ stm(db_w, sp, kCallerSaved | lr.bit());
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize));
- }
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = r1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize));
- }
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
- }
- __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
-}
-
-
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
@@ -2074,13 +1866,12 @@
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- }
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ __ LeaveInternalFrame();
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -2121,14 +1912,13 @@
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- }
+ __ EnterInternalFrame();
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
+ __ LeaveInternalFrame();
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
@@ -2238,10 +2028,6 @@
void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -2324,7 +2110,7 @@
__ cmp(ip, Operand(scratch2));
__ b(ne, ¬_smi_result);
// Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand(0));
+ __ tst(scratch1, Operand(scratch1));
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ Ret(ne);
// We need -0 if we were multiplying a negative number with 0 to get 0.
@@ -3296,12 +3082,10 @@
// Check if cache matches: Double value is stored in uint32_t[2] array.
__ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
__ cmp(r2, r4);
- __ cmp(r3, r5, eq);
+ __ b(ne, &calculate);
+ __ cmp(r3, r5);
__ b(ne, &calculate);
// Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
if (tagged) {
// Pop input value from stack and load result into r0.
__ pop();
@@ -3314,9 +3098,6 @@
} // if (CpuFeatures::IsSupported(VFP3))
__ bind(&calculate);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
__ bind(&invalid_cache);
ExternalReference runtime_function =
@@ -3352,11 +3133,10 @@
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
@@ -3369,15 +3149,14 @@
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ mov(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
__ Ret();
}
}
@@ -3394,7 +3173,6 @@
} else {
__ vmov(r0, r1, d2);
}
- AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3404,10 +3182,6 @@
__ CallCFunction(ExternalReference::math_cos_double_function(isolate),
0, 1);
break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
case TranscendentalCache::LOG:
__ CallCFunction(ExternalReference::math_log_double_function(isolate),
0, 1);
@@ -3425,7 +3199,6 @@
// Add more cases when necessary.
case TranscendentalCache::SIN: return Runtime::kMath_sin;
case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
@@ -3439,207 +3212,105 @@
}
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope vfp3_scope(VFP3);
- const Register base = r1;
- const Register exponent = r2;
- const Register heapnumbermap = r5;
- const Register heapnumber = r0;
- const DoubleRegister double_base = d1;
- const DoubleRegister double_exponent = d2;
- const DoubleRegister double_result = d3;
- const DoubleRegister double_scratch = d0;
- const SwVfpRegister single_scratch = s0;
- const Register scratch = r9;
- const Register scratch2 = r7;
+ Label call_runtime;
- Label call_runtime, done, int_exponent;
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack to double registers.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ Label base_not_smi;
+ Label exponent_not_smi;
+ Label convert_exponent;
+
+ const Register base = r0;
+ const Register exponent = r1;
+ const Register heapnumbermap = r5;
+ const Register heapnumber = r6;
+ const DoubleRegister double_base = d0;
+ const DoubleRegister double_exponent = d1;
+ const DoubleRegister double_result = d2;
+ const SwVfpRegister single_scratch = s0;
+ const Register scratch = r9;
+ const Register scratch2 = r7;
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
__ ldr(base, MemOperand(sp, 1 * kPointerSize));
__ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ // Convert base to double value and store it in d0.
+ __ JumpIfNotSmi(base, &base_not_smi);
+ // Base is a Smi. Untag and convert it.
+ __ SmiUntag(base);
+ __ vmov(single_scratch, base);
+ __ vcvt_f64_s32(double_base, single_scratch);
+ __ b(&convert_exponent);
- __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+ __ bind(&base_not_smi);
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
-
+ // Base is a heapnumber. Load it into double register.
__ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent);
- __ bind(&base_is_smi);
- __ vmov(single_scratch, scratch);
- __ vcvt_f64_s32(double_base, single_scratch);
- __ bind(&unpack_exponent);
+ __ bind(&convert_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+ // The base is in a double register and the exponent is
+ // an untagged smi. Allocate a heap number and call a
+ // C function for integer exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(lr);
+ __ PrepareCallCFunction(1, 1, scratch);
+ __ SetCallCDoubleArguments(double_base, exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()),
+ 1, 1);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ __ vstr(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(r0, heapnumber);
+ __ Ret(2 * kPointerSize);
+ __ bind(&exponent_not_smi);
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
+ // Exponent is a heapnumber. Load it into double register.
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- // Base is already in double_base.
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ vldr(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type_ != INTEGER) {
- Label int_exponent_convert;
- // Detect integer exponents stored as double.
- __ vcvt_u32_f64(single_scratch, double_exponent);
- // We do not check for NaN or Infinity here because comparing numbers on
- // ARM correctly distinguishes NaNs. We end up calling the built-in.
- __ vcvt_f64_u32(double_scratch, single_scratch);
- __ VFPCompareAndSetFlags(double_scratch, double_exponent);
- __ b(eq, &int_exponent_convert);
-
- if (exponent_type_ == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label not_plus_half;
-
- // Test for 0.5.
- __ vmov(double_scratch, 0.5);
- __ VFPCompareAndSetFlags(double_exponent, double_scratch);
- __ b(ne, ¬_plus_half);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY);
- __ VFPCompareAndSetFlags(double_base, double_scratch);
- __ vneg(double_result, double_scratch, eq);
- __ b(eq, &done);
-
- // Add +0 to convert -0 to +0.
- __ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vsqrt(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(¬_plus_half);
- __ vmov(double_scratch, -0.5);
- __ VFPCompareAndSetFlags(double_exponent, double_scratch);
- __ b(ne, &call_runtime);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY);
- __ VFPCompareAndSetFlags(double_base, double_scratch);
- __ vmov(double_result, kDoubleRegZero, eq);
- __ b(eq, &done);
-
- // Add +0 to convert -0 to +0.
- __ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vmov(double_result, 1);
- __ vsqrt(double_scratch, double_scratch);
- __ vdiv(double_result, double_result, double_scratch);
- __ jmp(&done);
- }
-
+ // The base and the exponent are in double registers.
+ // Allocate a heap number and call a C function for
+ // double exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
__ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- }
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
- __ jmp(&done);
-
- __ bind(&int_exponent_convert);
- __ vcvt_u32_f64(single_scratch, double_exponent);
- __ vmov(scratch, single_scratch);
- }
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
-
- // Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type_ == INTEGER) {
- __ mov(scratch, exponent);
- } else {
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
- }
- __ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, 1.0);
-
- // Get absolute value of exponent.
- __ cmp(scratch, Operand(0));
- __ mov(scratch2, Operand(0), LeaveCC, mi);
- __ sub(scratch, scratch2, scratch, LeaveCC, mi);
-
- Label while_true;
- __ bind(&while_true);
- __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
- __ vmul(double_result, double_result, double_scratch, cs);
- __ vmul(double_scratch, double_scratch, double_scratch, ne);
- __ b(ne, &while_true);
-
- __ cmp(exponent, Operand(0));
- __ b(ge, &done);
- __ vmov(double_scratch, 1.0);
- __ vdiv(double_result, double_scratch, double_result);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ VFPCompareAndSetFlags(double_result, 0.0);
- __ b(ne, &done);
- // double_exponent may not containe the exponent value if the input was a
- // smi. We set it with exponent value before bailing out.
- __ vmov(single_scratch, exponent);
- __ vcvt_f64_s32(double_exponent, single_scratch);
-
- // Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(
- heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- ASSERT(heapnumber.is(r0));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
- __ Ret(2);
- } else {
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- }
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
-
- __ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
- __ Ret();
+ __ mov(r0, heapnumber);
+ __ Ret(2 * kPointerSize);
}
+
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
}
@@ -3648,34 +3319,14 @@
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ __ Throw(r0);
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
- code->set_is_pregenerated(true);
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ __ ThrowUncatchable(type, r0);
}
@@ -3779,7 +3430,8 @@
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
+ __ ldr(r3, MemOperand(ip));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
@@ -3817,10 +3469,9 @@
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
- // Set up argc and the builtin function in callee-saved registers.
+ // Setup argc and the builtin function in callee-saved registers.
__ mov(r4, Operand(r0));
__ mov(r5, Operand(r1));
@@ -3859,27 +3510,13 @@
true);
__ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r0, MemOperand(r2));
- // Fall through to the next label.
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
__ bind(&throw_termination_exception);
- __ ThrowUncatchable(r0);
+ GenerateThrowUncatchable(masm, TERMINATION);
__ bind(&throw_normal_exception);
- __ Throw(r0);
+ GenerateThrowTOS(masm);
}
@@ -3890,7 +3527,7 @@
// r3: argc
// [sp+0]: argv
- Label invoke, handler_entry, exit;
+ Label invoke, exit;
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
@@ -3911,7 +3548,7 @@
// r2: receiver
// r3: argc
- // Set up argv in r4.
+ // Setup argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
if (CpuFeatures::IsSupported(VFP3)) {
offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
@@ -3934,7 +3571,7 @@
__ ldr(r5, MemOperand(r5));
__ Push(r8, r7, r6, r5);
- // Set up frame pointer for the frame to be pushed.
+ // Setup frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
@@ -3953,33 +3590,31 @@
__ bind(&cont);
__ push(ip);
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
+ // Call a faked try-block that does the invoke.
+ __ bl(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ // Coming in here the fp will be invalid because the PushTryHandler below
+ // sets it to 0 to signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
// Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ mov(r5, Operand(isolate->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
+ __ ldr(r5, MemOperand(ip));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r5, MemOperand(ip));
@@ -4073,7 +3708,7 @@
const Register inline_site = r9;
const Register scratch = r2;
- const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
+ const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
Label slow, loop, is_instance, is_not_instance, not_js_object;
@@ -4090,9 +3725,11 @@
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
Label miss;
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
+ __ cmp(function, ip);
__ b(ne, &miss);
- __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
+ __ cmp(map, ip);
__ b(ne, &miss);
__ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -4101,7 +3738,7 @@
}
// Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
@@ -4122,8 +3759,7 @@
__ sub(inline_site, lr, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
- __ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ str(map, MemOperand(scratch));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -4215,11 +3851,10 @@
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
+ __ EnterInternalFrame();
+ __ Push(r0, r1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ __ LeaveInternalFrame();
__ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4237,7 +3872,7 @@
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
- const int kDisplacement =
+ static const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
// Check that the key is a smi.
@@ -4392,7 +4027,7 @@
__ str(r3, FieldMemOperand(r0, i));
}
- // Set up the callee in-object property.
+ // Setup the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ ldr(r3, MemOperand(sp, 2 * kPointerSize));
const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4405,7 +4040,7 @@
Heap::kArgumentsLengthIndex * kPointerSize;
__ str(r2, FieldMemOperand(r0, kLengthOffset));
- // Set up the elements pointer in the allocated arguments object.
+ // Setup the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
__ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
@@ -4500,7 +4135,7 @@
__ Ret();
// Do the runtime call to allocate the arguments object.
- // r2 = argument count (tagged)
+  // r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -4573,7 +4208,7 @@
// Get the parameters pointer from the stack.
__ ldr(r2, MemOperand(sp, 1 * kPointerSize));
- // Set up the elements pointer in the allocated arguments object and
+ // Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
@@ -4585,7 +4220,7 @@
// Copy the fixed array slots.
Label loop;
- // Set up r4 to point to the first array slot.
+ // Setup r4 to point to the first array slot.
__ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
// Pre-decrement r2 with kPointerSize on each iteration.
@@ -4615,6 +4250,10 @@
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4622,10 +4261,10 @@
// sp[8]: subject string
// sp[12]: JSRegExp object
- const int kLastMatchInfoOffset = 0 * kPointerSize;
- const int kPreviousIndexOffset = 1 * kPointerSize;
- const int kSubjectOffset = 2 * kPointerSize;
- const int kJSRegExpOffset = 3 * kPointerSize;
+ static const int kLastMatchInfoOffset = 0 * kPointerSize;
+ static const int kPreviousIndexOffset = 1 * kPointerSize;
+ static const int kSubjectOffset = 2 * kPointerSize;
+ static const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime, invoke_regexp;
@@ -4646,7 +4285,7 @@
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
- __ cmp(r0, Operand(0));
+ __ tst(r0, Operand(r0));
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -4717,7 +4356,8 @@
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r0, ip);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
@@ -4735,39 +4375,25 @@
Label seq_string;
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string. None of the following string type tests will
- // succeed if subject is not a string or a short external string.
- __ and_(r1,
- r0,
- Operand(kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask),
- SetCC);
+ // First check for flat string.
+ __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ b(eq, &seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
- // r1: whether subject is a string and if yes, its string representation
// Check for flat cons string or sliced string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ Label cons_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
__ b(lt, &cons_string);
- __ b(eq, &external_string);
-
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ b(ne, &runtime);
+ __ b(eq, &runtime);
// String is sliced.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4778,7 +4404,8 @@
// String is a cons string, check whether it is flat.
__ bind(&cons_string);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
+ __ cmp(r0, r1);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
// Is first part of cons or parent of slice a flat string?
@@ -4787,8 +4414,7 @@
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &external_string);
-
+ __ b(ne, &runtime);
__ bind(&seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
@@ -4824,8 +4450,8 @@
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 8;
- const int kParameterRegisters = 4;
+ static const int kRegExpExecuteArguments = 8;
+ static const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
@@ -4854,7 +4480,8 @@
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4905,7 +4532,8 @@
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(isolate->factory()->the_hole_value()));
+ __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
+ __ ldr(r1, MemOperand(r1, 0));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(r2, 0));
@@ -4920,10 +4548,10 @@
Label termination_exception;
__ b(eq, &termination_exception);
- __ Throw(r0);
+ __ Throw(r0); // Expects thrown value in r0.
__ bind(&termination_exception);
- __ ThrowUncatchable(r0);
+ __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
__ bind(&failure);
// For failure and exception return null.
@@ -4947,25 +4575,16 @@
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
+ __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ mov(r2, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- r2,
- r7,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- r7,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ mov(r3, last_match_info_elements);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -4996,26 +4615,6 @@
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
- // External string. Short external strings have already been ruled out.
- // r0: scratch
- __ bind(&external_string);
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ tst(r0, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
- }
- __ ldr(subject,
- FieldMemOperand(subject, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ sub(subject,
- subject,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&seq_string);
-
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -5071,11 +4670,11 @@
// Set input, index and length fields from arguments.
__ ldr(r1, MemOperand(sp, kPointerSize * 0));
- __ ldr(r2, MemOperand(sp, kPointerSize * 1));
- __ ldr(r6, MemOperand(sp, kPointerSize * 2));
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
+ __ ldr(r1, MemOperand(sp, kPointerSize * 1));
+ __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+ __ ldr(r1, MemOperand(sp, kPointerSize * 2));
+ __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
// Fill out the elements FixedArray.
// r0: JSArray, tagged.
@@ -5097,9 +4696,9 @@
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
- __ cmp(r5, Operand(0));
+ __ tst(r5, Operand(r5));
__ bind(&loop);
- __ b(le, &done); // Jump if r5 is negative or zero.
+  __ b(le, &done);  // Jump if r5 is negative or zero.
__ sub(r5, r5, Operand(1), SetCC);
__ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
__ jmp(&loop);
@@ -5113,48 +4712,7 @@
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // r1 : the function to call
- // r2 : cache cell for call target
- Label done;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(r3, r1);
- __ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
-
- // An uninitialized cache is patched with the function.
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // r1 : the function to call
- // r2 : cache cell for call target
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -5169,12 +4727,16 @@
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc_ * kPointerSize));
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ str(r1, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
}
+ // Get the function to call from the stack.
+ // function, receiver [, arguments]
+ __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
__ JumpIfSmi(r1, &non_function);
@@ -5212,7 +4774,7 @@
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_METHOD);
+ __ SetCallKind(r5, CALL_AS_FUNCTION);
{
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -5223,7 +4785,7 @@
// of the original receiver from the call site).
__ bind(&non_function);
__ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Set up the number of arguments.
+ __ mov(r0, Operand(argc_)); // Setup the number of arguments.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ SetCallKind(r5, CALL_AS_METHOD);
@@ -5232,48 +4794,6 @@
}
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // r0 : number of arguments
- // r1 : the function to call
- // r2 : cache cell for call target
- Label slow, non_function_call;
-
- // Check that the function is not a smi.
- __ JumpIfSmi(r1, &non_function_call);
- // Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
-
- // Jump to the function-specific construct stub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // r0: number of arguments
- // r1: called object
- // r3: object type
- Label do_call;
- __ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
void CompareStub::PrintName(StringStream* stream) {
@@ -5335,41 +4855,100 @@
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
__ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(index_));
+ __ cmp(ip, Operand(scratch_));
__ b(ls, index_out_of_range_);
- __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(eq, &flat_string);
- StringCharLoadGenerator::Generate(masm,
- object_,
- index_,
- result_,
- &call_runtime_);
+ // Handle non-flat strings.
+ __ and_(result_, result_, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ __ cmp(result_, Operand(kExternalStringTag));
+ __ b(gt, &sliced_string);
+ __ b(eq, &call_runtime_);
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ Label assure_seq_string;
+ __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(ne, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ jmp(&assure_seq_string);
+
+ // SlicedString, unpack and add offset.
+ __ bind(&sliced_string);
+ __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
+ __ add(scratch_, scratch_, result_);
+ __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+
+ // Assure that we are dealing with a sequential string. Go to runtime if not.
+ __ bind(&assure_seq_string);
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // Check that parent is not an external string. Go to runtime otherwise.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(ne, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ tst(result_, Operand(kStringEncodingMask));
+ __ b(ne, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register. We can
+ // add without shifting since the smi tag size is the log2 of the
+ // number of bytes in a two-byte character.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ add(scratch_, object_, Operand(scratch_));
+ __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
+ __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+ __ bind(&got_char_code);
__ mov(result_, Operand(result_, LSL, kSmiTagSize));
__ bind(&exit_);
}
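The two-byte fast path above leans entirely on the smi encoding noted in the STATIC_ASSERTs: a smi is the integer shifted left by one bit, so a smi-tagged index is already the byte offset into a two-byte string and can be added without shifting. A minimal C++ model of that arithmetic (kSmiTagSize as asserted above; everything else illustrative):

#include <cassert>
#include <cstdint>

// Illustrative smi helpers: V8 on ARM tags a small integer by shifting it
// left one bit (tag bit 0 == 0 for smis).
static const int kSmiTagSize = 1;
inline intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }
inline intptr_t SmiUntag(intptr_t smi) { return smi >> kSmiTagSize; }

int main() {
  intptr_t index = 3;                  // character index
  intptr_t smi_index = SmiTag(index);  // == 6
  // For a two-byte string each character is 2 bytes, so the smi-tagged
  // index is already the byte offset: no shift before the ldrh.
  assert(smi_index == index * 2);
  // For an ASCII (1-byte) string the tag must be shifted away first,
  // which is what the LSR by kSmiTagSize in the stub does.
  assert((smi_index >> kSmiTagSize) == index);
  return 0;
}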
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
__ CheckMap(index_,
- result_,
+ scratch_,
Heap::kHeapNumberMapRootIndex,
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ push(object_);
+ __ Push(object_, index_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -5380,14 +4959,15 @@
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ Move(index_, r0);
+ __ Move(scratch_, r0);
+ __ pop(index_);
__ pop(object_);
// Reload the instance type.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(index_, index_out_of_range_);
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
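The slow path's index conversion can be modeled roughly as below. This is only a sketch of what Runtime::kNumberToIntegerMapMinusZero has to guarantee for this code to work, not its real signature: -0 must come back as the smi 0, and anything non-integral must come back as a non-smi so the JumpIfNotSmi above routes it to index_out_of_range_. Range checks are omitted for brevity.

#include <cmath>
#include <cstdio>
#include <optional>

// Rough model: -0 maps to the smi 0; non-integral input yields no smi at
// all, which the caller treats as out-of-range.
std::optional<int> NumberToIntegerMapMinusZero(double d) {
  if (d == 0.0) return 0;  // also catches -0.0, mapping it to +0
  if (!std::isfinite(d) || d != std::floor(d)) return std::nullopt;
  return static_cast<int>(d);
}

int main() {
  printf("%d\n", *NumberToIntegerMapMinusZero(-0.0));                 // 0
  printf("%d\n", *NumberToIntegerMapMinusZero(7.0));                  // 7
  printf("%d\n", (int)NumberToIntegerMapMinusZero(1.5).has_value());  // 0
  return 0;
}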
@@ -5396,7 +4976,6 @@
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ mov(index_, Operand(index_, LSL, kSmiTagSize));
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
__ Move(result_, r0);
@@ -5425,15 +5004,15 @@
STATIC_ASSERT(kSmiTag == 0);
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result_, Operand(ip));
__ b(eq, &slow_case_);
__ bind(&exit_);
}
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5458,8 +5037,7 @@
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -5714,7 +5292,7 @@
// scratch: -
// Perform a number of probes in the symbol table.
- const int kProbes = 4;
+ static const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
@@ -5743,11 +5321,11 @@
__ cmp(undefined, candidate);
__ b(eq, not_found);
- // Must be the hole (deleted entry).
+ // Must be null (deleted entry).
if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(ip, candidate);
- __ Assert(eq, "oddball in symbol table is not undefined or the hole");
+ __ Assert(eq, "oddball in symbol table is not undefined or null");
}
__ jmp(&next_probe[i]);
@@ -5839,28 +5417,41 @@
// 0 <= from <= to <= string.length.
// If any of these assumptions fail, we call the runtime system.
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
- __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
+ // Check bounds and smi-ness.
+ Register to = r6;
+ Register from = r7;
+
+ __ Ldrd(to, from, MemOperand(sp, kToOffset));
STATIC_ASSERT(kFromOffset == kToOffset + 4);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
// I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r2, ASR, 1), SetCC);
- __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
+ __ mov(r2, Operand(to, ASR, 1), SetCC);
+ __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
// If either to or from had the smi tag bit set, then carry is set now.
__ b(cs, &runtime); // Either "from" or "to" is not a smi.
- // We want to bailout to runtime here if From is negative. In that case, the
- // next instruction is not executed and we fall through to bailing out to
- // runtime. pl is the opposite of mi.
- // Both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, pl);
- __ b(mi, &runtime); // Fail if from > to.
+ __ b(mi, &runtime); // From is negative.
- // Make sure first argument is a string.
+ // Both to and from are smis.
+ __ sub(r2, r2, Operand(r3), SetCC);
+ __ b(mi, &runtime); // Fail if from > to.
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked for in the symbol cache in
+ // generated code.
+ __ cmp(r2, Operand(2));
+ __ b(lt, &runtime);
+
+ // r2: result string length
+ // r3: from index (untagged smi)
+ // r6 (a.k.a. to): to (smi)
+ // r7 (a.k.a. from): from offset (smi)
+ // Make sure first argument is a sequential (or flat) string.
__ ldr(r0, MemOperand(sp, kStringOffset));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r0, &runtime);
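The paired ASR-with-SetCC instructions above fold two smi checks into one branch: each shift moves the tag bit into the carry flag, and because the second mov is conditional on carry-clear (cc), carry ends up set if either input was not a smi. A small C++ simulation of that flag dance (illustrative only):

#include <cassert>
#include <cstdint>

// Model of the combined smi check: untag two values with an arithmetic
// shift right by one, tracking bit 0 in a simulated carry flag.
struct ShiftResult { int32_t value; bool carry; };

ShiftResult AsrSetCC(int32_t v) { return { v >> 1, (v & 1) != 0 }; }

bool EitherNotSmi(int32_t to, int32_t from) {
  ShiftResult a = AsrSetCC(to);
  bool carry = a.carry;
  if (!carry) {  // the second mov executes only while carry is clear (cc)
    ShiftResult b = AsrSetCC(from);
    carry = b.carry;
  }
  return carry;  // corresponds to b(cs, &runtime)
}

int main() {
  assert(!EitherNotSmi(4 << 1, 7 << 1));        // both smis
  assert(EitherNotSmi((4 << 1) | 1, 7 << 1));   // first has tag bit set
  assert(EitherNotSmi(4 << 1, (7 << 1) | 1));   // second has tag bit set
  return 0;
}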
@@ -5875,15 +5466,67 @@
__ cmp(r2, Operand(r4, ASR, 1));
__ b(eq, &return_r0);
+ Label create_slice;
+ if (FLAG_string_slices) {
+ __ cmp(r2, Operand(SlicedString::kMinLength));
+ __ b(ge, &create_slice);
+ }
+
+ // r0: original string
+ // r1: instance type
+ // r2: result string length
+ // r3: from index (untagged smi)
+ // r6 (a.k.a. to): to (smi)
+ // r7 (a.k.a. from): from offset (smi)
+ Label seq_string;
+ __ and_(r4, r1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
+ __ cmp(r4, Operand(kConsStringTag));
+ __ b(gt, &runtime); // Slices and external strings go to runtime.
+ __ b(lt, &seq_string); // Sequential strings are handled directly.
+
+ // Cons string. Try to recurse (once) on the first substring.
+ // (This adds a little more generality than necessary to handle flattened
+ // cons strings, but not much).
+ __ ldr(r0, FieldMemOperand(r0, ConsString::kFirstOffset));
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ tst(r1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ b(ne, &runtime); // Cons, slices and external strings go to runtime.
+
+ // Definitely a sequential string.
+ __ bind(&seq_string);
+
+ // r0: original string
+ // r1: instance type
+ // r2: result string length
+ // r3: from index (untagged smi)
+ // r6 (a.k.a. to): to (smi)
+ // r7 (a.k.a. from): from offset (smi)
+ __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
+ __ cmp(r4, Operand(to));
+ __ b(lt, &runtime); // Fail if to > length.
+ to = no_reg;
+
+ // r0: original string or left hand side of the original cons string.
+ // r1: instance type
+ // r2: result string length
+ // r3: from index (untagged smi)
+ // r7 (a.k.a. from): from offset (smi)
+ // Check for flat ASCII string.
+ Label non_ascii_flat;
+ __ tst(r1, Operand(kStringEncodingMask));
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ b(eq, &non_ascii_flat);
+
Label result_longer_than_two;
- // Check for special case of two character ASCII string, in which case
- // we do a lookup in the symbol table first.
__ cmp(r2, Operand(2));
__ b(gt, &result_longer_than_two);
- __ b(lt, &runtime);
- __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
-
+ // Sub string of length 2 requested.
// Get the two characters forming the sub string.
__ add(r0, r0, Operand(r3));
__ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
@@ -5893,6 +5536,7 @@
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
+ Counters* counters = masm->isolate()->counters();
__ jmp(&return_r0);
// r2: result string length.
@@ -5903,114 +5547,18 @@
__ jmp(&return_r0);
__ bind(&result_longer_than_two);
- // Deal with different string types: update the index if necessary
- // and put the underlying string into r5.
- // r0: original string
- // r1: instance type
- // r2: length
- // r3: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ tst(r1, Operand(kIsIndirectStringMask));
- __ b(eq, &seq_or_external_string);
- __ tst(r1, Operand(kSlicedNotConsMask));
- __ b(ne, &sliced_string);
- // Cons string. Check whether it is flat, then fetch first part.
- __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
- __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
- // Update instance type.
- __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
+ // Locate 'from' character of string.
+ __ add(r5, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(from, ASR, 1));
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
- // Update instance type.
- __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
+ // Allocate the result.
+ __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(r5, r0);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // r5: underlying subject string
- // r1: instance type of underlying subject string
- // r2: length
- // r3: adjusted start index (untagged)
- __ cmp(r2, Operand(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ b(lt, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
- __ bind(&set_slice_header);
- __ mov(r3, Operand(r3, LSL, 1));
- __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ jmp(&return_r0);
-
- __ bind(&copy_routine);
- }
-
- // r5: underlying subject string
- // r1: instance type of underlying subject string
- // r2: length
- // r3: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r1, Operand(kExternalStringTag));
- __ b(eq, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ tst(r1, Operand(kShortExternalStringTag));
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
- // r5 already points to the first character of underlying string.
- __ jmp(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_sequential);
-
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
-
- // Locate first character of substring to copy.
- __ add(r5, r5, r3);
+ // r0: result string
+ // r2: result string length
+ // r5: first character of substring to copy
+ // r7 (a.k.a. from): from offset (smi)
// Locate first character of result.
__ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
@@ -6023,16 +5571,30 @@
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
+ __ bind(&non_ascii_flat);
+ // r0: original string
+ // r2: result string length
+ // r7 (a.k.a. from): from offset (smi)
+ // Check for flat two byte string.
- // Locate first character of substring to copy.
+ // Locate 'from' character of string.
+ __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // As "from" is a smi it is 2 times the value which matches the size of a two
+ // byte character.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(r5, r5, Operand(r3, LSL, 1));
+ __ add(r5, r5, Operand(from));
+
+ // Allocate the result.
+ __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
+
+ // r0: result string
+ // r2: result string length
+ // r5: first character of substring to copy
// Locate first character of result.
__ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ from = no_reg;
+
// r0: result string.
// r1: first character of result.
// r2: result length.
@@ -6040,9 +5602,72 @@
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
+ __ jmp(&return_r0);
+
+ if (FLAG_string_slices) {
+ __ bind(&create_slice);
+ // r0: original string
+ // r1: instance type
+ // r2: length
+ // r3: from index (untagged smi)
+ // r6 (a.k.a. to): to (smi)
+ // r7 (a.k.a. from): from offset (smi)
+ Label allocate_slice, sliced_string, seq_string;
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(r1, Operand(kStringRepresentationMask));
+ __ b(eq, &seq_string);
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ tst(r1, Operand(kIsIndirectStringMask));
+ // External string. Jump to runtime.
+ __ b(eq, &runtime);
+
+ __ tst(r1, Operand(kSlicedNotConsMask));
+ __ b(ne, &sliced_string);
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
+ __ LoadRoot(r9, Heap::kEmptyStringRootIndex);
+ __ cmp(r5, r9);
+ __ b(ne, &runtime);
+ __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
+ __ jmp(&allocate_slice);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+ __ add(r7, r7, r5);
+ __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+ __ jmp(&allocate_slice);
+
+ __ bind(&seq_string);
+ // Sequential string. Just move string to the right register.
+ __ mov(r5, r0);
+
+ __ bind(&allocate_slice);
+ // r1: instance type of original string
+ // r2: length
+ // r5: underlying subject string
+ // r7 (a.k.a. from): from offset (smi)
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyways due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ tst(r1, Operand(kStringEncodingMask));
+ __ b(eq, &two_byte_slice);
+ __ AllocateAsciiSlicedString(r0, r2, r3, r4, &runtime);
+ __ jmp(&set_slice_header);
+ __ bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(r0, r2, r3, r4, &runtime);
+ __ bind(&set_slice_header);
+ __ str(r7, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+ __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+ }
__ bind(&return_r0);
- Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
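The create_slice path above is the whole point of FLAG_string_slices: a long enough substring becomes a (parent, offset) descriptor instead of a character copy. A toy model, with kMinLength chosen arbitrarily for illustration (the real SlicedString::kMinLength may differ):

#include <cstdio>

// Toy sliced string: for long substrings the stub allocates a small
// (parent, offset, length) view instead of copying characters.
struct FlatString { const char* chars; int length; };
struct SlicedString { const FlatString* parent; int offset; int length; };

static const int kMinLength = 13;  // shorter slices are copied instead

SlicedString MakeSlice(const FlatString* parent, int from, int to) {
  // Caller has already checked 0 <= from <= to <= parent->length and
  // to - from >= kMinLength, mirroring the bailouts in the stub.
  return SlicedString{ parent, from, to - from };
}

int main() {
  FlatString s = { "the quick brown fox jumps over", 30 };
  SlicedString slice = MakeSlice(&s, 4, 19);  // "quick brown fox"
  printf("%.*s\n", slice.length, s.chars + slice.offset);
  return 0;
}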
@@ -6075,7 +5700,7 @@
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ cmp(length, Operand(0));
+ __ tst(length, Operand(length));
__ b(ne, &compare_chars);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
@@ -6108,7 +5733,7 @@
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
- __ cmp(min_length, Operand(0));
+ __ tst(min_length, Operand(min_length));
__ b(eq, &compare_lengths);
// Compare loop.
@@ -6199,7 +5824,7 @@
void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
+ Label string_add_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
Counters* counters = masm->isolate()->counters();
@@ -6214,7 +5839,7 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfEitherSmi(r0, r1, &call_runtime);
+ __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -6224,7 +5849,7 @@
// If either is not a string, go to runtime.
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &call_runtime);
+ __ b(ne, &string_add_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
@@ -6293,7 +5918,7 @@
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
- &call_runtime);
+ &string_add_runtime);
// Get the two characters forming the sub string.
__ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
@@ -6315,7 +5940,7 @@
// halfword store instruction (which assumes that processor is
// in a little endian mode)
__ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
+ __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
__ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
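The length-2 fast case packs both characters into one halfword, which serves both as the probe key for the symbol table and as the payload of the single little-endian strh above. A sketch of that packing (names are illustrative):

#include <cstdint>
#include <cstdio>

// Combine two bytes into one halfword, little-endian, as the stub's
// halfword store assumes: the first character lands in the low byte.
uint16_t TwoCharKey(uint8_t c1, uint8_t c2) {
  return static_cast<uint16_t>(c1 | (c2 << 8));
}

int main() {
  uint16_t key = TwoCharKey('o', 'k');
  char buf[2];
  buf[0] = key & 0xff;         // what the halfword store writes first
  buf[1] = (key >> 8) & 0xff;
  printf("%c%c\n", buf[0], buf[1]);  // "ok"
  return 0;
}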
@@ -6323,14 +5948,14 @@
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(r6, Operand(ConsString::kMinLength));
+ __ cmp(r6, Operand(String::kMinNonFlatLength));
__ b(lt, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
ASSERT(IsPowerOf2(String::kMaxLength + 1));
// kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
__ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &call_runtime);
+ __ b(hs, &string_add_runtime);
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ASCII the result is an ASCII cons string.
@@ -6348,7 +5973,7 @@
// Allocate an ASCII cons string.
__ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
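Once the combined length reaches String::kMinNonFlatLength, the stub builds a cons string: a two-field node pointing at the operands, deferring any copying. A toy version of that decision (threshold value assumed for illustration; inputs assumed flat for brevity):

#include <cstdio>
#include <string>

// Toy cons string: above the threshold, concatenation builds a two-pointer
// node and defers copying; below it, the result is flattened immediately.
struct Str {
  std::string flat;            // used when first/second are null
  const Str* first = nullptr;
  const Str* second = nullptr;
  size_t length() const {
    return first ? first->length() + second->length() : flat.size();
  }
};

static const size_t kMinNonFlatLength = 13;  // assumed threshold

Str Concat(const Str& a, const Str& b) {     // inputs assumed flat
  Str result;
  if (a.length() + b.length() < kMinNonFlatLength) {
    result.flat = a.flat + b.flat;           // short result: copy now
  } else {
    result.first = &a;                       // long result: cons node
    result.second = &b;
  }
  return result;
}

int main() {
  Str a{ "hello, " }, b{ "assembler world" };
  Str c = Concat(a, b);                      // 22 chars >= 13: cons node
  printf("cons length = %zu\n", c.length());
  return 0;
}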
@@ -6373,13 +5998,11 @@
__ b(eq, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
__ jmp(&allocated);
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
+ // Handle creating a flat result. First check that both strings are
+ // sequential and that they have the same encoding.
// r0: first string
// r1: second string
// r2: length of first string
@@ -6387,7 +6010,6 @@
// r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r6: sum of lengths.
- Label first_prepared, second_prepared;
__ bind(&string_add_flat_result);
if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -6395,88 +6017,97 @@
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
-
- // Check whether both strings have same encoding
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
- __ b(ne, &call_runtime);
-
+ // Check that both strings are sequential.
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r7,
- r0,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &first_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r4, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r1,
- r1,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &second_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r5, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r7: first character of first string
- // r1: first character of second string
+ __ tst(r5, Operand(kStringRepresentationMask), eq);
+ __ b(ne, &string_add_runtime);
+ // Now check if both strings have the same encoding (ASCII/Two-byte).
+ // r0: first string.
+ // r1: second string.
// r2: length of first string.
// r3: length of second string.
- // r6: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r5, Operand(kStringEncodingMask));
+ // r6: sum of lengths.
+ Label non_ascii_string_add_flat_result;
+ ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
+ __ eor(r7, r4, Operand(r5));
+ __ tst(r7, Operand(kStringEncodingMask));
+ __ b(ne, &string_add_runtime);
+ // And see if it's ASCII or two-byte.
+ __ tst(r4, Operand(kStringEncodingMask));
__ b(eq, &non_ascii_string_add_flat_result);
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r7: first character of first string.
- // r1: first character of second string.
+ // Both strings are sequential ASCII strings. We also know that they are
+ // short (since the sum of the lengths is less than kMinNonFlatLength).
+ // r6: length of resulting flat string
+ __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
+ // Locate first character of result.
+ __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r0: first character of first string.
+ // r1: second string.
// r2: length of first string.
// r3: length of second string.
// r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+
+ // Load second argument and locate first character.
+ __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r1: first character of second string.
+ // r3: length of second string.
// r6: next character of result.
+ // r7: result string.
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ __ mov(r0, Operand(r7));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
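The eor/tst pair above is a one-test encoding comparison: since kStringEncodingMask has a single bit set (as the ASSERT checks), XORing the two instance types and masking is nonzero exactly when the encodings differ. A minimal sketch, with an assumed mask value:

#include <cassert>
#include <cstdint>

static const uint8_t kStringEncodingMask = 0x4;  // single-bit mask (assumed)

// One XOR and one AND decide whether two instance types agree in encoding.
bool SameEncoding(uint8_t type1, uint8_t type2) {
  return ((type1 ^ type2) & kStringEncodingMask) == 0;
}

int main() {
  assert(SameEncoding(0x04, 0x05));   // both have the ASCII bit set
  assert(!SameEncoding(0x04, 0x01));  // mixed encodings -> runtime
  return 0;
}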
__ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r7: first character of first string.
- // r1: first character of second string.
+ // Both strings are sequential two byte strings.
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: sum of length of strings.
+ __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r7: result string.
+
+ // Locate first character of result.
+ __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r0: first character of first string.
+ // r1: second string.
// r2: length of first string.
// r3: length of second string.
// r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
- // r6: next character of result.
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+
+ // Locate first character of second argument.
+ __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r1: first character of second string.
+ // r3: length of second string.
+ // r6: next character of result (after copy of first string).
+ // r7: result string.
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+
+ __ mov(r0, Operand(r7));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
+ __ bind(&string_add_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
@@ -6560,15 +6191,15 @@
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
+ Label unordered;
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &generic_stub);
__ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined1);
+ __ b(ne, &miss);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
+ __ b(ne, &miss);
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
@@ -6592,28 +6223,14 @@
__ mov(r0, Operand(LESS), LeaveCC, lt);
__ mov(r0, Operand(GREATER), LeaveCC, gt);
__ Ret();
+
+ __ bind(&unordered);
}
- __ bind(&unordered);
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &miss);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(eq, &unordered);
- }
-
__ bind(&miss);
GenerateMiss(masm);
}
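The inlined HEAP_NUMBERS path reduces to a three-way double comparison whose unordered (NaN) outcome must escape to the generic CompareStub. Roughly, in C++ (constants mirror the LESS/EQUAL/GREATER results the stub materializes):

#include <cmath>
#include <cstdio>

enum { LESS = -1, EQUAL = 0, GREATER = 1 };

// Model of the inlined compare: conditional moves produce the three
// results, while NaN (unordered) bails out to the generic stub.
bool CompareDoubles(double lhs, double rhs, int* result) {
  if (std::isnan(lhs) || std::isnan(rhs)) return false;  // unordered
  *result = lhs < rhs ? LESS : (lhs > rhs ? GREATER : EQUAL);
  return true;
}

int main() {
  int r;
  if (CompareDoubles(1.0, 2.0, &r)) printf("%d\n", r);   // -1
  if (!CompareDoubles(NAN, 2.0, &r)) printf("unordered -> generic stub\n");
  return 0;
}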
@@ -6661,8 +6278,6 @@
ASSERT(state_ == CompareIC::STRINGS);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
-
// Registers containing left and right operands respectively.
Register left = r1;
Register right = r0;
@@ -6696,39 +6311,28 @@
// Check that both strings are symbols. If they are, we're done
// because we already know they are not identical.
- if (equality) {
- ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp3, tmp1, Operand(tmp2));
- __ tst(tmp3, Operand(kIsSymbolMask));
- // Make sure r0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(r0));
- __ Ret(ne);
- }
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp3, tmp1, Operand(tmp2));
+ __ tst(tmp3, Operand(kIsSymbolMask));
+ // Make sure r0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(r0));
+ __ Ret(ne);
// Check that both strings are sequential ASCII.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
// Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
- }
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2, tmp3);
// Handle more complex cases in runtime.
__ bind(&runtime);
__ Push(left, right);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
__ bind(&miss);
GenerateMiss(masm);
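The symbol check above works because symbols are interned: two symbol pointers that differ can never hold equal contents, so the stub may return "not equal" without touching a single character. A minimal interning table makes the invariant concrete:

#include <cassert>
#include <string>
#include <unordered_set>

// Interning: equal contents always resolve to the same stored object, so
// pointer inequality implies content inequality.
const std::string* Intern(std::unordered_set<std::string>& table,
                          const std::string& s) {
  return &*table.insert(s).first;  // same contents -> same node
}

int main() {
  std::unordered_set<std::string> symbols;
  const std::string* a = Intern(symbols, "foo");
  const std::string* b = Intern(symbols, "foo");
  const std::string* c = Intern(symbols, "bar");
  assert(a == b);  // equal contents interned to one object
  assert(a != c);  // different pointers -> definitely not equal
  return 0;
}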
@@ -6755,47 +6359,25 @@
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &miss);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r2, Operand(known_map_));
- __ b(ne, &miss);
- __ cmp(r3, Operand(known_map_));
- __ b(ne, &miss);
-
- __ sub(r0, r0, Operand(r1));
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ __ Push(r1, r0);
+ __ push(lr);
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r0);
- __ push(lr);
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- // Compute the entry point of the rewritten stub.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ pop(lr);
- __ pop(r0);
- __ pop(r1);
- }
-
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ __ EnterInternalFrame();
+ __ Push(r1, r0);
+ __ mov(ip, Operand(Smi::FromInt(op_)));
+ __ push(ip);
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
+ // Compute the entry point of the rewritten stub.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ pop(lr);
+ __ pop(r0);
+ __ pop(r1);
__ Jump(r2);
}
@@ -6828,18 +6410,19 @@
}
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0) {
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0) {
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
+ // (their names are the null value).
for (int i = 0; i < kInlinedProbes; i++) {
// scratch0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
@@ -6867,17 +6450,10 @@
__ b(eq, done);
if (i != kInlinedProbes - 1) {
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
// Stop if found the property.
__ cmp(entity_name, Operand(Handle<String>(name)));
__ b(eq, miss);
- Label the_hole;
- __ cmp(entity_name, tmp);
- __ b(eq, &the_hole);
-
// Check if the entry name is not a symbol.
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
@@ -6885,8 +6461,6 @@
__ tst(entity_name, Operand(kIsSymbolMask));
__ b(eq, miss);
- __ bind(&the_hole);
-
// Restore the properties.
__ ldr(properties,
FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -6901,12 +6475,14 @@
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ mov(r1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) return result;
+ __ tst(r0, Operand(r0));
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
__ b(ne, miss);
+ return result;
}
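The negative lookup unrolls kInlinedProbes steps of the quadratic sequence quoted above, (hash + i + i*i) & mask; only an undefined slot proves absence, while deleted entries must be probed past. A compact model (probe count assumed to be 4; sentinel representation illustrative):

#include <cstdint>
#include <cstdio>

enum ProbeResult { ABSENT, FOUND, UNKNOWN };

static const int kInlinedProbes = 4;  // assumed to match the unrolled loop

// Probe a few slots: a null (never-used) slot proves the name is absent;
// finding the interned name itself means the negative lookup must miss.
ProbeResult NegativeLookup(const char* const* keys, uint32_t mask,
                           uint32_t hash, const char* name) {
  for (int i = 0; i < kInlinedProbes; i++) {
    uint32_t index = (hash + i + i * i) & mask;  // the quoted masked index
    const char* entry = keys[index];
    if (entry == nullptr) return ABSENT;  // "undefined": property not there
    if (entry == name) return FOUND;      // same interned name: miss
    // Otherwise a different (or deleted) entry: keep probing.
  }
  return UNKNOWN;  // inline probes exhausted: fall back to the full stub
}

int main() {
  const char* name = "x";
  const char* keys[8] = {};                          // empty table
  printf("%d\n", NegativeLookup(keys, 7, 5, name));  // 0 == ABSENT
  return 0;
}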
@@ -6921,11 +6497,6 @@
Register name,
Register scratch1,
Register scratch2) {
- ASSERT(!elements.is(scratch1));
- ASSERT(!elements.is(scratch2));
- ASSERT(!name.is(scratch1));
- ASSERT(!name.is(scratch2));
-
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
@@ -6969,17 +6540,11 @@
~(scratch1.bit() | scratch2.bit());
__ stm(db_w, sp, spill_mask);
- if (name.is(r0)) {
- ASSERT(!elements.is(r1));
- __ Move(r1, name);
- __ Move(r0, elements);
- } else {
- __ Move(r0, elements);
- __ Move(r1, name);
- }
+ __ Move(r0, elements);
+ __ Move(r1, name);
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ tst(r0, Operand(r0));
__ mov(scratch2, Operand(r2));
__ ldm(ia_w, sp, spill_mask);
@@ -6989,8 +6554,6 @@
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// r1: key
@@ -7080,337 +6643,6 @@
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
- { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateSmiOnlyToObject
- // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- }
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(r0));
- __ Move(address, regs_.address());
- __ Move(r0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(r1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ ldr(r1, MemOperand(address, 0));
- }
- __ mov(r2, Operand(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : element value to store
- // -- r1 : array literal
- // -- r2 : map of array literal
- // -- r3 : element index as smi
- // -- r4 : array literal index in function as smi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- __ CheckFastElements(r2, r5, &double_elements);
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
- __ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(r1, r3, r0);
- __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
- __ Push(r5, r4);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r0, MemOperand(r6, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
- __ Ret();
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
- &slow_elements);
- __ Ret();
-}
-
#undef __
} } // namespace v8::internal