Update V8 to r5388 as required by WebKit r66666
Change-Id: Ib3c42e9b7226d22c65c7077c543fe31afe62a318
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 15c3198..a095ef7 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
+#include "code-stubs.h"
#include "codegen-inl.h"
namespace v8 {
@@ -95,10 +96,6 @@
// edi: called object
// eax: number of arguments
__ bind(&non_function_call);
- // CALL_NON_FUNCTION expects the non-function constructor as receiver
- // (instead of the original receiver from the call site). The receiver is
- // stack element argc+1.
- __ mov(Operand(esp, eax, times_4, kPointerSize), edi);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -699,17 +696,6 @@
}
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
- __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
- __ mov(result,
- Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
// Number of empty elements to allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
@@ -1099,7 +1085,7 @@
Label generic_array_code;
// Get the Array function.
- GenerateLoadArrayFunction(masm, edi);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
if (FLAG_debug_code) {
// Initial map for the builtin Array function should be a map.
@@ -1135,7 +1121,7 @@
if (FLAG_debug_code) {
// The array construct code is only set for the builtin Array function which
// does always have a map.
- GenerateLoadArrayFunction(masm, ebx);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ebx);
__ cmp(edi, Operand(ebx));
__ Assert(equal, "Unexpected Array function");
// Initial map for the builtin Array function should be a map.
@@ -1159,6 +1145,131 @@
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ __ IncrementCounter(&Counters::string_ctor_calls, 1);
+
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
+ __ cmp(edi, Operand(ecx));
+ __ Assert(equal, "Unexpected String function");
+ }
+
+ // Load the first argument into eax and get rid of the rest
+ // (including the receiver).
+ Label no_arguments;
+ __ test(eax, Operand(eax));
+ __ j(zero, &no_arguments);
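+ // The first (leftmost) argument lives at esp[argc * 4]. After saving it
+ // in ebx, drop all argc arguments plus the receiver by popping the return
+ // address, advancing esp past argc + 1 stack slots, and pushing the
+ // return address back.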
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ push(ecx);
+ __ mov(eax, ebx);
+
+ // Lookup the argument in the number to string cache.
+ Label not_cached, argument_is_string;
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm,
+ eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ false, // Input is known to be smi?
+ &not_cached);
+ __ IncrementCounter(&Counters::string_ctor_cached_number, 1);
+ __ bind(&argument_is_string);
+ // ----------- S t a t e -------------
+ // -- ebx : argument converted to string
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate a JSValue and put the tagged pointer into eax.
+ Label gc_required;
+ __ AllocateInNewSpace(JSValue::kSize,
+ eax, // Result.
+ ecx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ __ LoadGlobalFunctionInitialMap(edi, ecx);
+ if (FLAG_debug_code) {
+ __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
+ JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(equal, "Unexpected string wrapper instance size");
+ __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
+ __ Assert(equal, "Unexpected unused properties of string wrapper");
+ }
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
+
+ // Set properties and elements.
+ __ Set(ecx, Immediate(Factory::empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+
+ // Set the value.
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
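+ // The four words of the JSValue (map, properties, elements and value)
+ // have all been written above, so the new object contains no
+ // uninitialized fields.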
+
+ // We're done. Return.
+ __ ret(0);
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(¬_cached);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &convert_argument);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
+ __ j(NegateCondition(is_string), &convert_argument);
+ __ mov(ebx, eax);
+ __ IncrementCounter(&Counters::string_ctor_string_value, 1);
+ __ jmp(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into ebx.
+ __ bind(&convert_argument);
+ __ IncrementCounter(&Counters::string_ctor_conversions, 1);
+ __ EnterInternalFrame();
+ __ push(edi); // Preserve the function.
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(edi);
+ __ LeaveInternalFrame();
+ __ mov(ebx, eax);
+ __ jmp(&argument_is_string);
+
+ // Load the empty string into ebx, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ Set(ebx, Immediate(Factory::empty_string()));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, kPointerSize));
+ __ push(ecx);
+ __ jmp(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(&Counters::string_ctor_gc_required, 1);
+ __ EnterInternalFrame();
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
+ __ ret(0);
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, Operand(esp));
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
new file mode 100644
index 0000000..8106886
--- /dev/null
+++ b/src/ia32/code-stubs-ia32.cc
@@ -0,0 +1,4540 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
+#include "code-stubs.h"
+#include "bootstrapper.h"
+#include "jsregexp.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in esi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the function info from the stack.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+ __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ mov(ebx, Immediate(Factory::empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+ __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
+ Immediate(Factory::the_hole_value()));
+ __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
+ __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
+ __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(ecx); // Temporarily remove return address.
+ __ pop(edx);
+ __ push(esi);
+ __ push(edx);
+ __ push(ecx); // Restore return address.
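+ // The stack now holds the context and the function info below the return
+ // address; these are the two arguments passed to the runtime call below.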
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
+ __ mov(FieldOperand(eax, Context::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
+
+ // Setup the fixed slots.
+ __ xor_(ebx, Operand(ebx)); // Set to NULL.
+ __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
+ __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
+
+ // Copy the global object from the surrounding context. We go through the
+ // context in the function (ecx) to match the allocation behavior we have
+ // in the runtime system (see Heap::AllocateFunctionContext).
+ __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
+ __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
+
+ // Initialize the rest of the slots to undefined.
+ __ mov(ebx, Factory::undefined_value());
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ mov(esi, Operand(eax));
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [esp + kPointerSize]: constant elements.
+ // [esp + (2 * kPointerSize)]: literal index.
+ // [esp + (3 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into ecx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ mov(ecx, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ STATIC_ASSERT(kPointerSize == 4);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
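+ // eax holds the literal index as a smi (index << 1), so scaling it by
+ // half a pointer size produces the byte offset index * kPointerSize.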
+ __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(ecx, Factory::undefined_value());
+ __ j(equal, &slow_case);
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Handle<Map> expected_map;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map = Factory::fixed_array_map();
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map = Factory::fixed_cow_array_map();
+ }
+ __ push(ecx);
+ __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
+ __ Assert(equal, message);
+ __ pop(ecx);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ lea(edx, Operand(eax, JSArray::kSize));
+ __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(edx, i), ebx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string);
+ STATIC_ASSERT(kSmiTag == 0);
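+ // The string length is a smi; since the smi tag is zero, smi 0 is the
+ // machine word 0 and can be compared against the immediate directly.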
+ __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &true_result);
+ __ fldz();
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ FCmp();
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
+
+ // Return 1/0 for true/false in eax.
+ __ bind(&true_result);
+ __ mov(eax, 1);
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ mov(eax, 0);
+ __ ret(1 * kPointerSize);
+}
+
+
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
+ return name_;
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ mov(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ mov(left_arg, left);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ mov(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // Order of moves important to avoid destroying left argument.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ mov(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // Order of moves important to avoid destroying right argument.
+ __ mov(right_arg, right);
+ __ mov(left_arg, left);
+ }
+ } else {
+ // Order of moves is not important.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
+ }
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(Immediate(right));
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (left.is(left_arg)) {
+ __ mov(right_arg, Immediate(right));
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ mov(left_arg, Immediate(right));
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, left and right_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite left before moving
+ // it to left_arg.
+ __ mov(left_arg, left);
+ __ mov(right_arg, Immediate(right));
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(Immediate(left));
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (right.is(right_arg)) {
+ __ mov(left_arg, Immediate(left));
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ mov(right_arg, Immediate(left));
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
+ __ mov(right_arg, right);
+ __ mov(left_arg, Immediate(left));
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+
+ enum ArgLocation {
+ ARGS_ON_STACK,
+ ARGS_IN_REGISTERS
+ };
+
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand in register number. Returns operand as floating point number
+ // on FPU stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register number);
+
+ // Code pattern for loading floating point values. Input values must
+ // be either smi or heap number objects (fp values). Requirements:
+ // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
+ // Returns operands as floating point numbers on FPU stack.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location = ARGS_ON_STACK);
+
+ // Similar to LoadFloatOperand but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in eax, operand_2 in edx; falls through on float
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch);
+
+ // Takes the operands in edx and eax and loads them as integers in eax
+ // and ecx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+ static void LoadNumbersAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+ static void LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+
+ // Test if operands are smis or heap numbers and load them
+ // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+ // Leaves operands unchanged.
+ static void LoadSSE2Operands(MacroAssembler* masm);
+
+ // Test if operands are numbers (smi or HeapNumber objects), and load
+ // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
+ // either operand is not a number. Operands are in edx and eax.
+ // Leaves operands unchanged.
+ static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
+
+ // Similar to LoadSSE2Operands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
+};
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+ // dividend in eax and edx free for the division. Use eax, ebx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = edx;
+ Register right = eax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = eax;
+ right = ebx;
+ if (HasArgsInRegisters()) {
+ __ mov(ebx, eax);
+ __ mov(eax, edx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ mov(right, Operand(esp, 1 * kPointerSize));
+ __ mov(left, Operand(esp, 2 * kPointerSize));
+ }
+
+ if (static_operands_type_.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+ if (op_ == Token::BIT_OR) {
+ __ or_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ } else if (op_ == Token::BIT_AND) {
+ __ and_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ } else if (op_ == Token::BIT_XOR) {
+ __ xor_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ }
+ }
+
+ // 2. Prepare the smi check of both operands by oring them together.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ Label not_smis;
+ Register combined = ecx;
+ ASSERT(!left.is(combined) && !right.is(combined));
+ switch (op_) {
+ case Token::BIT_OR:
+ // Perform the operation into eax and smi check the result. Preserve
+ // eax in case the result is not a smi.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left)); // Bitwise or is commutative.
+ combined = right;
+ break;
+
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ __ mov(combined, right);
+ __ or_(combined, Operand(left));
+ break;
+
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Move the right operand into ecx for the shift operation, use eax
+ // for the smi check register.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left));
+ combined = right;
+ break;
+
+ default:
+ break;
+ }
+
+ // 3. Perform the smi check of the operands.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ __ test(combined, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis, not_taken);
+
+ // 4. Operands are both smis, perform the operation leaving the result in
+ // eax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op_) {
+ case Token::BIT_OR:
+ // Nothing to do.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(eax));
+ __ xor_(right, Operand(left)); // Bitwise xor is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(eax));
+ __ and_(right, Operand(left)); // Bitwise and is commutative.
+ break;
+
+ case Token::SHL:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shl_cl(left);
+ // Check that the *signed* result fits in a smi.
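+ // Comparing against 0xc0000000 sets the sign flag according to
+ // left + 0x40000000, whose sign bit is set exactly when the value lies
+ // outside the smi range -2^30 .. 2^30 - 1.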
+ __ cmp(left, 0xc0000000);
+ __ j(sign, &use_fp_on_smis, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SAR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ sar_cl(left);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SHR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shr_cl(left);
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when
+ // Smi tagging. These two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi.
+ __ test(left, Immediate(0xc0000000));
+ __ j(not_zero, slow, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::ADD:
+ ASSERT(right.is(eax));
+ __ add(right, Operand(left)); // Addition is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ break;
+
+ case Token::SUB:
+ __ sub(left, Operand(right));
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ __ mov(eax, left);
+ break;
+
+ case Token::MUL:
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // We can't revert the multiplication if the result is not a smi
+ // so save the right operand.
+ __ mov(ebx, right);
+ // Remove tag from one of the operands (but keep sign).
+ __ SmiUntag(right);
+ // Do multiplication.
+ __ imul(right, Operand(left)); // Multiplication is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+ break;
+
+ case Token::DIV:
+ // We can't revert the division if the result is not a smi so
+ // save the left operand.
+ __ mov(edi, left);
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &use_fp_on_smis, not_taken);
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by idiv
+ // instruction.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmp(eax, 0x40000000);
+ __ j(equal, &use_fp_on_smis);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
+ // Check that the remainder is zero.
+ __ test(edx, Operand(edx));
+ __ j(not_zero, &use_fp_on_smis);
+ // Tag the result and store it in register eax.
+ __ SmiTag(eax);
+ break;
+
+ case Token::MOD:
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &not_smis, not_taken);
+
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(edx, combined, slow);
+ // Move remainder to register eax.
+ __ mov(eax, edx);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // 5. Emit return of result in eax.
+ GenerateReturn(masm);
+
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::SHL: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Result we want is in left == edx, so we can put the allocated heap
+ // number in eax.
+ __ AllocateHeapNumber(eax, ecx, ebx, slow);
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(left));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ // It's OK to overwrite the right argument on the stack because we
+ // are about to return.
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ break;
+ }
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Restore arguments to edx, eax.
+ switch (op_) {
+ case Token::ADD:
+ // Revert right = right + left.
+ __ sub(right, Operand(left));
+ break;
+ case Token::SUB:
+ // Revert left = left - right.
+ __ add(left, Operand(right));
+ break;
+ case Token::MUL:
+ // Right was clobbered but a copy is in ebx.
+ __ mov(right, ebx);
+ break;
+ case Token::DIV:
+ // Left was clobbered but a copy is in edi. Right is in ebx for
+ // division.
+ __ mov(edx, edi);
+ __ mov(eax, right);
+ break;
+ default: UNREACHABLE();
+ break;
+ }
+ __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::LoadFloatSmis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ GenerateReturn(masm);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // 7. Non-smi operands, fall out to the non-smi code with the operands in
+ // edx and eax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ switch (op_) {
+ case Token::BIT_OR:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Right operand is saved in ecx and eax was destroyed by the smi
+ // check.
+ __ mov(eax, ecx);
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in eax, ebx at this point.
+ __ mov(edx, eax);
+ __ mov(eax, ebx);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+
+ // Generate fast case smi code if requested. This flag is set when the fast
+ // case smi code is not generated by the caller. Generating it here will speed
+ // up common operations.
+ if (ShouldGenerateSmiCode()) {
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+ }
+
+ // Floating point case.
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ Label not_floats;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx);
+ __ AbortIfNotNumber(eax);
+ }
+ if (static_operands_type_.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(edx);
+ __ AbortIfNotSmi(eax);
+ }
+ FloatingPointHelper::LoadSSE2Smis(masm, ecx);
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm);
+ }
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+ }
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ GenerateHeapResultAllocation(masm, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ } else { // SSE2 not available, use FPU.
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx);
+ __ AbortIfNotNumber(eax);
+ }
+ } else {
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ }
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ Label after_alloc_failure;
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ GenerateReturn(masm);
+ __ bind(&after_alloc_failure);
+ __ ffree();
+ __ jmp(&call_runtime);
+ }
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // Try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
+ }
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm,
+ static_operands_type_,
+ use_sse3_,
+ &call_runtime);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+ case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+ case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &call_runtime);
+ } else {
+ // Check if result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(negative, &non_smi_result);
+ }
+ // Tag smi result and return.
+ __ SmiTag(eax);
+ GenerateReturn(masm);
+
+ // All ops except SHR return a signed int32 that we load in
+ // a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ mov(ebx, Operand(eax)); // ebx: result
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ebx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result. If the arguments were passed in registers, place them on the
+ // stack in the correct order below the return address.
+ __ bind(&call_runtime);
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ // Test for string arguments before calling runtime.
+ Label not_strings, not_string1, string1, string1_smi2;
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in edx, eax.
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+ if (HasArgsReversed()) {
+ lhs = eax;
+ rhs = edx;
+ } else {
+ lhs = edx;
+ rhs = eax;
+ }
+
+ // Test if first argument is a string.
+ __ test(lhs, Immediate(kSmiTagMask));
+ __ j(zero, &not_string1);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &not_string1);
+
+ // First argument is a string, test second.
+ __ test(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings. Jump to the string add stub.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, edi, ebx, ecx, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ test(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &not_strings);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
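+ // With reversed arguments the operand that may be overwritten sits in the
+ // other register, so the overwrite mode is mirrored before the checks
+ // against edx and eax below.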
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in edx is already an object, we skip the
+ // allocation of a heap number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now edx can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(edx, Operand(ebx));
+ __ bind(&skip_allocation);
+ // Use object in edx as a result holder
+ __ mov(eax, Operand(edx));
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now eax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ // If arguments are not passed in registers read them from the stack.
+ ASSERT(!HasArgsInRegisters());
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If arguments are not passed in registers remove them from the stack before
+ // returning.
+ if (!HasArgsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(ecx);
+ if (HasArgsReversed()) {
+ __ push(eax);
+ __ push(edx);
+ } else {
+ __ push(edx);
+ __ push(eax);
+ }
+ __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ // Ensure the operands are on the stack.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ __ pop(ecx); // Save return address.
+
+ // Left and right arguments are now on top.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+ __ push(ecx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
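+ // The runtime function receives five arguments: the two operands already
+ // on the stack plus the key, the operation and the type info pushed above.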
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // esp[4]: argument (should be number).
+ // esp[0]: return address.
+ // Test that eax is a number.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the low and high words of the double into ebx, edx.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ sar(eax, 1);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ mov(Operand(esp, 0), eax);
+ __ fild_s(Operand(esp, 0));
+ __ fst_d(Operand(esp, 0));
+ __ pop(edx);
+ __ pop(ebx);
+ __ jmp(&loaded);
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // low and high words into ebx, edx.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+
+ __ bind(&loaded);
+ // ST[0] == double value
+ // ebx = low 32 bits of double value
+ // edx = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ mov(ecx, ebx);
+ __ xor_(ecx, Operand(edx));
+ __ mov(eax, ecx);
+ __ sar(eax, 16);
+ __ xor_(ecx, Operand(eax));
+ __ mov(eax, ecx);
+ __ sar(eax, 8);
+ __ xor_(ecx, Operand(eax));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // ST[0] == double value.
+ // ebx = low 32 bits of double value.
+ // edx = high 32 bits of double value.
+ // ecx = TranscendentalCache::hash(double value).
+ __ mov(eax,
+ Immediate(ExternalReference::transcendental_cache_array_address()));
+ // Eax points to cache array.
+ __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // Eax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ test(eax, Operand(eax));
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+ // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
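+ // Scale the index by 3 with the first lea and by another 4 via the
+ // addressing mode of the second, for an element size of 12 bytes.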
+ __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+ __ lea(ecx, Operand(eax, ecx, times_4, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmp(ebx, Operand(ecx, 0));
+ __ j(not_equal, &cache_miss);
+ __ cmp(edx, Operand(ecx, kIntSize));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ mov(eax, Operand(ecx, 2 * kIntSize));
+ __ fstp(0);
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ // We are short on registers, so use no_reg as scratch.
+ // This gives slightly larger code.
+ __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+ GenerateOperation(masm);
+ __ mov(Operand(ecx, 0), ebx);
+ __ mov(Operand(ecx, kIntSize), edx);
+ __ mov(Operand(ecx, 2 * kIntSize), eax);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+ // Only free register is edi.
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+ // work. We must reduce it to the appropriate range.
+ __ mov(edi, edx);
+ __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
+ int supported_exponent_limit =
+ (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
+ __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+ __ j(below, &in_range, taken);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmp(Operand(edi), Immediate(0x7ff00000));
+ Label non_nan_result;
+ __ j(not_equal, &non_nan_result, taken);
+ // Input is +/-Infinity or NaN. Result is NaN.
+ __ fstp(0);
+ // NaN is represented by 0x7ff8000000000000.
+ __ push(Immediate(0x7ff80000));
+ __ push(Immediate(0));
+ __ fld_d(Operand(esp, 0));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ jmp(&done);
+
+ __ bind(&non_nan_result);
+
+ // Use fpmod to restrict argument to the range +/-2*PI.
+ __ mov(edi, eax); // Save eax before using fnstsw_ax.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear if Illegal Operand or Zero Division exceptions are set.
+ __ test(Operand(eax), Immediate(5));
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ test(Operand(eax), Immediate(0x400 /* C2 */));
+ // If C2 is set, computation only has partial result. Loop to
+ // continue computation.
+ __ j(not_zero, &partial_remainder_loop);
+ }
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ __ fstp(0);
+ __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
+
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
+
+
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
+// trashed registers.
+void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
+ Label done, right_exponent, normal_exponent;
+ Register scratch = ebx;
+ Register scratch2 = edi;
+ if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
+ return;
+ }
+ if (!type_info.IsInteger32() || !use_sse3) {
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ }
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ if (!type_info.IsInteger32()) {
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ }
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
+ __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, Operand(ecx));
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to use the full unsigned range so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, big_shift_distance);
+ // Get the second half of the double.
+ __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits or the low
+ // mantissa word.
+ __ shr(ecx, 32 - big_shift_distance);
+ __ or_(ecx, Operand(scratch2));
+ // We have the answer in ecx, but we may need to negate it.
+ __ test(scratch, Operand(scratch));
+ __ j(positive, &done);
+ __ neg(ecx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in ecx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(Operand(scratch2), Immediate(zero_exponent));
+ // ecx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ mov(ecx, Immediate(30));
+ __ sub(ecx, Operand(scratch2));
+
+ __ bind(&right_exponent);
+ // Here ecx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, shift_distance);
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 22 bits to get the most significant 10 bits of the low
+    // mantissa word.
+ __ shr(scratch2, 32 - shift_distance);
+ __ or_(scratch2, Operand(scratch));
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to ecx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(ecx, Operand(ecx));
+ __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ mov(ecx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ sub(ecx, Operand(scratch2));
+ __ bind(&done);
+ }
+}
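+
+
+// For reference, a rough host-side sketch of the truncation performed by the
+// non-SSE3 path above, written against the same two 32-bit words of an IEEE
+// 754 double that the stub reads from the heap number. The helper name is
+// illustrative only (it is not called anywhere); it assumes the standard
+// fixed-width integer types and only covers the unbiased exponent range
+// [0, 31] that the stub accepts without bailing out.
+static int32_t TruncateDoubleWordsToInt32(uint32_t exponent_word,
+                                          uint32_t mantissa_word) {
+  int exponent = static_cast<int>((exponent_word >> 20) & 0x7FF) - 1023;
+  if (exponent < 0) return 0;  // Magnitude below 1.0 truncates to zero.
+  // Reassemble the 53-bit significand: the implicit leading 1 plus the 20
+  // high and 32 low fraction bits.
+  uint64_t significand =
+      (static_cast<uint64_t>((exponent_word & 0xFFFFF) | 0x100000) << 32) |
+      mantissa_word;
+  uint32_t magnitude = static_cast<uint32_t>(significand >> (52 - exponent));
+  bool negative = (exponent_word & 0x80000000u) != 0;
+  return static_cast<int32_t>(negative ? 0u - magnitude : magnitude);
+}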
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ if (!type_info.IsDouble()) {
+ if (!type_info.IsSmi()) {
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(edx);
+ }
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+ }
+
+ __ bind(&arg1_is_object);
+
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ if (!type_info.IsDouble()) {
+ // Test if arg2 is a Smi.
+ if (!type_info.IsSmi()) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+ }
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+ }
+
+ __ bind(&arg2_is_object);
+
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ // Test if arg1 is a Smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(edx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg1);
+
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm,
+ edx,
+ TypeInfo::Unknown(),
+ use_sse3,
+ conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+
+ // Test if arg2 is a Smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(ecx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg2);
+
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm,
+ eax,
+ TypeInfo::Unknown(),
+ use_sse3,
+ conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
+
+
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ if (type_info.IsNumber()) {
+ LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
+ } else {
+ LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
+ }
+}
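+
+
+// For reference, ia32 smis are 31-bit integers stored shifted left by one with
+// a zero tag bit, so the SmiUntag/SmiTag operations used by the helpers above
+// amount to the following. This is an illustrative sketch of the tagging
+// scheme only; the made-up helpers are not called by the stubs.
+static inline int32_t UntagSmiValue(int32_t tagged) {
+  return tagged >> kSmiTagSize;  // Arithmetic shift right by one; kSmiTag == 0.
+}
+static inline int32_t TagSmiValue(int32_t value) {
+  return value * 2;  // Only valid for values in [-2^30, 2^30).
+}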
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register number) {
+ Label load_smi, done;
+
+ __ test(number, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi, not_taken);
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi);
+ __ SmiUntag(number);
+ __ push(number);
+ __ fild_s(Operand(esp, 0));
+ __ pop(number);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
+ Label load_smi_edx, load_eax, load_smi_eax, done;
+ // Load operand in edx into xmm0.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+ __ bind(&load_eax);
+ // Load operand in eax into xmm1.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_edx);
+ __ SmiUntag(edx); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm0, Operand(edx));
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
+ __ jmp(&load_eax);
+
+ __ bind(&load_smi_eax);
+ __ SmiUntag(eax); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm1, Operand(eax));
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+ // Load operand in edx into xmm0, or branch to not_numbers.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, not_numbers); // Argument in edx is not a number.
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ bind(&load_eax);
+ // Load operand in eax into xmm1, or branch to not_numbers.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(equal, &load_float_eax);
+ __ jmp(not_numbers); // Argument in eax is not a number.
+ __ bind(&load_smi_edx);
+ __ SmiUntag(edx); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm0, Operand(edx));
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
+ __ jmp(&load_eax);
+ __ bind(&load_smi_eax);
+ __ SmiUntag(eax); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm1, Operand(eax));
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
+ __ jmp(&done);
+ __ bind(&load_float_eax);
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm0, Operand(scratch));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm1, Operand(scratch));
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location) {
+ Label load_smi_1, load_smi_2, done_load_1, done;
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, edx);
+ } else {
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ }
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_1, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ bind(&done_load_1);
+
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, eax);
+ } else {
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ }
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_2, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_1);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+ __ jmp(&done_load_1);
+
+ __ bind(&load_smi_2);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ mov(Operand(esp, 0), scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch) {
+ Label test_other, done;
+  // Jump to non_float if either operand is neither a smi nor a heap number;
+  // fall through when both operands are numbers.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &test_other, not_taken); // argument in edx is OK
+ __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(scratch, Factory::heap_number_map());
+ __ j(not_equal, non_float); // argument in edx is not a number -> NaN
+
+ __ bind(&test_other);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done); // argument in eax is OK
+ __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(scratch, Factory::heap_number_map());
+ __ j(not_equal, non_float); // argument in eax is not a number -> NaN
+
+ // Fall-through: Both operands are numbers.
+ __ bind(&done);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &try_float, not_taken);
+
+ if (negative_zero_ == kStrictNegativeZero) {
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
+ }
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ Label undo;
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(no_overflow, &done, taken);
+
+ // Restore eax and go slow case.
+ __ bind(&undo);
+ __ mov(eax, Operand(edx));
+ __ jmp(&slow);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ } else {
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ __ AllocateHeapNumber(eax, ebx, ecx, &undo);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow, not_taken);
+
+ // Convert the heap number in eax to an untagged integer in ecx.
+ IntegerConvert(masm,
+ eax,
+ TypeInfo::Unknown(),
+ CpuFeatures::IsSupported(SSE3),
+ &slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ not_(ecx);
+ __ cmp(ecx, 0xc0000000);
+ __ j(sign, &try_float, not_taken);
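+    // The comparison above effectively adds 0x40000000 modulo 2^32; the sign
+    // flag ends up clear exactly for values in [-2^30, 2^30), i.e. results
+    // that fit in a smi.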
+
+ // Tag the result as a smi and we're done.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(eax, Operand(ecx, times_2, kSmiTag));
+ __ jmp(&done);
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (overwrite_ == UNARY_NO_OVERWRITE) {
+ // Allocate a fresh heap number, but don't overwrite eax until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in eax.
+ __ AllocateHeapNumber(ebx, edx, edi, &slow);
+ __ mov(eax, Operand(ebx));
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ecx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(ecx); // pop return address.
+ __ push(eax);
+ __ push(ecx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
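+
+
+// The Token::SUB heap-number path above negates by toggling the IEEE 754 sign
+// bit in the exponent word instead of going through the FPU. A minimal sketch
+// of the same idea; the helper name is illustrative and unused by the stub.
+static inline uint32_t FlipDoubleSignWord(uint32_t exponent_word) {
+  return exponent_word ^ HeapNumber::kSignMask;  // Toggle the top bit.
+}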
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in edx and the parameter count is in eax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register eax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(edx, Operand(eax));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
+ __ lea(ebx, Operand(ebp, eax, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
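+  // (Both eax and edx are still smi-tagged, i.e. 2 * value, so the two times_2
+  // scalings above resolve to a load from
+  // ebp + (argc - key + 1) * kPointerSize.)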
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(edx, Operand(ecx));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
+ __ lea(ebx, Operand(ebx, ecx, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(ebx); // Return address.
+ __ push(edx);
+ __ push(ebx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+  // esp[12] : function
+
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &add_arguments_object);
+ __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
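+  // (ecx holds the smi-tagged length, i.e. 2 * argc, so the times_2 scaling
+  // above yields argc * kPointerSize for the elements backing store.)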
+ __ bind(&add_arguments_object);
+ __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(edi, Operand(edi, offset));
+
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::arguments_callee_index == 0);
+ __ mov(ebx, Operand(esp, 3 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::arguments_length_index == 1);
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+ // Untag the length for the loop below.
+ __ SmiUntag(ecx);
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
+ __ add(Operand(edi), Immediate(kPointerSize));
+ __ sub(Operand(edx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+  // Just jump directly to the runtime if native RegExp is not selected at
+  // compile time, or if FLAG_regexp_entry_native is turned off.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: last_match_info (expected JSArray)
+ // esp[8]: previous index
+ // esp[12]: subject string
+ // esp[16]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &runtime, not_taken);
+
+ // Check that the first argument is a JSRegExp object.
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // ecx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+  // uses the assumption that smis are 2 * their untagged value.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
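+  // (For example, a regexp with two capture groups stores the capture count
+  // as the smi 4, so edx becomes 6, i.e. (2 + 1) * 2 capture registers.)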
+ // Check that the static offsets vector buffer is large enough.
+ __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
+ __ j(above, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the second argument is a string.
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+ // Get the length of the string to ebx.
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+
+ // ebx: Length of subject string as a smi
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ cmp(eax, Operand(ebx));
+ __ j(above_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
+ __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, Operand(eax));
+ __ j(greater, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_ascii_string, seq_two_byte_string, check_code;
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ // First check for flat two byte string.
+ __ and_(ebx,
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_ascii_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
+ __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
+ __ cmp(Operand(edx), Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // String is a cons string with empty second part.
+ // eax: first part of cons string.
+ // ebx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
+ kStringRepresentationMask | kStringEncodingMask);
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
+ kStringRepresentationMask);
+ __ j(not_zero, &runtime);
+
+ __ bind(&seq_ascii_string);
+ // eax: subject string (flat ascii)
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(edi, Immediate(1)); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // eax: subject string (flat two byte)
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(edi, Immediate(0)); // Type is two byte.
+
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains the hole.
+ __ CmpObjectType(edx, CODE_TYPE, ebx);
+ __ j(not_equal, &runtime);
+
+ // eax: subject string
+ // edx: code
+ // edi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ SmiUntag(ebx); // Previous index from smi.
+
+ // eax: subject string
+ // ebx: previous index
+ // edx: code
+  // edi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+ __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ mov(Operand(esp, 5 * kPointerSize), ecx);
+
+ // Argument 5: static offsets vector buffer.
+ __ mov(Operand(esp, 4 * kPointerSize),
+ Immediate(ExternalReference::address_of_static_offsets_vector()));
+
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label setup_two_byte, setup_rest;
+ __ test(edi, Operand(edi));
+ __ mov(edi, FieldOperand(eax, String::kLengthOffset));
+ __ j(zero, &setup_two_byte);
+ __ SmiUntag(edi);
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+ __ jmp(&setup_rest);
+
+ __ bind(&setup_two_byte);
+ STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  // edi is a smi, i.e. 2 * length, which is the byte length of the two-byte
+  // string data.
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+
+ __ bind(&setup_rest);
+
+ // Argument 2: Previous index.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+
+ // Argument 1: Subject string.
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+
+ // Locate the code entry and call it.
+ __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(edx, kRegExpExecuteArguments);
+
+ // Check the result.
+ Label success;
+ __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
+ __ j(equal, &success, taken);
+ Label failure;
+ __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
+ __ j(equal, &failure, taken);
+ __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
+ // If not exception it can only be retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+  // Result must now be exception. If there is no pending exception already, a
+  // stack overflow (on the backtrack stack) was detected in RegExp code but
+  // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ cmp(eax, Operand::StaticVariable(pending_exception));
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(Operand(eax), Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
+
+ // edx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // edx: number of capture registers
+ // Store the capture count.
+ __ SmiTag(edx); // Number of capture registers to smi.
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ SmiUntag(edx); // Number of capture registers back from smi.
+ // Store last subject and last input.
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector();
+ __ mov(ecx, Immediate(address_of_static_offsets_vector));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // ecx: offsets vector
+ // edx: number of capture registers
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+  // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ sub(Operand(edx), Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer.
+ __ mov(edi, Operand(ecx, edx, times_int_size, 0));
+ __ SmiTag(edi);
+ // Store the smi value in the last match info.
+ __ mov(FieldOperand(ebx,
+ edx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ edi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
+ __ mov(number_string_cache,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ __ sub(Operand(mask), Immediate(1)); // Make mask.
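+  // (For example, a cache with 64 entries is a FixedArray of length 128; its
+  // length field holds the smi 256, so shifting right by 2 gives 64 and
+  // subtracting 1 gives the mask 63.)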
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ if (object_is_smi) {
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ } else {
+ Label not_smi, hash_calculated;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(not_zero, ¬_smi);
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ __ jmp(&smi_hash_calculated);
+ __ bind(¬_smi);
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, Operand(mask));
+ Register index = scratch;
+ Register probe = mask;
+ __ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ test(probe, Immediate(kSmiTagMask));
+ __ j(zero, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ } else {
+ __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ __ FCmp();
+ }
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
+ }
+
+ __ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, Operand(mask));
+ Register index = scratch;
+ // Check if the entry is the smi we are looking for.
+ __ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ mov(ebx, Operand(esp, kPointerSize));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
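+  // For example, a "<" comparison (cc == less) maps to GREATER here, which
+  // makes relational comparisons against undefined or NaN come out false.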
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Identical objects can be compared fast, but there are some tricky cases
+ // for NaN and undefined.
+ {
+ Label not_identical;
+ __ cmp(eax, Operand(edx));
+ __ j(not_equal, ¬_identical);
+
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, &check_for_nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ } else {
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, ¬_identical);
+ }
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc_ == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
+ __ ret(0);
+ } else {
+ Label nan;
+ __ j(above_equal, &nan);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ }
+ }
+
+ __ bind(¬_identical);
+ }
+
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc_ == equal && strict_) {
+ Label slow; // Fallthrough label.
+ Label not_smis;
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, Operand(eax));
+ __ test(ecx, Operand(edx));
+ __ j(not_zero, ¬_smis);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ STATIC_ASSERT(kSmiTagMask == 1);
+    // ecx still holds eax & kSmiTagMask, which is either zero or one.
+ __ sub(Operand(ecx), Immediate(0x01));
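+    // ecx is now 0xFFFFFFFF if eax is a smi and 0 if eax is a heap object, so
+    // the masked xor sequence below selects edx or eax into ebx without a
+    // branch.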
+ __ mov(ebx, edx);
+ __ xor_(ebx, Operand(eax));
+ __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, Operand(eax));
+ // if eax was smi, ebx is now edx, else eax.
+
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);
+
+ __ bind(¬_smis);
+ // If either operand is a JSObject or an oddball value, then they are not
+    // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object);
+
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
+
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, not_taken);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, Operand(ecx));
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, Operand(ecx));
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, not_taken);
+
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, not_taken);
+ __ j(above, &above_label, not_taken);
+
+ __ xor_(eax, Operand(eax));
+ __ ret(0);
+
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
+
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
+ }
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
+
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
+ BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(0);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ edi);
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Non-strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects;
+ Label return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ __ lea(ecx, Operand(eax, edx, times_1, 0));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, ¬_both_objects);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, ¬_both_objects);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+ __ j(below, ¬_both_objects);
+ // We do not bail out after this point. Both are JSObjects, and
+ // they are equal if and only if both are undetectable.
+ // The and of the undetectable flags is 1 if and only if they are equal.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(eax, Immediate(EQUAL));
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+    __ ret(0);
+ __ bind(¬_both_objects);
+ }
+
+ // Push arguments below the return address.
+ __ pop(ecx);
+ __ push(edx);
+ __ push(eax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ }
+
+ // Restore return address on the stack.
+ __ push(ecx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(zero, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(eax);
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(eax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &receiver_is_value, not_taken);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow, not_taken);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
+ __ Set(eax, Immediate(argc_));
+ __ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // eax holds the exception.
+
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Restore next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(Operand::StaticVariable(handler_address));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ __ pop(ebp);
+ __ pop(edx); // Remove state.
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of
+ // a JS entry frame.
+ __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
+ Label skip;
+ __ cmp(ebp, 0);
+ __ j(equal, &skip, not_taken);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ ret(0);
+}
+
+
+// If true, a Handle<T> passed by value is passed and returned by
+// using the location_ field directly. If false, it is passed and
+// returned as a pointer to a handle.
+#ifdef USING_BSD_ABI
+static const bool kPassHandlesDirectly = true;
+#else
+static const bool kPassHandlesDirectly = false;
+#endif
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label empty_handle;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(kStackSpace, kArgc);
+ STATIC_ASSERT(kArgc == 4);
+ if (kPassHandlesDirectly) {
+    // When handles are passed directly we don't have to allocate extra
+    // space for and pass an out parameter.
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
+ } else {
+ // The function expects three arguments to be passed but we allocate
+ // four to get space for the output cell. The argument slots are filled
+ // as follows:
+ //
+ // 3: output cell
+ // 2: arguments pointer
+ // 1: name
+ // 0: pointer to the output cell
+ //
+ // Note that this is one more "argument" than the function expects
+ // so the out cell will have to be popped explicitly after returning
+ // from the function.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
+ __ mov(ebx, esp);
+ __ add(Operand(ebx), Immediate(3 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
+ }
+ // Call the api function!
+ __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(Factory::the_hole_value()));
+ __ j(not_equal, &promote_scheduled_exception, not_taken);
+ if (!kPassHandlesDirectly) {
+ // The returned value is a pointer to the handle holding the result.
+ // Dereference this to get to the location.
+ __ mov(eax, Operand(eax, 0));
+ }
+ // Check if the result handle holds 0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &empty_handle, not_taken);
+ // It was non-zero. Dereference to get the result value.
+ __ mov(eax, Operand(eax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame();
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ bind(&empty_handle);
+ // It was zero; the result is undefined.
+ __ mov(eax, Factory::undefined_value());
+ __ jmp(&prologue);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int /* alignment_skew */) {
+ // eax: result parameter for PerformGC, if any
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // edi: number of arguments including receiver (C callee-saved)
+ // esi: pointer to the first argument (C callee-saved)
+
+ // Result returned in eax, or eax+edx if result_size_ is 2.
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ if (do_gc) {
+ // Pass failure code returned from last attempt as first argument to
+ // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
+ // stack alignment is known to be correct. This function takes one argument
+ // which is passed on the stack, and we know that the stack has been
+ // prepared to pass at least one argument.
+ __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
+ __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ inc(Operand::StaticVariable(scope_depth));
+ }
+
+ // Call C function.
+ __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ call(Operand(ebx));
+ // Result is in eax or edx:eax - do not destroy these registers!
+
+ if (always_allocate_scope) {
+ __ dec(Operand::StaticVariable(scope_depth));
+ }
+
+ // Make sure we're not trying to return 'the hole' from the runtime
+ // call as this may lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(not_equal, &okay);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
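+ // A failure object has all kFailureTagMask bits set (see the assert above),
+ // so adding one clears exactly those bits; for any other tag at least one of
+ // them remains set after the increment.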
+ __ lea(ecx, Operand(eax, 1));
+ // Lower 2 bits of ecx are 0 iff eax has failure tag.
+ __ test(ecx, Immediate(kFailureTagMask));
+ __ j(zero, &failure_returned, not_taken);
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame();
+ __ ret(0);
+
+ // Handling of failure.
+ __ bind(&failure_returned);
+
+ Label retry;
+ // If the returned exception is RETRY_AFTER_GC continue at retry label
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
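+ // The failure type field is zero exactly for RETRY_AFTER_GC (see the assert
+ // above), so testing the type bits and branching on zero selects the retry.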
+ __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ j(zero, &retry, taken);
+
+ // Special handling of out of memory exceptions.
+ __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ __ j(equal, throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ mov(eax, Operand::StaticVariable(pending_exception_address));
+ __ mov(edx,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ cmp(eax, Factory::termination_exception());
+ __ j(equal, throw_termination_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ // Retry.
+ __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop sp to the top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
+ __ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ mov(esp, Operand(esp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(Operand::StaticVariable(handler_address));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(eax, false);
+ __ mov(Operand::StaticVariable(external_caught), eax);
+
+ // Set pending exception and eax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ }
+
+ // Clear the context pointer.
+ __ xor_(esi, Operand(esi));
+
+ // Restore fp from handler and discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ __ pop(ebp);
+ __ pop(edx); // State.
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ ret(0);
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // eax: number of arguments including receiver
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // esi: current context (C callee-saved)
+ // edi: JS function of the caller (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects instead
+ // of a proper result. The builtin entry handles this by performing
+ // a garbage collection and retrying the builtin (twice).
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame();
+
+ // eax: result parameter for PerformGC, if any (setup below)
+ // ebx: pointer to builtin function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // edi: number of arguments including receiver (C callee-saved)
+ // esi: argv pointer (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
+
+ // Setup frame.
+ __ push(ebp);
+ __ mov(ebp, Operand(esp));
+
+ // Push marker in two places.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (C calling conventions).
+ __ push(edi);
+ __ push(esi);
+ __ push(ebx);
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ push(Operand::StaticVariable(c_entry_fp));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ j(not_equal, ¬_outermost_js);
+ __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ bind(¬_outermost_js);
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
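+ // The call pushes the address of the code that follows it; together with the
+ // words pushed by PushTryHandler below it forms the stack handler, so a
+ // thrown exception returns to the "caught exception" code directly below.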
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+ // Clear any pending exceptions.
+ __ mov(edx,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. Notice that we
+ // cannot store a reference to the trampoline code directly in this
+ // stub, because the builtin stubs may not have been generated yet.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ mov(edx, Immediate(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ mov(edx, Immediate(entry));
+ }
+ __ mov(edx, Operand(edx, 0)); // deref address
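+ // The trampoline's instructions start right after the Code object header, so
+ // the entry point is the (untagged) code address plus Code::kHeaderSize.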
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ call(Operand(edx));
+
+ // Unlink this frame from the handler chain.
+ __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ // Pop next_sp.
+ __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If current EBP value is the same as js_entry_sp value, it means that
+ // the current function is the outermost.
+ __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+ __ j(not_equal, ¬_outermost_js_2);
+ __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ bind(¬_outermost_js_2);
+#endif
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+
+ // Restore callee-saved registers (C calling conventions).
+ __ pop(ebx);
+ __ pop(esi);
+ __ pop(edi);
+ __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(ebp);
+ __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the left hand is a JS object.
+ __ IsObjectJSObjectType(eax, eax, edx, &slow);
+
+ // Get the prototype of the function.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ // edx is function, eax is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
+
+ // Register mapping:
+ // eax is object map.
+ // edx is function.
+ // ebx is function prototype.
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
+
+ __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ bind(&loop);
+ __ cmp(ecx, Operand(ebx));
+ __ j(equal, &is_instance);
+ __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ __ j(equal, &is_not_instance);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
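+ // The two exits below leave 0 in eax when the object is an instance and the
+ // smi 1 when it is not; the same value is stored in the instanceof answer
+ // cache root so the fast path above can return it directly next time.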
+ __ bind(&is_instance);
+ __ Set(eax, Immediate(0));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+ // stubs the never NaN NaN condition is only taken into account if the
+ // condition is equals.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case less: cc_name = "LT"; break;
+ case greater: cc_name = "GT"; break;
+ case less_equal: cc_name = "LE"; break;
+ case greater_equal: cc_name = "GE"; break;
+ case equal: cc_name = "EQ"; break;
+ case not_equal: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s",
+ cc_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name);
+ return name_;
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(object_, Immediate(kSmiTagMask));
+ __ j(zero, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ test(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(index_, Immediate(kSmiTagMask));
+ __ j(not_zero, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle non-flat strings.
+ __ test(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
+ Immediate(Factory::empty_string()));
+ __ j(not_equal, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ test(result_, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movzx_w(result_, FieldOperand(object_,
+ scratch_, times_1, // Scratch is smi-tagged.
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ SmiUntag(scratch_);
+ __ movzx_b(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
+ __ SmiTag(result_);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(eax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(scratch_, eax);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(scratch_, Immediate(kSmiTagMask));
+ __ j(not_zero, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
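+ // Because kMaxAsciiCharCode + 1 is a power of two, a single mask test checks
+ // both that code_ is a smi and that its untagged value is a valid ascii code.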
+ __ test(code_,
+ Immediate(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ j(not_zero, &slow_case_, not_taken);
+
+ __ Set(result_, Immediate(Factory::single_character_string_cache()));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ // At this point code register contains smi tagged ascii char code.
+ __ mov(result_, FieldOperand(result_,
+ code_, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result_, Factory::undefined_value());
+ __ j(equal, &slow_case_, not_taken);
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+
+ // Load the two arguments.
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+
+ // First argument is a string, test second.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // eax: first string
+ // edx: second string
+ // Check if either of the strings are empty. In that case return the other.
+ Label second_not_zero_length, both_not_zero_length;
+ __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(ecx, Operand(ecx));
+ __ j(not_zero, &second_not_zero_length);
+ // Second string is empty, result is first string which is already in eax.
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&second_not_zero_length);
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(ebx, Operand(ebx));
+ __ j(not_zero, &both_not_zero_length);
+ // First string is empty, result is second string which is in edx.
+ __ mov(eax, edx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Both strings are non-empty.
+ // eax: first string
+ // ebx: length of first string as a smi
+ // ecx: length of second string as a smi
+ // edx: second string
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result, longer_than_two;
+ __ bind(&both_not_zero_length);
+ __ add(ebx, Operand(ecx));
+ STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
+ // Handle exceptionally long strings in the runtime system.
+ __ j(overflow, &string_add_runtime);
+ // Use the runtime system when adding two one character strings, as it
+ // contains optimizations for this specific case using the symbol table.
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
+ __ j(not_equal, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
+ &string_add_runtime);
+
+ // Get the two characters forming the sub string.
+ __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string, make_flat_ascii_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ __ Set(ebx, Immediate(Smi::FromInt(2)));
+ __ jmp(&make_flat_ascii_string);
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+ __ j(below, &string_add_flat_result);
+
+ // If the result is not supposed to be flat, allocate a cons string object. If
+ // both strings are ascii the result is an ascii cons string.
+ Label non_ascii, allocated, ascii_data;
+ __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ and_(ecx, Operand(edi));
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii);
+ __ bind(&ascii_data);
+ // Allocate an ascii cons string.
+ __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
+ __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
+ __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+ __ mov(eax, ecx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // ecx: first instance type AND second instance type.
+ // edi: second instance type.
+ __ test(ecx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ xor_(edi, Operand(ecx));
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ j(equal, &ascii_data);
+ // Allocate a two byte cons string.
+ __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are not
+ // external strings.
+ // eax: first string
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ __ bind(&string_add_flat_result);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ // Now check if both strings are ascii strings.
+ // eax: first string
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ Label non_ascii_string_add_flat_result;
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(zero, &non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(zero, &string_add_runtime);
+
+ __ bind(&make_flat_ascii_string);
+ // Both strings are ascii strings. As they are short they are both flat.
+ // ebx: length of resulting flat string as a smi
+ __ SmiUntag(ebx);
+ __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // eax: first string - known to be two byte
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short they are both
+ // flat.
+ __ SmiUntag(ebx);
+ __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+}
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ } else {
+ __ mov_w(scratch, Operand(src, 0));
+ __ mov_w(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(2));
+ __ add(Operand(dest), Immediate(2));
+ }
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ // Copy characters using rep movs of doublewords.
+ // The destination is aligned on a 4 byte boundary because we are
+ // copying to the beginning of a newly allocated string.
+ ASSERT(dest.is(edi)); // rep movs destination
+ ASSERT(src.is(esi)); // rep movs source
+ ASSERT(count.is(ecx)); // rep movs count
+ ASSERT(!scratch.is(dest));
+ ASSERT(!scratch.is(src));
+ ASSERT(!scratch.is(count));
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ __ shl(count, 1);
+ }
+
+ // Don't enter the rep movs if there are less than 4 bytes to copy.
+ Label last_bytes;
+ __ test(count, Immediate(~3));
+ __ j(zero, &last_bytes);
+
+ // Copy from esi to edi using the rep movs instruction.
+ __ mov(scratch, count);
+ __ sar(count, 2); // Number of doublewords to copy.
+ __ cld();
+ __ rep_movs();
+
+ // Find number of bytes left.
+ __ mov(count, scratch);
+ __ and_(count, 3);
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ mov(scratch, c1);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(above, ¬_array_index);
+ __ mov(scratch, c2);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(below_equal, not_found);
+
+ __ bind(¬_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ GenerateHashInit(masm, hash, c1, scratch);
+ GenerateHashAddCharacter(masm, hash, c2, scratch);
+ GenerateHashGetHash(masm, hash, scratch);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ shl(c2, kBitsPerByte);
+ __ or_(chars, Operand(c2));
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load the symbol table.
+ Register symbol_table = c2;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
+ __ mov(symbol_table,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ SmiUntag(mask);
+ __ sub(Operand(mask), Immediate(1));
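+ // The symbol table capacity is a power of two, so capacity - 1 acts as a bit
+ // mask that wraps the probe offsets computed below into the table.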
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // symbol_table: symbol table
+ // mask: capacity mask
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes], next_probe_pop_mask[kProbes];
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ __ mov(scratch, hash);
+ if (i > 0) {
+ __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(mask));
+
+ // Load the entry from the symbol table.
+ Register candidate = scratch; // Scratch register contains candidate.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ mov(candidate,
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ SymbolTable::kElementsStartOffset));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmp(candidate, Factory::undefined_value());
+ __ j(equal, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ cmp(FieldOperand(candidate, String::kLengthOffset),
+ Immediate(Smi::FromInt(2)));
+ __ j(not_equal, &next_probe[i]);
+
+ // As we are out of registers save the mask on the stack and use that
+ // register as a temporary.
+ __ push(mask);
+ Register temp = mask;
+
+ // Check that the candidate is a non-external ascii string.
+ __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+ __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(
+ temp, temp, &next_probe_pop_mask[i]);
+
+ // Check if the two characters match.
+ __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ and_(temp, 0x0000ffff);
+ __ cmp(chars, Operand(temp));
+ __ j(equal, &found_in_symbol_table);
+ __ bind(&next_probe_pop_mask[i]);
+ __ pop(mask);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ __ pop(mask); // Pop saved mask from the stack.
+ if (!result.is(eax)) {
+ __ mov(eax, result);
+ }
+}
+
+
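+// The three hash helpers below compute the string hash used by the two
+// character symbol table probe above. In C-like form (summarizing the
+// per-step comments inside each helper):
+//   hash  = c0 + (c0 << 10);  hash ^= hash >> 6;                  // init
+//   hash += c;  hash += hash << 10;  hash ^= hash >> 6;           // add char
+//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;  // get hash
+//   if (hash == 0) hash = 27;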
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = character + (character << 10);
+ __ mov(hash, character);
+ __ shl(hash, 10);
+ __ add(hash, Operand(character));
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ sar(scratch, 6);
+ __ xor_(hash, Operand(scratch));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ add(hash, Operand(character));
+ // hash += hash << 10;
+ __ mov(scratch, hash);
+ __ shl(scratch, 10);
+ __ add(hash, Operand(scratch));
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ sar(scratch, 6);
+ __ xor_(hash, Operand(scratch));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ mov(scratch, hash);
+ __ shl(scratch, 3);
+ __ add(hash, Operand(scratch));
+ // hash ^= hash >> 11;
+ __ mov(scratch, hash);
+ __ sar(scratch, 11);
+ __ xor_(hash, Operand(scratch));
+ // hash += hash << 15;
+ __ mov(scratch, hash);
+ __ shl(scratch, 15);
+ __ add(hash, Operand(scratch));
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ test(hash, Operand(hash));
+ __ j(not_zero, &hash_not_zero);
+ __ mov(hash, Immediate(27));
+ __ bind(&hash_not_zero);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: to
+ // esp[8]: from
+ // esp[12]: string
+
+ // Make sure first argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // eax: string
+ // ebx: instance type
+
+ // Calculate length of sub string using the smi values.
+ Label result_longer_than_two;
+ __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ sub(ecx, Operand(edx));
+ __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
+ Label return_eax;
+ __ j(equal, &return_eax);
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked for in the symbol cache.
+ __ SmiUntag(ecx); // Result length is no longer smi.
+ __ cmp(ecx, 2);
+ __ j(greater, &result_longer_than_two);
+ __ j(less, &runtime);
+
+ // Sub string of length 2 requested.
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (value is 2)
+ // edx: from index (smi)
+ __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
+
+ // Get the two characters forming the sub string.
+ __ SmiUntag(edx); // From index is no longer smi.
+ __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx,
+ FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ // Setup registers for allocating the two character string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ Set(ecx, Immediate(2));
+
+ __ bind(&result_longer_than_two);
+ // eax: string
+ // ebx: instance type
+ // ecx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ SmiUntag(ebx);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&non_ascii_flat);
+ // eax: string
+ // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
+ // ecx: result string length
+ // Check for flat two byte string
+ __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ // As from is a smi it is 2 times the value which matches the size of a two
+ // byte character.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
+ __ mov(esi, edx); // Restore esi.
+
+ __ bind(&return_eax);
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Label result_not_equal;
+ Label result_greater;
+ Label compare_lengths;
+
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+
+ // Find minimum length.
+ Label left_shorter;
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter);
+ // Right string is shorter. Change scratch1 to be length of right string.
+ __ sub(scratch1, Operand(length_delta));
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ __ test(min_length, Operand(min_length));
+ __ j(zero, &compare_lengths);
+
+ // Change index to run from -min_length to -1 by adding min_length
+ // to string start. This means that loop ends when index reaches zero,
+ // which doesn't need an additional compare.
+ __ SmiUntag(min_length);
+ __ lea(left,
+ FieldOperand(left,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ neg(min_length);
+
+ Register index = min_length; // index = -min_length;
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ mov_b(scratch2, Operand(left, index, times_1, 0));
+ __ cmpb(scratch2, Operand(right, index, times_1, 0));
+ __ j(not_equal, &result_not_equal);
+ __ add(Operand(index), Immediate(1));
+ __ j(not_zero, &loop);
+ }
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ __ test(length_delta, Operand(length_delta));
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&result_not_equal);
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ Set(eax, Immediate(Smi::FromInt(LESS)));
+ __ ret(0);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+ __ ret(0);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: right string
+ // esp[8]: left string
+
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, ¬_same);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(¬_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+
+ // Compare flat ascii strings.
+ // Drop arguments from the stack.
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(ecx);
+ GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
new file mode 100644
index 0000000..acf4a6f
--- /dev/null
+++ b/src/ia32/code-stubs-ia32.h
@@ -0,0 +1,360 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_CODE_STUBS_IA32_H_
+#define V8_IA32_CODE_STUBS_IA32_H_
+
+#include "macro-assembler.h"
+#include "code-stubs.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+ void GenerateOperation(MacroAssembler* masm);
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags,
+ TypeInfo operands_type)
+ : op_(op),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false),
+ static_operands_type_(operands_type),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) {
+ if (static_operands_type_.IsSmi()) {
+ mode_ = NO_OVERWRITE;
+ }
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ flags_(FlagBits::decode(key)),
+ args_in_registers_(ArgsInRegistersBits::decode(key)),
+ args_reversed_(ArgsReversedBits::decode(key)),
+ use_sse3_(SSE3Bits::decode(key)),
+ static_operands_type_(TypeInfo::ExpandedRepresentation(
+ StaticTypeInfoBits::decode(key))),
+ runtime_operands_type_(runtime_operands_type),
+ name_(NULL) {
+ }
+
+ // Generate code to call the stub with the supplied arguments. This will add
+ // code at the call site to prepare arguments either in registers or on the
+ // stack together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ bool ArgsInRegistersSupported() {
+ return op_ == Token::ADD || op_ == Token::SUB
+ || op_ == Token::MUL || op_ == Token::DIV;
+ }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+ bool args_in_registers_; // Arguments passed in registers not on the stack.
+ bool args_reversed_; // Left and right argument are swapped.
+ bool use_sse3_;
+
+ // Number type information of operands, determined by code generator.
+ TypeInfo static_operands_type_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub %d (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_),
+ static_operands_type_.ToString());
+ }
+#endif
+
+ // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class SSE3Bits: public BitField<bool, 9, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+ class ArgsReversedBits: public BitField<bool, 11, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+ class StaticTypeInfoBits: public BitField<int, 13, 3> {};
+ class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 18 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_)
+ | SSE3Bits::encode(use_sse3_)
+ | ArgsInRegistersBits::encode(args_in_registers_)
+ | ArgsReversedBits::encode(args_reversed_)
+ | StaticTypeInfoBits::encode(
+ static_operands_type_.ThreeBitRepresentation())
+ | RuntimeTypeInfoBits::encode(runtime_operands_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ bool IsOperationCommutative() {
+ return (op_ == Token::ADD) || (op_ == Token::MUL);
+ }
+
+ void SetArgsInRegisters() { args_in_registers_ = true; }
+ void SetArgsReversed() { args_reversed_ = true; }
+ bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
+
+ bool ShouldGenerateSmiCode() {
+ return HasSmiCodeInStub() &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies ecx characters from esi to edi. Copying of overlapping regions is
+ // not supported.
+ static void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be edi.
+ Register src, // Must be esi.
+ Register count, // Must be ecx.
+ Register scratch, // Neither of above.
+ bool ascii);
+
+ // Probe the symbol table for a two character string. If the string is
+ // not found by probing a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found the code falls through with the string in register eax.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ explicit StringCompareStub() {
+ }
+
+  // Compares two flat ascii strings and returns the result in eax after
+  // popping the two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // register object is found in the cache, the generated code falls through
+  // with the result in the result register. The object and result registers
+  // may be the same. If the number is not found in the cache, the code jumps
+  // to the label not_found; only the contents of register object are
+  // guaranteed to be unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index a48c74e..d399c35 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -29,8 +29,9 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "bootstrapper.h"
+#include "code-stubs.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
@@ -934,97 +935,6 @@
}
-class FloatingPointHelper : public AllStatic {
- public:
-
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in register number. Returns operand as floating point number
- // on FPU stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register number);
-
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in eax, operand_2 in edx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch);
-
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* operand_conversion_failure);
-
- // Test if operands are smis or heap numbers and load them
- // into xmm0 and xmm1 if they are. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
- // Test if operands are numbers (smi or HeapNumber objects), and load
- // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
- // either operand is not a number. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-};
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
// Perform or call the specialized stub for a binary operation. Requires the
// three registers left, right and dst to be distinct and spilled. This
// deferred operation has up to three entry points: The main one calls the
@@ -1541,7 +1451,7 @@
overwrite_mode,
NO_SMI_CODE_IN_STUB,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
} else if (right_is_smi_constant) {
answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
false, overwrite_mode);
@@ -1564,7 +1474,7 @@
overwrite_mode,
NO_GENERIC_BINARY_FLAGS,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
}
}
@@ -1573,6 +1483,20 @@
}
+Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right) {
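+  // Use the stub's register calling convention when it is supported;
+  // otherwise push both operands and call with arguments on the stack.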
+ if (stub->ArgsInRegistersSupported()) {
+ stub->SetArgsInRegisters();
+ return frame_->CallStub(stub, left, right);
+ } else {
+ frame_->Push(left);
+ frame_->Push(right);
+ return frame_->CallStub(stub, 2);
+ }
+}
+
+
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
Object* answer_object = Heap::undefined_value();
switch (op) {
@@ -2772,41 +2696,6 @@
ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
left_side_constant_smi, right_side_constant_smi,
is_loop_condition);
- } else if (cc == equal &&
- (left_side_constant_null || right_side_constant_null)) {
- // To make null checks efficient, we check if either the left side or
- // the right side is the constant 'null'.
- // If so, we optimize the code by inlining a null check instead of
- // calling the (very) general runtime routine for checking equality.
- Result operand = left_side_constant_null ? right_side : left_side;
- right_side.Unuse();
- left_side.Unuse();
- operand.ToRegister();
- __ cmp(operand.reg(), Factory::null_value());
- if (strict) {
- operand.Unuse();
- dest->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- dest->true_target()->Branch(equal);
- __ cmp(operand.reg(), Factory::undefined_value());
- dest->true_target()->Branch(equal);
- __ test(operand.reg(), Immediate(kSmiTagMask));
- dest->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- temp.Unuse();
- operand.Unuse();
- dest->Split(not_zero);
- }
} else if (left_side_constant_1_char_string ||
right_side_constant_1_char_string) {
if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
@@ -5817,12 +5706,9 @@
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5909,12 +5795,9 @@
frame()->Push(&value);
Load(node->value());
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -6012,11 +5895,8 @@
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -6428,11 +6308,10 @@
// actual function to call is resolved after the arguments have been
// evaluated.
- // Compute function to call and use the global object as the
- // receiver. There is no need to use the global proxy here because
- // it will always be replaced with a newly allocated object.
+  // Push the constructor on the stack. If it is not a function, it is used
+  // as the receiver for CALL_NON_FUNCTION; otherwise the value on the stack
+  // is ignored.
Load(node->expression());
- LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
@@ -6445,8 +6324,7 @@
// constructor invocation.
CodeForSourcePosition(node->position());
Result result = frame_->CallConstructor(arg_count);
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
}
@@ -7447,7 +7325,6 @@
Label empty;
__ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
__ j(equal, &empty);
- ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
__ mov(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Factory::fixed_cow_array_map()));
__ bind(&empty);
@@ -8055,6 +7932,42 @@
}
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(value.reg());
+ }
+
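+  // The hash-field bits under the mask are clear iff the string has a cached
+  // array index, so the true target is taken on zero.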
+ __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+
+ value.Unuse();
+ destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result string = frame_->Pop();
+ string.ToRegister();
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(string.reg());
+ }
+
+ Result number = allocator()->Allocate();
+ ASSERT(number.is_valid());
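+  // Load the hash field and extract the smi-tagged array index from it.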
+ __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
+ __ IndexFromHash(number.reg(), number.reg());
+ string.Unuse();
+ frame_->Push(&number);
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
ASSERT(!in_safe_int32_mode());
if (CheckForInlineRuntimeCall(node)) {
@@ -8218,9 +8131,7 @@
frame_->Push(&value);
} else {
Load(node->expression());
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = node->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
bool no_negative_zero = node->expression()->no_negative_zero();
@@ -8925,11 +8836,9 @@
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ if (node->left()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ } else if (node->right()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_RIGHT;
}
@@ -9161,6 +9070,41 @@
}
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+ ASSERT(!in_safe_int32_mode());
+ Comment cmnt(masm_, "[ CompareToNull");
+
+ Load(node->expression());
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ cmp(operand.reg(), Factory::null_value());
+ if (node->is_strict()) {
+ operand.Unuse();
+ destination()->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ destination()->true_target()->Branch(equal);
+ __ cmp(operand.reg(), Factory::undefined_value());
+ destination()->true_target()->Branch(equal);
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ temp.Unuse();
+ operand.Unuse();
+ destination()->Split(not_zero);
+ }
+}
+
+
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() {
return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
@@ -9886,4425 +9830,6 @@
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ mov(ebx, Immediate(Factory::empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(Factory::the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Setup the object header.
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // Setup the fixed slots.
- __ xor_(ebx, Operand(ebx)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the surrounding context. We go through the
- // context in the function (ecx) to match the allocation behavior we have
- // in the runtime system (see Heap::AllocateFunctionContext).
- __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
- __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, Factory::undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, Operand(eax));
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: constant elements.
- // [esp + (2 * kPointerSize)]: literal index.
- // [esp + (3 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
- __ cmp(ecx, Factory::undefined_value());
- __ j(equal, &slow_case);
-
- if (FLAG_debug_code) {
- const char* message;
- Handle<Map> expected_map;
- if (mode_ == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map = Factory::fixed_array_map();
- } else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map = Factory::fixed_cow_array_map();
- }
- __ push(ecx);
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
- __ Assert(equal, message);
- __ pop(ecx);
- }
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ lea(edx, Operand(eax, JSArray::kSize));
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // 'null' => false.
- __ cmp(eax, Factory::null_value());
- __ j(equal, &false_result);
-
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
-
- // Undetectable => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, &false_result);
-
- // JavaScript object => true.
- __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
- __ j(above_equal, &true_result);
-
- // String value => false iff empty.
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, ¬_string);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
- __ j(zero, &false_result);
- __ jmp(&true_result);
-
- __ bind(¬_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &true_result);
- __ fldz();
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in eax.
- __ bind(&true_result);
- __ mov(eax, 1);
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ mov(eax, 0);
- __ ret(1 * kPointerSize);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ mov(right_arg, right);
- } else if (right.is(right_arg)) {
- __ mov(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ mov(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ mov(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ mov(right_arg, right);
- __ mov(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(Immediate(right));
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (left.is(left_arg)) {
- __ mov(right_arg, Immediate(right));
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ mov(left_arg, Immediate(right));
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ mov(left_arg, left);
- __ mov(right_arg, Immediate(right));
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(Immediate(left));
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (right.is(right_arg)) {
- __ mov(left_arg, Immediate(left));
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ mov(right_arg, Immediate(left));
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ mov(right_arg, right);
- __ mov(left_arg, Immediate(left));
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right) {
- if (ArgsInRegistersSupported()) {
- SetArgsInRegisters();
- return frame->CallStub(this, left, right);
- } else {
- frame->Push(left);
- frame->Push(right);
- return frame->CallStub(this, 2);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- if (HasArgsInRegisters()) {
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ mov(right, Operand(esp, 1 * kPointerSize));
- __ mov(left, Operand(esp, 2 * kPointerSize));
- }
-
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- __ or_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_AND) {
- __ and_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_XOR) {
- __ xor_(right, Operand(left));
- GenerateReturn(masm);
- return;
- }
- }
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, Operand(left));
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left));
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, ¬_smis, not_taken);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
-      // - 0x40000000: this number would convert to negative when
-      //   Smi tagging. These two cases can only happen with shifts
-      //   by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- break;
-
- case Token::SUB:
- __ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, ¬_smis, not_taken);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax.
- GenerateReturn(masm);
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::SHL: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- GenerateReturn(masm);
- break;
- }
-
- default:
- break;
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(¬_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
-
- // Generate fast case smi code if requested. This flag is set when the fast
- // case smi code is not generated by the caller. Generating it here will speed
- // up common operations.
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
-
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(edx);
- __ AbortIfNotSmi(eax);
- }
- FloatingPointHelper::LoadSSE2Smis(masm, ecx);
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm);
- }
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- } else { // SSE2 not available, use FPU.
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- } else {
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- }
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- GenerateReturn(masm);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
- __ bind(¬_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // Try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm,
- static_operands_type_,
- use_sse3_,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- GenerateReturn(masm);
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
-  // result. If arguments were passed in registers, now place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Test for string arguments before calling runtime.
- Label not_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in edx, eax
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
- if (HasArgsReversed()) {
- lhs = eax;
- rhs = edx;
- } else {
- lhs = edx;
- rhs = eax;
- }
-
- // Test if first argument is a string.
- __ test(lhs, Immediate(kSmiTagMask));
- __ j(zero, ¬_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, ¬_string1);
-
- // First argument is a string, test second.
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &string1);
-
- // First and second argument are strings. Jump to the string add stub.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, edi, ebx, ecx, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(¬_string1);
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, ¬_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, ¬_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(¬_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, Operand(ebx));
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, Operand(edx));
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- // If arguments are not passed in registers read them from the stack.
- ASSERT(!HasArgsInRegisters());
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(ecx);
- if (HasArgsReversed()) {
- __ push(eax);
- __ push(edx);
- } else {
- __ push(edx);
- __ push(eax);
- }
- __ push(ecx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- __ pop(ecx); // Save return address.
-
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // esp[4]: argument (should be number).
- // esp[0]: return address.
- // Test that eax is a number.
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label input_not_smi;
- Label loaded;
- __ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the low and high words of the double into ebx, edx.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sar(eax, 1);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ mov(Operand(esp, 0), eax);
- __ fild_s(Operand(esp, 0));
- __ fst_d(Operand(esp, 0));
- __ pop(edx);
- __ pop(ebx);
- __ jmp(&loaded);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // low and high words into ebx, edx.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
-
- __ bind(&loaded);
- // ST[0] == double value
- // ebx = low 32 bits of double value
- // edx = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ mov(ecx, ebx);
- __ xor_(ecx, Operand(edx));
- __ mov(eax, ecx);
- __ sar(eax, 16);
- __ xor_(ecx, Operand(eax));
- __ mov(eax, ecx);
- __ sar(eax, 8);
- __ xor_(ecx, Operand(eax));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // ebx = low 32 bits of double value.
- // edx = high 32 bits of double value.
- // ecx = TranscendentalCache::hash(double value).
- __ mov(eax,
- Immediate(ExternalReference::transcendental_cache_array_address()));
- // Eax points to cache array.
- __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
- // Eax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, Operand(eax));
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
- // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ lea(ecx, Operand(eax, ecx, times_4, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss);
- __ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ mov(eax, Operand(ecx, 2 * kIntSize));
- __ fstp(0);
- __ ret(kPointerSize);
-
- __ bind(&cache_miss);
- // Update cache with new value.
- // We are short on registers, so use no_reg as scratch.
- // This gives slightly larger code.
- __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
- GenerateOperation(masm);
- __ mov(Operand(ecx, 0), ebx);
- __ mov(Operand(ecx, kIntSize), edx);
- __ mov(Operand(ecx, 2 * kIntSize), eax);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
-
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
- // Only free register is edi.
- Label done;
- ASSERT(type_ == TranscendentalCache::SIN ||
- type_ == TranscendentalCache::COS);
- // More transcendental types can be added later.
-
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ mov(edi, edx);
- __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
- int supported_exponent_limit =
- (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(Operand(edi), Immediate(supported_exponent_limit));
- __ j(below, &in_range, taken);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmp(Operand(edi), Immediate(0x7ff00000));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, taken);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ push(Immediate(0x7ff80000));
- __ push(Immediate(0));
- __ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ mov(edi, eax); // Save eax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(Operand(eax), Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ test(Operand(eax), Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- __ fstp(0);
- __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
-
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
-}
-
-
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
- Register source,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
- return;
- }
- if (!type_info.IsInteger32() || !use_sse3) {
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- }
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- if (!type_info.IsInteger32()) {
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- }
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, Operand(ecx));
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just or'ed in the implicit bit so that took care of one, and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits of the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, Operand(scratch2));
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, Operand(scratch));
- __ j(positive, &done);
- __ neg(ecx);
- __ jmp(&done);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(Operand(scratch2), Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits of the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, Operand(ecx));
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ mov(ecx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ sub(ecx, Operand(scratch2));
- __ bind(&done);
- }
-}
-
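As a cross-check of the mantissa shifting above, here is a plain C++ sketch of the normal-exponent path (unbiased exponent 0..30), written from the comments rather than taken from V8; it should agree with simple truncation toward zero:

#include <cstdint>
#include <cstring>
#include <cassert>

// Sketch: truncate a double with unbiased exponent in [0, 30] to int32,
// using only the two 32-bit halves, mirroring the shifts described above.
static int32_t TruncateSmallDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  uint32_t lo = static_cast<uint32_t>(bits);
  int32_t exponent = static_cast<int32_t>((hi >> 20) & 0x7ff) - 1023;
  if (exponent < 0) return 0;            // |d| < 1.0 rounds to zero.
  assert(exponent <= 30);                // Larger exponents take other paths.
  // Put back the implicit 1 and combine the two mantissa halves.
  uint64_t mantissa = (static_cast<uint64_t>((hi & 0xfffff) | 0x100000) << 32) | lo;
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  if (hi & 0x80000000u) return -static_cast<int32_t>(magnitude);
  return static_cast<int32_t>(magnitude);
}

int main() {
  assert(TruncateSmallDouble(5.75) == 5);
  assert(TruncateSmallDouble(-1023.9) == -1023);
  assert(TruncateSmallDouble(0.25) == 0);
  return 0;
}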
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- if (!type_info.IsDouble()) {
- if (!type_info.IsSmi()) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(edx);
- }
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
- }
-
- __ bind(&arg1_is_object);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
- if (!type_info.IsDouble()) {
- // Test if arg2 is a Smi.
- if (!type_info.IsSmi()) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- }
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
- }
-
- __ bind(&arg2_is_object);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ cmp(edx, Factory::undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm,
- edx,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, Factory::undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
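The smi checks and SmiUntag calls above lean on the ia32 tagging scheme; a minimal sketch of that encoding (kSmiTagSize == 1, kSmiTag == 0 on this port):

#include <cstdint>
#include <cassert>

int main() {
  const int32_t kSmiTagMask = 1;
  int32_t value = -42;
  // SmiTag: value in the upper 31 bits, tag bit 0 clear.
  int32_t smi = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  assert((smi & kSmiTagMask) == 0);   // Heap object pointers always have bit 0 set.
  assert((smi >> 1) == value);        // SmiUntag: arithmetic shift right by one.
  return 0;
}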
-
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- if (type_info.IsNumber()) {
- LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
- } else {
- LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
- }
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ test(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi);
- __ SmiUntag(number);
- __ push(number);
- __ fild_s(Operand(esp, 0));
- __ pop(number);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
- // Load operand in edx into xmm0, or branch to not_numbers.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ bind(&load_eax);
- // Load operand in eax into xmm1, or branch to not_numbers.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(equal, &load_float_eax);
- __ jmp(not_numbers); // Argument in eax is not a number.
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done);
- __ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, Operand(scratch));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, Operand(scratch));
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- Label test_other, done;
- // Jump to non_float unless both operands are numbers (smi or heap number).
- // Register scratch is used as a temporary.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, not_taken); // argument in edx is OK
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done); // argument in eax is OK
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
-
- if (negative_zero_ == kStrictNegativeZero) {
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
- }
-
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- Label undo;
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(no_overflow, &done, taken);
-
- // Restore eax and go slow case.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
- __ jmp(&slow);
-
- // Try floating point case.
- __ bind(&try_float);
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- if (overwrite_ == UNARY_OVERWRITE) {
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
- } else {
- __ mov(edx, Operand(eax));
- // edx: operand
- __ AllocateHeapNumber(eax, ebx, ecx, &undo);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow, not_taken);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- CpuFeatures::IsSupported(SSE3),
- &slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, not_taken);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ jmp(&done);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (overwrite_ == UNARY_NO_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite eax until
- // we're sure we can do it without going through the slow case
- // that needs the value in eax.
- __ AllocateHeapNumber(ebx, edx, edi, &slow);
- __ mov(eax, Operand(ebx));
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ecx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- } else {
- UNIMPLEMENTED();
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
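A small sketch of the sign-flip trick used for Token::SUB above: negating a heap number only needs the top bit of its exponent word toggled (standard IEEE-754 layout assumed):

#include <cstdint>
#include <cstring>
#include <cassert>

int main() {
  const uint32_t kSignMask = 0x80000000u;
  double d = 3.5;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32) ^ kSignMask;  // Flip the sign bit only.
  bits = (static_cast<uint64_t>(hi) << 32) | static_cast<uint32_t>(bits);
  std::memcpy(&d, &bits, sizeof(d));
  assert(d == -3.5);                                            // Mantissa word untouched.
  return 0;
}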
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in edx and the parameter count is in eax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(edx, Operand(eax));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebp, eax, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, Operand(ecx));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebx, ecx, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(ebx); // Return address.
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
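A quick sketch of the smi-scaling idiom behind the STATIC_ASSERTs above: because an ia32 smi is the value shifted left by one, scaling a smi index with times_2 already yields a byte offset of index * kPointerSize. The RegExp stub further down uses the times_1 variant to turn a smi length into a byte count for two-byte strings.

#include <cassert>

int main() {
  const int kPointerSize = 4;
  int index = 7;
  int smi_index = index << 1;                     // Tagged representation (kSmiTagSize == 1).
  assert(smi_index * 2 == index * kPointerSize);  // times_2 on a smi == pointer-size scaling.
  assert(smi_index * 1 == index * 2);             // times_1 on a smi == two-byte scaling.
  return 0;
}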
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ test(ecx, Operand(ecx));
- __ j(zero, &add_arguments_object);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
- __ mov(edi, Operand(edi, offset));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::arguments_callee_index == 0);
- __ mov(ebx, Operand(esp, 3 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::arguments_length_index == 1);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, Operand(ecx));
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
- // Untag the length for the loop below.
- __ SmiUntag(ecx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ sub(Operand(edx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
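A sketch of the size computation in the try_allocate block above; the constant values here are illustrative placeholders, not the real Heap or FixedArray constants:

#include <cassert>

// Combined size of the arguments JSObject plus (if non-empty) its elements array.
static int ArgumentsAllocationSize(int argc, int arguments_object_size,
                                   int fixed_array_header_size, int pointer_size) {
  int size = arguments_object_size;
  if (argc > 0) size += fixed_array_header_size + argc * pointer_size;
  return size;
}

int main() {
  // Placeholder constants: 20-byte arguments object, 8-byte FixedArray header.
  assert(ArgumentsAllocationSize(0, 20, 8, 4) == 20);  // No elements array at all.
  assert(ArgumentsAllocationSize(3, 20, 8, 4) == 40);  // Object + header + 3 slots.
  return 0;
}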
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if the regexp entry in generated code is turned off by a runtime
- // switch or at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: last_match_info (expected JSArray)
- // esp[8]: previous index
- // esp[12]: subject string
- // esp[16]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
- __ j(zero, &runtime, not_taken);
-
- // Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // ecx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ j(not_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the assumption that smis are 2 * their untagged value.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
- __ j(above, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to ebx.
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
-
- // ebx: Length of subject string as a smi
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the third argument is a non-negative smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ cmp(eax, Operand(ebx));
- __ j(above_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the fourth argument is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(eax, Factory::fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, Operand(eax));
- __ j(greater, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ and_(ebx,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
- __ cmp(Operand(edx), Factory::empty_string());
- __ j(not_equal, &runtime);
- __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
- // eax: first part of cons string.
- // ebx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask);
- __ j(not_zero, &runtime);
-
- __ bind(&seq_ascii_string);
- // eax: subject string (flat ascii)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(edi, Immediate(1)); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // eax: subject string (flat two byte)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(edi, Immediate(0)); // Type is two byte.
-
- __ bind(&check_code);
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it contains
- // the hole.
- __ CmpObjectType(edx, CODE_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // eax: subject string
- // edx: code
- // edi: encoding of subject string (1 if ascii, 0 if two_byte);
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ SmiUntag(ebx); // Previous index from smi.
-
- // eax: subject string
- // ebx: previous index
- // edx: code
- // edi: encoding of subject string (1 if ascii, 0 if two_byte);
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
-
- static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
-
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
- __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 5 * kPointerSize), ecx);
-
- // Argument 5: static offsets vector buffer.
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector()));
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
- __ test(edi, Operand(edi));
- __ mov(edi, FieldOperand(eax, String::kLengthOffset));
- __ j(zero, &setup_two_byte);
- __ SmiUntag(edi);
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
- __ jmp(&setup_rest);
-
- __ bind(&setup_two_byte);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2).
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
-
- __ bind(&setup_rest);
-
- // Argument 2: Previous index.
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
-
- // Argument 1: Subject string.
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // Locate the code entry and call it.
- __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(edx, kRegExpExecuteArguments);
-
- // Check the result.
- Label success;
- __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
- __ j(equal, &success, taken);
- Label failure;
- __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure, taken);
- __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
- // If it is not EXCEPTION, it can only be RETRY. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already, a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ cmp(eax, Operand::StaticVariable(pending_exception));
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(Operand(eax), Factory::null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
-
- // edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
-
- // ebx: last_match_info backing store (FixedArray)
- // edx: number of capture registers
- // Store the capture count.
- __ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
- __ SmiUntag(edx); // Number of capture registers back from smi.
- // Store last subject and last input.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector();
- __ mov(ecx, Immediate(address_of_static_offsets_vector));
-
- // ebx: last_match_info backing store (FixedArray)
- // ecx: offsets vector
- // edx: number of capture registers
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ bind(&next_capture);
- __ sub(Operand(edx), Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_int_size, 0));
- __ SmiTag(edi);
- // Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
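The capture-register arithmetic above exploits smi tagging once more; a one-line sanity check of the "(number_of_captures + 1) * 2" claim:

#include <cassert>

int main() {
  int number_of_captures = 3;
  int smi_capture_count = number_of_captures << 1;  // The field is read as a smi.
  int capture_registers = smi_capture_count + 2;    // The "add edx, 2" above, no untagging needed.
  assert(capture_registers == (number_of_captures + 1) * 2);
  return 0;
}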
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
- __ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(Operand(mask), Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- } else {
- Label not_smi, hash_calculated;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ test(probe, Immediate(kSmiTagMask));
- __ j(zero, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
-}
-
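A compact sketch of the cache-index calculation above, written from the comments (each cache entry spans two slots, so the mask is length / 2 - 1); the heap-number hash is the xor of the two halves of the double:

#include <cstdint>
#include <cstring>
#include <cassert>

// Returns the slot holding the key; the cached string sits at the next slot.
static uint32_t NumberStringCacheSlot(double number, uint32_t cache_length_in_slots) {
  uint64_t bits;
  std::memcpy(&bits, &number, sizeof(bits));
  uint32_t hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  uint32_t mask = cache_length_in_slots / 2 - 1;
  return (hash & mask) * 2;
}

int main() {
  assert(NumberStringCacheSlot(2.5, 128) % 2 == 0);  // Key slots are always even.
  return 0;
}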
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- Label check_unequal_objects, done;
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Identical objects can be compared fast, but there are some tricky cases
- // for NaN and undefined.
- {
- Label not_identical;
- __ cmp(eax, Operand(edx));
- __ j(not_equal, &not_identical);
-
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ cmp(edx, Factory::undefined_value());
- __ j(not_equal, &check_for_nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- } else {
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(eax, Operand(eax));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, Operand(edx));
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
- }
-
- __ bind(&not_identical);
- }
-
- // Strict equality can quickly decide whether objects are equal.
- // Non-strict object equality is slower, so it is handled later in the stub.
- if (cc_ == equal && strict_) {
- Label slow; // Fallthrough label.
- Label not_smis;
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
- __ j(not_zero, &not_smis);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- STATIC_ASSERT(kSmiTagMask == 1);
- // ecx still holds eax & kSmiTagMask, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
- // if eax was smi, ebx is now edx, else eax.
-
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&not_smis);
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object);
-
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
-
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, not_taken);
- __ j(above, &above_label, not_taken);
-
- __ xor_(eax, Operand(eax));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
- }
-
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
- BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
-
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
-
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- edx,
- eax,
- ecx,
- ebx,
- edi);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Non-strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects;
- Label return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(ecx, Operand(eax, edx, times_1, 0));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects);
- // We do not bail out after this point. Both are JSObjects, and
- // they are equal if and only if both are undetectable.
- // The and of the undetectable flags is 1 if and only if they are equal.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0); // Nothing was pushed on this path.
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- }
-
- // Restore return address on the stack.
- __ push(ecx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
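A standalone sketch of the QNaN test in the identical-operands path above; the mask value here is reconstructed from the comment (all exponent bits plus mantissa bit 51), not copied from a V8 header:

#include <cstdint>
#include <cstring>
#include <cassert>
#include <limits>

static bool HighWordSaysQuietNaN(double d) {
  const uint32_t kQuietNaNHighBits = 0xfffu << 19;  // 11 exponent bits + mantissa bit 51.
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return (hi << 1) >= (kQuietNaNHighBits << 1);     // Doubling drops the sign bit.
}

int main() {
  assert(HighWordSaysQuietNaN(std::numeric_limits<double>::quiet_NaN()));
  assert(!HighWordSaysQuietNaN(std::numeric_limits<double>::infinity()));
  assert(!HighWordSaysQuietNaN(-1.5));
  return 0;
}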
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(eax);
- __ push(Immediate(Smi::FromInt(0)));
- __ push(eax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &receiver_is_value, not_taken);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow, not_taken);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
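The esp offsets used above follow from the layout at the call site (return address on top, then the arguments, then the receiver, and finally the callee); a trivial check of the two expressions:

#include <cassert>

static int ReceiverOffset(int argc, int pointer_size) { return (argc + 1) * pointer_size; }
static int CalleeOffset(int argc, int pointer_size)   { return (argc + 2) * pointer_size; }

int main() {
  // With two arguments the receiver sits at esp + 12 and the callee at esp + 16.
  assert(ReceiverOffset(2, 4) == 12);
  assert(CalleeOffset(2, 4) == 16);
  return 0;
}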
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // eax holds the exception.
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Restore next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // Remove state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of
- // a JS entry frame.
- __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
- Label skip;
- __ cmp(ebp, 0);
- __ j(equal, &skip, not_taken);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
-}
-
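The STATIC_ASSERTs above pin down the try-handler layout; a sketch of the implied record (the state slot offset is inferred, since only offsets 0, 4, and 12 are asserted):

#include <cstdint>

struct StackHandlerSketch {
  uint32_t next;           // kNextOffset == 0: popped back into the handler chain head.
  uint32_t frame_pointer;  // kFPOffset == 1 * kPointerSize: restored into ebp.
  uint32_t state;          // Popped into edx and discarded.
  uint32_t pc;             // kPCOffset == 3 * kPointerSize: consumed by ret(0).
};

static_assert(sizeof(StackHandlerSketch) == 4 * sizeof(uint32_t),
              "matches StackHandlerConstants::kSize above");

int main() { return 0; }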
-
-// If true, a Handle<T> passed by value is passed and returned by
-// using the location_ field directly. If false, it is passed and
-// returned as a pointer to a handle.
-#ifdef USING_BSD_ABI
-static const bool kPassHandlesDirectly = true;
-#else
-static const bool kPassHandlesDirectly = false;
-#endif
-
-
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- Label empty_handle;
- Label prologue;
- Label promote_scheduled_exception;
- __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
- STATIC_ASSERT(kArgc == 4);
- if (kPassHandlesDirectly) {
- // When handles are passed directly we don't have to allocate extra
- // space for and pass an out parameter.
- __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
- __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
- } else {
- // The function expects three arguments to be passed but we allocate
- // four to get space for the output cell. The argument slots are filled
- // as follows:
- //
- // 3: output cell
- // 2: arguments pointer
- // 1: name
- // 0: pointer to the output cell
- //
- // Note that this is one more "argument" than the function expects
- // so the out cell will have to be popped explicitly after returning
- // from the function.
- __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
- __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
- __ mov(ebx, esp);
- __ add(Operand(ebx), Immediate(3 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
- }
- // Call the api function!
- __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
- __ cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(Factory::the_hole_value()));
- __ j(not_equal, &promote_scheduled_exception, not_taken);
- if (!kPassHandlesDirectly) {
- // The returned value is a pointer to the handle holding the result.
- // Dereference this to get to the location.
- __ mov(eax, Operand(eax, 0));
- }
- // Check if the result handle holds 0.
- __ test(eax, Operand(eax));
- __ j(zero, &empty_handle, not_taken);
- // It was non-zero. Dereference to get the result value.
- __ mov(eax, Operand(eax, 0));
- __ bind(&prologue);
- __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
- __ ret(0);
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
- __ bind(&empty_handle);
- // It was zero; the result is undefined.
- __ mov(eax, Factory::undefined_value());
- __ jmp(&prologue);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
- // eax: result parameter for PerformGC, if any
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: pointer to the first argument (C callee-saved)
-
- // Result returned in eax, or eax+edx if result_size_ is 2.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack alignment is known to be correct. This function takes one argument
- // which is passed on the stack, and we know that the stack has been
- // prepared to pass at least one argument.
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
- __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate_scope) {
- __ inc(Operand::StaticVariable(scope_depth));
- }
-
- // Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ call(Operand(ebx));
- // Result is in eax or edx:eax - do not destroy these registers!
-
- if (always_allocate_scope) {
- __ dec(Operand::StaticVariable(scope_depth));
- }
-
- // Make sure we're not trying to return 'the hole' from the runtime
- // call as this may lead to crashes in the IC code later.
- if (FLAG_debug_code) {
- Label okay;
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &okay);
- __ int3();
- __ bind(&okay);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ lea(ecx, Operand(eax, 1));
- // Lower 2 bits of ecx are 0 iff eax has failure tag.
- __ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned, not_taken);
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, taken);
-
- // Special handling of out of memory exceptions.
- __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ j(equal, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ mov(Operand::StaticVariable(pending_exception_address), edx);
-
- // Special handling of termination exceptions, which are uncatchable
- // by JavaScript code.
- __ cmp(eax, Factory::termination_exception());
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
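The lea/test pair in the failure check above is a compact way of asking whether the returned word carries the failure tag. The following scalar sketch is illustrative only (HasFailureTag and the mask parameter are stand-ins; the real tag constants live in V8's objects.h):

#include <cstdint>

// Sketch, not V8 source. Because ((kFailureTag + 1) & kFailureTagMask) == 0
// (the STATIC_ASSERT above), adding one to a failure-tagged word clears its
// low tag bits, and it does so only for failure-tagged words.
static inline bool HasFailureTag(uint32_t raw_result, uint32_t failure_tag_mask) {
  return ((raw_result + 1) & failure_tag_mask) == 0;
}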
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop sp to the top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ mov(esp, Operand(esp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to the next handler past the current ENTRY handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ mov(eax, false);
- __ mov(Operand::StaticVariable(external_caught), eax);
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ mov(Operand::StaticVariable(pending_exception), eax);
- }
-
- // Clear the context pointer.
- __ xor_(esi, Operand(esi));
-
- // Restore fp from handler and discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // State.
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // esi: current context (C callee-saved)
- // edi: JS function of the caller (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects instead
- // of a proper result. The builtin entry handles this by performing
- // a garbage collection and retrying the builtin (twice).
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_);
-
- // eax: result parameter for PerformGC, if any (setup below)
- // ebx: pointer to builtin function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: argv pointer (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
-
- // Setup frame.
- __ push(ebp);
- __ mov(ebp, Operand(esp));
-
- // Push marker in two places.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
- // Save callee-saved registers (C calling conventions).
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ push(Operand::StaticVariable(c_entry_fp));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ bind(&not_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through the JS entry trampoline
- // builtin, and pop the faked function when we return. Notice that we
- // cannot store a reference to the trampoline code directly in this
- // stub, because the builtin stubs may not have been generated yet.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ mov(edx, Immediate(construct_entry));
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ mov(edx, Immediate(entry));
- }
- __ mov(edx, Operand(edx, 0)); // deref address
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(Operand(edx));
-
- // Unlink this frame from the handler chain.
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
- // Pop next_sp.
- __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If the current EBP value is the same as the js_entry_sp value, it means
- // that the current function is the outermost.
- __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
-
- // Check that the left hand is a JS object.
- __ IsObjectJSObjectType(eax, eax, edx, &slow);
-
- // Get the prototype of the function.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
- // edx is function, eax is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ ret(2 * kPointerSize);
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
-
- // Check that the function prototype is a JS object.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
-
- // Register mapping:
- // eax is object map.
- // edx is function.
- // ebx is function prototype.
- __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
-
- __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ bind(&loop);
- __ cmp(ecx, Operand(ebx));
- __ j(equal, &is_instance);
- __ cmp(Operand(ecx), Immediate(Factory::null_value()));
- __ j(equal, &is_not_instance);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ Set(eax, Immediate(0));
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ ret(2 * kPointerSize);
-
- __ bind(&is_not_instance);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ ret(2 * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs, the never-NaN-NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name);
- return name_;
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(object_, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ test(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(index_, Immediate(kSmiTagMask));
- __ j(not_zero, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle non-flat strings.
- __ test(result_, Immediate(kIsConsStringMask));
- __ j(zero, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
- Immediate(Factory::empty_string()));
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ test(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzx_w(result_, FieldOperand(object_,
- scratch_, times_1, // Scratch is smi-tagged.
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiUntag(scratch_);
- __ movzx_b(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
- __ SmiTag(result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!scratch_.is(eax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(scratch_, eax);
- }
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(scratch_, Immediate(kSmiTagMask));
- __ j(not_zero, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(code_,
- Immediate(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case_, not_taken);
-
- __ Set(result_, Immediate(Factory::single_character_string_cache()));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point the code register contains a smi-tagged ascii char code.
- __ mov(result_, FieldOperand(result_,
- code_, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result_, Factory::undefined_value());
- __ j(equal, &slow_case_, not_taken);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
-
- // Load the two arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
-
- // First argument is a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
- }
-
- // Both arguments are strings.
- // eax: first string
- // edx: second string
- // Check if either of the strings is empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, Operand(ecx));
- __ j(not_zero, &second_not_zero_length);
- // Second string is empty, result is first string which is already in eax.
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, Operand(ebx));
- __ j(not_zero, &both_not_zero_length);
- // First string is empty, result is second string which is in edx.
- __ mov(eax, edx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // eax: first string
- // ebx: length of first string as a smi
- // ecx: length of second string as a smi
- // edx: second string
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
- __ add(ebx, Operand(ecx));
- STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
- // Handle exceptionally long strings in the runtime system.
- __ j(overflow, &string_add_runtime);
- // Use the symbol table when adding two one character strings, as it may
- // already contain the combined two character string.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
- &string_add_runtime);
-
- // Get the two characters forming the sub string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
-
- // Try to lookup two character string in symbol table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(ebx, Immediate(Smi::FromInt(2)));
- __ jmp(&make_flat_ascii_string);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
- __ j(below, &string_add_flat_result);
-
- // If result is not supposed to be flat, allocate a cons string object. If both
- // strings are ascii, the result is an ascii cons string.
- Label non_ascii, allocated, ascii_data;
- __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, Operand(edi));
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ test(ecx, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ascii cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
- __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
- __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
- __ mov(eax, ecx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // ecx: first instance type AND second instance type.
- // edi: second instance type.
- __ test(ecx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, Operand(ecx));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are not
- // external strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&string_add_flat_result);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- // Now check if both strings are ascii strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(zero, &non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(zero, &string_add_runtime);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
- // ebx: length of resulting flat string as a smi
- __ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // eax: first string - known to be two byte
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(Operand(ecx),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- } else {
- __ mov_w(scratch, Operand(src, 0));
- __ mov_w(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(2));
- __ add(Operand(dest), Immediate(2));
- }
- __ sub(Operand(count), Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- // Copy characters using rep movs of doublewords.
- // The destination is aligned on a 4 byte boundary because we are
- // copying to the beginning of a newly allocated string.
- ASSERT(dest.is(edi)); // rep movs destination
- ASSERT(src.is(esi)); // rep movs source
- ASSERT(count.is(ecx)); // rep movs count
- ASSERT(!scratch.is(dest));
- ASSERT(!scratch.is(src));
- ASSERT(!scratch.is(count));
-
- // Nothing to do for zero characters.
- Label done;
- __ test(count, Operand(count));
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- __ shl(count, 1);
- }
-
- // Don't enter the rep movs if there are fewer than 4 bytes to copy.
- Label last_bytes;
- __ test(count, Immediate(~3));
- __ j(zero, &last_bytes);
-
- // Copy from esi to edi using the rep movs instruction.
- __ mov(scratch, count);
- __ sar(count, 2); // Number of doublewords to copy.
- __ cld();
- __ rep_movs();
-
- // Find number of bytes left.
- __ mov(count, scratch);
- __ and_(count, 3);
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ test(count, Operand(count));
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- __ sub(Operand(count), Immediate(1));
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ mov(scratch, c1);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
- __ mov(scratch, c2);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, kBitsPerByte);
- __ or_(chars, Operand(c2));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the symbol table.
- Register symbol_table = c2;
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
- __ mov(symbol_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(Operand(mask), Immediate(1));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // symbol_table: symbol table
- // mask: capacity mask
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes], next_probe_pop_mask[kProbes];
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
- __ mov(scratch, hash);
- if (i > 0) {
- __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(mask));
-
- // Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ mov(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- __ cmp(candidate, Factory::undefined_value());
- __ j(equal, not_found);
-
- // If length is not 2 the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &next_probe[i]);
-
- // As we are out of registers, save the mask on the stack and use that
- // register as a temporary.
- __ push(mask);
- Register temp = mask;
-
- // Check that the candidate is a non-external ascii string.
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe_pop_mask[i]);
-
- // Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
- __ and_(temp, 0x0000ffff);
- __ cmp(chars, Operand(temp));
- __ j(equal, &found_in_symbol_table);
- __ bind(&next_probe_pop_mask[i]);
- __ pop(mask);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- __ pop(mask); // Pop saved mask from the stack.
- if (!result.is(eax)) {
- __ mov(eax, result);
- }
-}
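For readers tracing the register shuffling above, a plain-data sketch of the same probing strategy may help. EntrySketch, ProbeOffset and the array layout below are illustrative stand-ins, not the real SymbolTable layout or probe sequence:

#include <cstdint>

// Sketch only: a flat array standing in for the heap symbol table.
struct EntrySketch {
  bool is_undefined;   // Empty slot: no string with this hash exists.
  uint16_t chars;      // First char in byte 0, second char in byte 1.
};

static uint32_t ProbeOffset(int i) {
  return static_cast<uint32_t>(i);  // Placeholder for SymbolTable::GetProbeOffset(i).
}

// Returns the slot index of the two-character string, or -1 on a miss.
// As with the generated code, a miss after kProbes attempts does not prove
// the string is absent; the caller simply allocates a new one.
static int FindTwoCharSymbol(const EntrySketch* table, uint32_t capacity_mask,
                             uint32_t hash, uint16_t chars) {
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + ProbeOffset(i)) & capacity_mask;
    const EntrySketch& candidate = table[index];
    if (candidate.is_undefined) return -1;
    if (candidate.chars == chars) return static_cast<int>(index);
  }
  return -1;
}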
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = character + (character << 10);
- __ mov(hash, character);
- __ shl(hash, 10);
- __ add(hash, Operand(character));
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ add(hash, Operand(character));
- // hash += hash << 10;
- __ mov(scratch, hash);
- __ shl(scratch, 10);
- __ add(hash, Operand(scratch));
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ mov(scratch, hash);
- __ shl(scratch, 3);
- __ add(hash, Operand(scratch));
- // hash ^= hash >> 11;
- __ mov(scratch, hash);
- __ sar(scratch, 11);
- __ xor_(hash, Operand(scratch));
- // hash += hash << 15;
- __ mov(scratch, hash);
- __ shl(scratch, 15);
- __ add(hash, Operand(scratch));
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ test(hash, Operand(hash));
- __ j(not_zero, &hash_not_zero);
- __ mov(hash, Immediate(27));
- __ bind(&hash_not_zero);
-}
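Taken together, GenerateHashInit, GenerateHashAddCharacter and GenerateHashGetHash emit the add/shift/xor sequence spelled out in the comments above. For readability, the same computation for a two-character string in scalar C++ looks roughly like this (a sketch assuming 32-bit unsigned arithmetic; TwoCharacterHash is an illustrative name, not V8 source):

#include <cstdint>

// Sketch only: the scalar equivalent of the three hash helpers above,
// specialized to the two-character case used by the symbol table probe.
static uint32_t TwoCharacterHash(uint32_t c1, uint32_t c2) {
  uint32_t hash = c1 + (c1 << 10);  // GenerateHashInit
  hash ^= hash >> 6;
  hash += c2;                       // GenerateHashAddCharacter
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;                // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = 27;         // Never produce a zero hash.
  return hash;
}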
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: to
- // esp[8]: from
- // esp[12]: string
-
- // Make sure first argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
-
- // eax: string
- // ebx: instance type
-
- // Calculate length of sub string using the smi values.
- Label result_longer_than_two;
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ sub(ecx, Operand(edx));
- __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label return_eax;
- __ j(equal, &return_eax);
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked up in the symbol table.
- __ SmiUntag(ecx); // Result length is no longer smi.
- __ cmp(ecx, 2);
- __ j(greater, &result_longer_than_two);
- __ j(less, &runtime);
-
- // Sub string of length 2 requested.
- // eax: string
- // ebx: instance type
- // ecx: sub string length (value is 2)
- // edx: from index (smi)
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
-
- // Get the two characters forming the sub string.
- __ SmiUntag(edx); // From index is no longer smi.
- __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx,
- FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
- __ ret(3 * kPointerSize);
-
- __ bind(&make_two_character_string);
- // Set up registers for allocating the two character string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ Set(ecx, Immediate(2));
-
- __ bind(&result_longer_than_two);
- // eax: string
- // ebx: instance type
- // ecx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
-
- // Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
- __ SmiUntag(ebx);
- __ add(esi, Operand(ebx));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
- __ mov(esi, edx); // Restore esi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&non_ascii_flat);
- // eax: string
- // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
- // ecx: result string length
- // Check for flat two byte string
- __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(Operand(edi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
- // As the from index is a smi it is already twice the untagged value, which
- // matches the size of a two byte character.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, Operand(ebx));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
- __ mov(esi, edx); // Restore esi.
-
- __ bind(&return_eax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(3 * kPointerSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Label result_not_equal;
- Label result_greater;
- Label compare_lengths;
-
- __ IncrementCounter(&Counters::string_compare_native, 1);
-
- // Find minimum length.
- Label left_shorter;
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, Operand(length_delta));
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- __ test(min_length, Operand(min_length));
- __ j(zero, &compare_lengths);
-
- // Change index to run from -min_length to -1 by adding min_length
- // to the string start. This means the loop ends when the index reaches zero,
- // which doesn't need an additional compare.
- __ SmiUntag(min_length);
- __ lea(left,
- FieldOperand(left,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ lea(right,
- FieldOperand(right,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ neg(min_length);
-
- Register index = min_length; // index = -min_length;
-
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ mov_b(scratch2, Operand(left, index, times_1, 0));
- __ cmpb(scratch2, Operand(right, index, times_1, 0));
- __ j(not_equal, &result_not_equal);
- __ add(Operand(index), Immediate(1));
- __ j(not_zero, &loop);
- }
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- __ test(length_delta, Operand(length_delta));
- __ j(not_zero, &result_not_equal);
-
- // Result is EQUAL.
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&result_not_equal);
- __ j(greater, &result_greater);
-
- // Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(0);
-}
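The index manipulation above, running the index from -min_length up to zero so the loop needs no separate bounds compare, reads more naturally in scalar form. A sketch that returns -1/0/1 in place of the LESS/EQUAL/GREATER smis (CompareFlatAsciiSketch is an illustrative name, not V8 source):

// Sketch only: scalar form of the flat-ascii comparison above.
static int CompareFlatAsciiSketch(const char* left, int left_length,
                                  const char* right, int right_length) {
  int length_delta = left_length - right_length;
  int min_length = length_delta <= 0 ? left_length : right_length;
  // Point just past the compared prefix so a negative index walks it.
  const char* left_end = left + min_length;
  const char* right_end = right + min_length;
  for (int index = -min_length; index != 0; index++) {
    if (left_end[index] != right_end[index]) {
      return left_end[index] < right_end[index] ? -1 : 1;  // LESS / GREATER
    }
  }
  // Equal up to min_length: the longer string compares greater.
  if (length_delta != 0) return length_delta < 0 ? -1 : 1;
  return 0;  // EQUAL
}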
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: right string
- // esp[8]: left string
-
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
-
- Label not_same;
- __ cmp(edx, Operand(eax));
- __ j(not_equal, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
-
- // Compare flat ascii strings.
- // Drop arguments from the stack.
- __ pop(ecx);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ push(ecx);
- GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
#undef __
#define __ masm.
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index ce1bcf6..2a8d313 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -574,6 +574,11 @@
void Int32BinaryOperation(BinaryOperation* node);
+ // Generate a stub call from the virtual frame.
+ Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right);
+
void Comparison(AstNode* node,
Condition cc,
bool strict,
@@ -730,6 +735,9 @@
// Check whether two RegExps are equivalent
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+ void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -803,327 +811,6 @@
};
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
- Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm);
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type)
- : op_(op),
- mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
- name_(NULL) {
- if (static_operands_type_.IsSmi()) {
- mode_ = NO_OVERWRITE;
- }
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
- name_(NULL) {
- }
-
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- Result GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers, not on the stack.
- bool args_reversed_; // Left and right arguments are swapped.
- bool use_sse3_;
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
- }
-#endif
-
- // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
- class ArgsReversedBits: public BitField<bool, 11, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class StaticTypeInfoBits: public BitField<int, 13, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- bool ArgsInRegistersSupported() {
- return op_ == Token::ADD || op_ == Token::SUB
- || op_ == Token::MUL || op_ == Token::DIV;
- }
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
- void SetArgsInRegisters() { args_in_registers_ = true; }
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
-
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies ecx characters from esi to edi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be edi.
- Register src, // Must be esi.
- Register count, // Must be ecx.
- Register scratch, // Neither of above.
- bool ascii);
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register eax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- explicit StringCompareStub() {
- }
-
- // Compare two flat ascii strings and returns result in eax after popping two
- // arguments from the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index b57cf3d..ee94565 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -94,22 +94,33 @@
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList pointer_regs,
+ RegList object_regs,
+ RegList non_object_regs,
bool convert_call_to_jmp) {
- // Save the content of all general purpose registers in memory. This copy in
- // memory is later pushed onto the JS expression stack for the fake JS frame
- // generated and also to the C frame generated on top of that. In the JS
- // frame ONLY the registers containing pointers will be pushed on the
- // expression stack. This causes the GC to update these pointers so that
- // they will have the correct value when returning from the debugger.
- __ SaveRegistersToMemory(kJSCallerSaved);
-
// Enter an internal frame.
__ EnterInternalFrame();
- // Store the registers containing object pointers on the expression stack to
- // make sure that these are correctly updated during GC.
- __ PushRegistersFromMemory(pointer_regs);
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as smis, so the GC leaves them untouched.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ test(reg, Immediate(0xc0000000));
+ __ Assert(zero, "Unable to encode value as smi");
+ }
+ __ SmiTag(reg);
+ __ push(reg);
+ }
+ }
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
@@ -117,12 +128,25 @@
__ Set(eax, Immediate(0)); // no arguments
__ mov(ebx, Immediate(ExternalReference::debug_break()));
- CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+ CEntryStub ceb(1);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
- // stack in the reverse order as they where pushed.
- __ PopRegistersToMemory(pointer_regs);
+ // stack.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
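+ // In debug builds, zap each register first so any register that is not
+ // restored below holds kDebugZapValue instead of a stale value.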
+ if (FLAG_debug_code) {
+ __ Set(reg, Immediate(kDebugZapValue));
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ __ SmiUntag(reg);
+ }
+ }
// Get rid of the internal frame.
__ LeaveInternalFrame();
@@ -130,12 +154,9 @@
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ pop(eax);
+ __ add(Operand(esp), Immediate(kPointerSize));
}
- // Finally restore all registers.
- __ RestoreRegistersFromMemory(kJSCallerSaved);
-
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
@@ -151,7 +172,7 @@
// -- eax : receiver
// -- ecx : name
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
}
@@ -162,7 +183,8 @@
// -- ecx : name
// -- edx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
}
@@ -172,7 +194,7 @@
// -- edx : receiver
// -- eax : key
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
}
@@ -183,19 +205,17 @@
// -- ecx : key
// -- edx : receiver
// -----------------------------------
- // Register eax contains an object that needs to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
 // Register state for IC call (from ic-ia32.cc)
// ----------- S t a t e -------------
- // -- eax: number of arguments
+ // -- ecx: name
// -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false);
}
@@ -204,10 +224,11 @@
 // eax is the actual number of arguments, not encoded as a smi; see the
 // comment above the IC call.
// ----------- S t a t e -------------
- // -- eax: number of arguments
+ // -- eax: number of arguments (not smi)
+ // -- edi: constructor function
// -----------------------------------
// The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
}
@@ -216,7 +237,7 @@
// ----------- S t a t e -------------
// -- eax: return value
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), true);
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
}
@@ -225,7 +246,7 @@
// ----------- S t a t e -------------
// No registers used on entry.
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, 0, 0, false);
}
@@ -245,7 +266,7 @@
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
- Generate_DebugBreakCallHelper(masm, 0, true);
+ Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index 212cfde..9baf763 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -35,21 +35,6 @@
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute the stack pointer.
@@ -58,58 +43,11 @@
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ ASSERT(*state->pc_address != NULL);
return EXIT;
}
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 684ee14..1631b04 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -216,12 +217,28 @@
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
+FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
+ Token::Value op, Expression* left, Expression* right) {
+ ASSERT(ShouldInlineSmiCase(op));
+ if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
+ // We never generate inlined constant smi operations for these.
+ return kNoConstants;
+ } else if (right->IsSmiLiteral()) {
+ return kRightConstant;
+ } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
+ return kLeftConstant;
+ } else {
+ return kNoConstants;
+ }
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
@@ -246,20 +263,7 @@
case Expression::kTest:
// For simplicity we always test the accumulator register.
if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -289,20 +293,7 @@
case Expression::kTest:
// For simplicity we always test the accumulator register.
Move(result_register(), slot);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(result_register(), slot);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -330,20 +321,7 @@
case Expression::kTest:
// For simplicity we always test the accumulator register.
__ mov(result_register(), lit->handle());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ mov(result_register(), lit->handle());
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -371,20 +349,7 @@
case Expression::kTest:
// For simplicity we always test the accumulator register.
__ pop(result_register());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ pop(result_register());
- break;
- case kStack:
- __ mov(result_register(), Operand(esp, 0));
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -420,56 +385,7 @@
// For simplicity we always test the accumulator register.
__ Drop(count);
if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ Drop(count);
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- break;
- case kStack:
- if (count > 1) __ Drop(count - 1);
- __ mov(result_register(), reg);
- __ mov(Operand(esp, 0), result_register());
- break;
- }
- DoTest(context);
- break;
- }
-}
-
-
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = materialize_true;
- break;
- case Expression::kValue:
- *if_true = materialize_true;
- *if_false = materialize_false;
- break;
- case Expression::kTest:
- *if_true = true_label_;
- *if_false = false_label_;
- break;
- case Expression::kValueTest:
- *if_true = materialize_true;
- *if_false = false_label_;
- break;
- case Expression::kTestValue:
- *if_true = true_label_;
- *if_false = materialize_false;
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -510,32 +426,6 @@
case Expression::kTest:
break;
-
- case Expression::kValueTest:
- __ bind(materialize_true);
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::true_value());
- break;
- case kStack:
- __ push(Immediate(Factory::true_value()));
- break;
- }
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(materialize_false);
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::false_value());
- break;
- case kStack:
- __ push(Immediate(Factory::false_value()));
- break;
- }
- __ jmp(false_label_);
- break;
}
}
@@ -563,78 +453,19 @@
break;
}
case Expression::kTest:
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- // If value is false it's needed.
- if (!flag) __ mov(result_register(), Factory::false_value());
- break;
- case kStack:
- // If value is false it's needed.
- if (!flag) __ push(Immediate(Factory::false_value()));
- break;
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
}
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- // If value is true it's needed.
- if (flag) __ mov(result_register(), Factory::true_value());
- break;
- case kStack:
- // If value is true it's needed.
- if (flag) __ push(Immediate(Factory::true_value()));
- break;
- }
- __ jmp(flag ? true_label_ : false_label_);
break;
}
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- // The value to test is in the accumulator. If the value might be needed
- // on the stack (value/test and test/value contexts with a stack location
- // desired), then the value is already duplicated on the stack.
- ASSERT_NE(NULL, true_label_);
- ASSERT_NE(NULL, false_label_);
-
- // In value/test and test/value expression contexts with stack as the
- // desired location, there is already an extra value on the stack. Use a
- // label to discard it if unneeded.
- Label discard;
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_false = &discard;
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_true = &discard;
- break;
- }
- break;
- }
-
+void FullCodeGenerator::DoTest(Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
// Emit the inlined tests assumed by the stub.
__ cmp(result_register(), Factory::undefined_value());
__ j(equal, if_false);
@@ -648,83 +479,28 @@
__ test(result_register(), Immediate(kSmiTagMask));
__ j(zero, if_true);
- // Save a copy of the value if it may be needed and isn't already saved.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- }
-
// Call the ToBoolean stub for all other cases.
ToBooleanStub stub;
__ push(result_register());
__ CallStub(&stub);
__ test(eax, Operand(eax));
- // The stub returns nonzero for true. Complete based on the context.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
+ // The stub returns nonzero for true.
+ Split(not_zero, if_true, if_false, fall_through);
+}
- case Expression::kTest:
- __ j(not_zero, true_label_);
- __ jmp(false_label_);
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ j(zero, &discard);
- __ pop(result_register());
- __ jmp(true_label_);
- break;
- case kStack:
- __ j(not_zero, true_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
-
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ j(not_zero, &discard);
- __ pop(result_register());
- __ jmp(false_label_);
- break;
- case kStack:
- __ j(zero, false_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
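+ // Branch on cc to if_true and jump to if_false, omitting whichever branch
+ // would target the fall-through label.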
+ if (if_false == fall_through) {
+ __ j(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ j(NegateCondition(cc), if_false);
+ } else {
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
@@ -908,20 +684,21 @@
// Compile the label expression.
VisitForValue(clause->label(), kAccumulator);
- // Perform the comparison as if via '==='. The comparison stub expects
- // the smi vs. smi case to be handled before it is called.
- Label slow_case;
+ // Perform the comparison as if via '==='.
__ mov(edx, Operand(esp, 0)); // Switch value.
- __ mov(ecx, edx);
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
- __ cmp(edx, Operand(eax));
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ __ mov(ecx, edx);
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(equal, true);
__ CallStub(&stub);
__ test(eax, Operand(eax));
@@ -1203,7 +980,7 @@
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, Factory::undefined_value());
__ j(not_equal, &materialized);
@@ -1391,10 +1168,11 @@
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1405,57 +1183,70 @@
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForValue(property->obj(), kAccumulator);
__ push(result_register());
} else {
- VisitForValue(prop->obj(), kStack);
+ VisitForValue(property->obj(), kStack);
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kAccumulator);
__ mov(edx, Operand(esp, 0));
__ push(eax);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kStack);
}
break;
}
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
if (expr->is_compound()) {
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
+ EmitNamedPropertyLoad(property);
break;
case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
+ EmitKeyedPropertyLoad(property);
break;
}
- location_ = saved_location;
- }
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
+ Token::Value op = expr->binary_op();
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, expr->target(), expr->value())
+ : kNoConstants;
+ ASSERT(constant == kRightConstant || constant == kNoConstants);
+ if (constant == kNoConstants) {
+ __ push(eax); // Left operand goes on the stack.
+ VisitForValue(expr->value(), kAccumulator);
+ }
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr,
+ op,
+ Expression::kValue,
+ mode,
+ expr->target(),
+ expr->value(),
+ constant);
+ } else {
+ EmitBinaryOp(op, Expression::kValue, mode);
+ }
location_ = saved_location;
+
+ } else {
+ VisitForValue(expr->value(), kAccumulator);
}
// Record source position before possible IC call.
@@ -1496,14 +1287,325 @@
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
- __ push(result_register());
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Unknown());
+void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ Label call_stub, done;
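+ // Optimistically add the constant; bail out to the stub if the addition
+ // overflows or the operand was not a smi.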
+ __ add(Operand(eax), Immediate(value));
+ __ j(overflow, &call_stub);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ // Undo the optimistic add operation and call the shared stub.
+ __ bind(&call_stub);
+ __ sub(Operand(eax), Immediate(value));
+ Token::Value op = Token::ADD;
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ if (left_is_constant_smi) {
+ __ push(Immediate(value));
+ __ push(eax);
+ } else {
+ __ push(eax);
+ __ push(Immediate(value));
+ }
__ CallStub(&stub);
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ Label call_stub, done;
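+ // When the constant is the left operand the subtraction clobbers eax, so
+ // the original right operand is kept in ecx for the slow case.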
+ if (left_is_constant_smi) {
+ __ mov(ecx, eax);
+ __ mov(eax, Immediate(value));
+ __ sub(Operand(eax), ecx);
+ } else {
+ __ sub(Operand(eax), Immediate(value));
+ }
+ __ j(overflow, &call_stub);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ __ bind(&call_stub);
+ if (left_is_constant_smi) {
+ __ push(Immediate(value));
+ __ push(ecx);
+ } else {
+ // Undo the optimistic sub operation.
+ __ add(Operand(eax), Immediate(value));
+
+ __ push(eax);
+ __ push(Immediate(value));
+ }
+
+ Token::Value op = Token::SUB;
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ __ CallStub(&stub);
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value) {
+ Label call_stub, smi_case, done;
+ int shift_value = value->value() & 0x1f;
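+ // Only the low five bits of the shift count are significant, matching
+ // ECMAScript shift semantics.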
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ __ bind(&call_stub);
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ __ push(eax);
+ __ push(Immediate(value));
+ __ CallStub(&stub);
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::SHL:
+ if (shift_value != 0) {
+ __ mov(edx, eax);
+ if (shift_value > 1) {
+ __ shl(edx, shift_value - 1);
+ }
+ // Convert the int result to a smi, checking that it fits in the smi range.
+ ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
+ __ add(edx, Operand(edx));
+ __ j(overflow, &call_stub);
+ __ mov(eax, edx); // Put result back into eax.
+ }
+ break;
+ case Token::SAR:
+ if (shift_value != 0) {
+ __ sar(eax, shift_value);
+ __ and_(eax, ~kSmiTagMask);
+ }
+ break;
+ case Token::SHR:
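+ // Logical shifts by 0 or 1 can leave a value outside the non-negative smi
+ // range, so check the top two bits before re-tagging.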
+ if (shift_value < 2) {
+ __ mov(edx, eax);
+ __ SmiUntag(edx);
+ __ shr(edx, shift_value);
+ __ test(edx, Immediate(0xc0000000));
+ __ j(not_zero, &call_stub);
+ __ SmiTag(edx);
+ __ mov(eax, edx); // Put result back into eax.
+ } else {
+ __ SmiUntag(eax);
+ __ shr(eax, shift_value);
+ __ SmiTag(eax);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value) {
+ Label smi_case, done;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ // The order of the arguments does not matter for bit-ops with a
+ // constant operand.
+ __ push(Immediate(value));
+ __ push(eax);
+ __ CallStub(&stub);
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::BIT_OR:
+ __ or_(Operand(eax), Immediate(value));
+ break;
+ case Token::BIT_XOR:
+ __ xor_(Operand(eax), Immediate(value));
+ break;
+ case Token::BIT_AND:
+ __ and_(Operand(eax), Immediate(value));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ EmitConstantSmiBitOp(expr, op, context, mode, value);
+ break;
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ ASSERT(!left_is_constant_smi);
+ EmitConstantSmiShiftOp(expr, op, context, mode, value);
+ break;
+ case Token::ADD:
+ EmitConstantSmiAdd(expr, context, mode, left_is_constant_smi, value);
+ break;
+ case Token::SUB:
+ EmitConstantSmiSub(expr, context, mode, left_is_constant_smi, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant) {
+ if (constant == kRightConstant) {
+ Smi* value = Smi::cast(*right->AsLiteral()->handle());
+ EmitConstantSmiBinaryOp(expr, op, context, mode, false, value);
+ return;
+ } else if (constant == kLeftConstant) {
+ Smi* value = Smi::cast(*left->AsLiteral()->handle());
+ EmitConstantSmiBinaryOp(expr, op, context, mode, true, value);
+ return;
+ }
+
+ // Do a combined smi check of the operands. The left operand is on the
+ // stack; the right operand is in eax.
+ Label done, stub_call, smi_case;
+ __ pop(edx);
+ __ mov(ecx, eax);
+ __ or_(eax, Operand(edx));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ __ bind(&stub_call);
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ if (stub.ArgsInRegistersSupported()) {
+ stub.GenerateCall(masm_, edx, ecx);
+ } else {
+ __ push(edx);
+ __ push(ecx);
+ __ CallStub(&stub);
+ }
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ __ mov(eax, edx); // Copy left operand in case of a stub call.
+
+ switch (op) {
+ case Token::SAR:
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ sar_cl(eax); // No checks of the result are necessary.
+ __ SmiTag(eax);
+ break;
+ case Token::SHL: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shl_cl(eax);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(positive, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::SHR: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shr_cl(eax);
+ __ test(eax, Immediate(0xc0000000));
+ __ j(zero, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::ADD:
+ __ add(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ break;
+ case Token::SUB:
+ __ sub(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(eax);
+ __ imul(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &done, taken);
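+ // The result is zero: if either operand is negative the correct result is
+ // -0, which is not a smi, so fall back to the stub.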
+ __ mov(ebx, edx);
+ __ or_(ebx, Operand(ecx));
+ __ j(negative, &stub_call);
+ break;
+ }
+ case Token::BIT_OR:
+ __ or_(eax, Operand(ecx));
+ break;
+ case Token::BIT_AND:
+ __ and_(eax, Operand(ecx));
+ break;
+ case Token::BIT_XOR:
+ __ xor_(eax, Operand(ecx));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode) {
+ TypeInfo type = TypeInfo::Unknown();
+ GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type);
+ if (stub.ArgsInRegistersSupported()) {
+ __ pop(edx);
+ stub.GenerateCall(masm_, edx, eax);
+ } else {
+ __ push(result_register());
+ __ CallStub(&stub);
+ }
Apply(context, eax);
}
@@ -1920,11 +2022,11 @@
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// arguments.
- // Push function on the stack.
- VisitForValue(expr->expression(), kStack);
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // Push the constructor on the stack. If it is not a function it is used as
+ // the receiver for CALL_NON_FUNCTION; otherwise the value on the stack is
+ // ignored.
+ VisitForValue(expr->expression(), kStack);
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -1937,16 +2039,13 @@
// constructor invocation.
SetSourcePosition(expr->position());
- // Load function, arg_count into edi and eax.
+ // Load function and argument count into edi and eax.
__ Set(eax, Immediate(arg_count));
- // Function is in esp[arg_count + 1].
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ mov(edi, Operand(esp, arg_count * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in eax, or pop it.
- DropAndApply(1, context_, eax);
+ Apply(context_, eax);
}
@@ -1958,11 +2057,12 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_true);
- __ jmp(if_false);
+ Split(zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1976,11 +2076,12 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
- __ j(zero, if_true);
- __ jmp(if_false);
+ Split(zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1994,7 +2095,9 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
@@ -2009,8 +2112,7 @@
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, if_false);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, if_true);
- __ jmp(if_false);
+ Split(below_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2024,13 +2126,14 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
- __ j(above_equal, if_true);
- __ jmp(if_false);
+ Split(above_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2044,15 +2147,16 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
- __ jmp(if_false);
+ Split(not_zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2067,7 +2171,9 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
@@ -2085,13 +2191,14 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2105,13 +2212,14 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2125,13 +2233,14 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2144,7 +2253,9 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2160,8 +2271,7 @@
__ bind(&check_frame_marker);
__ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2177,12 +2287,13 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(ebx);
__ cmp(eax, Operand(ebx));
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2640,14 +2751,6 @@
}
-void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
- __ CallRuntime(Runtime::kRegExpCloneResult, 1);
- Apply(context_, eax);
-}
-
-
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
VisitForValue(args->at(0), kStack);
@@ -2745,6 +2848,46 @@
}
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(eax);
+ }
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ test(FieldOperand(eax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ Split(zero, if_true, if_false, fall_through);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(eax);
+ }
+
+ __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+ __ IndexFromHash(eax, eax);
+
+ Apply(context_, eax);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -2845,19 +2988,7 @@
break;
}
break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::undefined_value());
- break;
- case kStack:
- __ push(Immediate(Factory::undefined_value()));
- break;
- }
- // Fall through.
case Expression::kTest:
- case Expression::kValueTest:
__ jmp(false_label_);
break;
}
@@ -2866,45 +2997,22 @@
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
-
+ Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
-
- VisitForControl(expr->expression(), if_true, if_false);
-
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ mov(eax, CodeGenerator::GlobalObject());
- __ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ call(ic, RelocInfo::CODE_TARGET);
- __ push(eax);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitForValue(expr->expression(), kStack);
- }
-
+ VisitForTypeofValue(expr->expression(), kStack);
__ CallRuntime(Runtime::kTypeof, 1);
Apply(context_, eax);
break;
@@ -2925,9 +3033,7 @@
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
@@ -2941,28 +3047,26 @@
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register eax.
+ // The generic unary operation stub expects the argument to be
+ // in the accumulator register eax.
VisitForValue(expr->expression(), kAccumulator);
- // Avoid calling the stub for Smis.
- Label smi, done;
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- // Non-smi: call stub leaving result in accumulator register.
+ Label done;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label call_stub;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &call_stub);
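+ // With the smi tag bit set, a bitwise not maps smi(n) to smi(~n), since
+ // ~(2n + 1) == 2 * ~n.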
+ __ lea(eax, Operand(eax, kSmiTagMask));
+ __ not_(eax);
+ __ jmp(&done);
+ __ bind(&call_stub);
+ }
+ bool overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode mode =
+ overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode);
__ CallStub(&stub);
- __ jmp(&done);
- // Perform operation directly on Smis.
- __ bind(&smi);
- __ not_(result_register());
- __ and_(result_register(), ~kSmiTagMask); // Remove inverted smi-tag.
__ bind(&done);
- Apply(context_, result_register());
+ Apply(context_, eax);
break;
}
@@ -2974,6 +3078,8 @@
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// as the left-hand side.
if (!expr->expression()->IsValidLeftHandSide()) {
@@ -3022,8 +3128,10 @@
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ }
__ push(eax);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ bind(&no_conversion);
@@ -3038,8 +3146,6 @@
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// Save the result on the stack. If we have a named or keyed property
// we store the result under the receiver that is currently on top
// of the stack.
@@ -3060,7 +3166,7 @@
// Inline smi case if we are in a loop.
Label stub_call, done;
- if (loop_depth() > 0) {
+ if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
} else {
@@ -3146,68 +3252,117 @@
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- VisitForEffect(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- VisitForValue(expr->left(), kStack);
- VisitForValue(expr->right(), kAccumulator);
- EmitBinaryOp(expr->op(), context_);
- break;
-
- default:
- UNREACHABLE();
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ mov(eax, CodeGenerator::GlobalObject());
+ __ mov(ecx, Immediate(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ call(ic, RelocInfo::CODE_TARGET);
+ if (where == kStack) __ push(eax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(esi);
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ if (where == kStack) __ push(eax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr, where);
}
}
-void FullCodeGenerator::EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch) {
- __ cmp(obj, Operand(null_const));
- if (strict) {
- __ j(equal, if_true);
- } else {
- __ j(equal, if_true);
- __ cmp(obj, Factory::undefined_value());
- __ j(equal, if_true);
- __ test(obj, Immediate(kSmiTagMask));
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ Literal* right_literal = right->AsLiteral();
+ if (right_literal == NULL) return false;
+ Handle<Object> right_literal_value = right_literal->handle();
+ if (!right_literal_value->IsString()) return false;
+ UnaryOperation* left_unary = left->AsUnaryOperation();
+ if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+ Handle<String> check = Handle<String>::cast(right_literal_value);
+
+ VisitForTypeofValue(left_unary->expression(), kAccumulator);
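+ // Dispatch on the literal string and emit a specialized type check for each
+ // possible typeof result.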
+ if (check->Equals(Heap::number_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_true);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::string_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
- // It can be an undetectable object.
- __ mov(scratch, FieldOperand(obj, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
+ // Check for undetectable objects => false.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ Split(below, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ cmp(eax, Factory::true_value());
+ __ j(equal, if_true);
+ __ cmp(eax, Factory::false_value());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ cmp(eax, Factory::undefined_value());
+ __ j(equal, if_true);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // Check for undetectable objects => true.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::function_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
+ __ j(equal, if_true);
+ // Regular expressions => 'function' (they are callable).
+ __ CmpInstanceType(edx, JS_REGEXP_TYPE);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::object_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, if_true);
+ // Regular expressions => 'function', not 'object'.
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, edx);
+ __ j(equal, if_false);
+ // Check for undetectable objects => false.
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ // Check for JS objects => true.
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, if_false);
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ Split(less_equal, if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
}
- __ jmp(if_false);
+
+ return true;
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
@@ -3215,7 +3370,19 @@
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ Apply(context_, if_true, if_false);
+ return;
+ }
VisitForValue(expr->left(), kStack);
switch (expr->op()) {
@@ -3223,8 +3390,7 @@
VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ cmp(eax, Factory::true_value());
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
@@ -3232,8 +3398,8 @@
InstanceofStub stub;
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(zero, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
+ // The stub returns 0 for true.
+ Split(zero, if_true, if_false, fall_through);
break;
}
@@ -3241,28 +3407,14 @@
VisitForValue(expr->right(), kAccumulator);
Condition cc = no_condition;
bool strict = false;
- switch (expr->op()) {
+ switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
- case Token::EQ: {
+ case Token::EQ:
cc = equal;
__ pop(edx);
- // If either operand is constant null we do a fast compare
- // against null.
- Literal* right_literal = expr->right()->AsLiteral();
- Literal* left_literal = expr->left()->AsLiteral();
- if (right_literal != NULL && right_literal->handle()->IsNull()) {
- EmitNullCompare(strict, edx, eax, if_true, if_false, ecx);
- Apply(context_, if_true, if_false);
- return;
- } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
- EmitNullCompare(strict, eax, edx, if_true, if_false, ecx);
- Apply(context_, if_true, if_false);
- return;
- }
break;
- }
case Token::LT:
cc = less;
__ pop(edx);
@@ -3289,23 +3441,21 @@
UNREACHABLE();
}
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
- __ cmp(edx, Operand(eax));
- __ j(cc, if_true);
- __ jmp(if_false);
+ if (ShouldInlineSmiCase(op)) {
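+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.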
+ Label slow_case;
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
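+ // Pass NULL as the fall-through so Split emits explicit jumps to both
+ // labels; the smi path must not fall into the slow case below.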
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(cc, if_true);
- __ jmp(if_false);
+ Split(cc, if_true, if_false, fall_through);
}
}
@@ -3315,6 +3465,34 @@
}
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForValue(expr->expression(), kAccumulator);
+ __ cmp(eax, Factory::null_value());
+ if (expr->is_strict()) {
+ Split(equal, if_true, if_false, fall_through);
+ } else {
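+ // For non-strict comparison, undefined and undetectable objects also
+ // compare equal to null.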
+ __ j(equal, if_true);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(equal, if_true);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // It can be an undetectable object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(edx, Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ }
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, eax);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 283ae4d..3d0bd79 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -519,31 +519,6 @@
}
-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
- Register key,
- Register hash) {
- // Register use:
- // key - holds the overwritten key on exit.
- // hash - holds the key's hash. Clobbered.
-
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ and_(hash, String::kArrayIndexValueMask);
- __ shr(hash, String::kHashShift - kSmiTagSize);
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ mov(key, hash);
-}
-
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -704,7 +679,7 @@
__ ret(0);
__ bind(&index_string);
- GenerateIndexFromHash(masm, eax, ebx);
+ __ IndexFromHash(ebx, eax);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1565,7 +1540,7 @@
GenerateMiss(masm, argc);
__ bind(&index_string);
- GenerateIndexFromHash(masm, ecx, ebx);
+ __ IndexFromHash(ebx, ecx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 79b4064..c215142 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -191,81 +191,6 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of registers to memory location.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(Operand::StaticVariable(reg_addr), reg);
- }
- }
-}
-
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of memory location to registers.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(reg, Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::PushRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Push the content of the memory location to the stack.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- push(Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::PopRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Pop the content from the stack to the memory location.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- pop(Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the stack to the memory location and adjust base.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- mov(scratch, Operand(base, 0));
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(Operand::StaticVariable(reg_addr), scratch);
- lea(base, Operand(base, kPointerSize));
- }
- }
-}
-
void MacroAssembler::DebugBreak() {
Set(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak)));
@@ -274,6 +199,7 @@
}
#endif
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, Operand(dst)); // shorter than mov
@@ -377,6 +303,17 @@
}
+void MacroAssembler::AbortIfNotString(Register object) {
+ test(object, Immediate(kSmiTagMask));
+ Assert(not_equal, "Operand is not a string");
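+ // Temporarily replace the object register with its map to check the
+ // instance type, then restore it.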
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Assert(below, "Operand is not a string");
+}
+
+
void MacroAssembler::AbortIfSmi(Register object) {
test(object, Immediate(kSmiTagMask));
Assert(not_equal, "Operand is a smi");
@@ -405,7 +342,8 @@
leave();
}
-void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
+
+void MacroAssembler::EnterExitFramePrologue() {
// Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -413,7 +351,7 @@
push(ebp);
mov(ebp, Operand(esp));
- // Reserve room for entry stack pointer and push the debug marker.
+ // Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // Saved entry sp, patched before call.
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
@@ -425,21 +363,8 @@
mov(Operand::StaticVariable(context_address), esi);
}
-void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Save the state of all registers to the stack from the memory
- // location. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // TODO(1243899): This should be symmetric to
- // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
- // correct here, but computed for the other call. Very error
- // prone! FIX THIS. Actually there are deeper problems with
- // register saving than this asymmetry (see the bug report
- // associated with this issue).
- PushRegistersFromMemory(kJSCallerSaved);
- }
-#endif
+void MacroAssembler::EnterExitFrameEpilogue(int argc) {
// Reserve space for arguments.
sub(Operand(esp), Immediate(argc * kPointerSize));
@@ -455,44 +380,30 @@
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
- EnterExitFramePrologue(mode);
+void MacroAssembler::EnterExitFrame() {
+ EnterExitFramePrologue();
// Setup argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
- EnterExitFrameEpilogue(mode, 2);
+ EnterExitFrameEpilogue(2);
}
-void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
- int stack_space,
+void MacroAssembler::EnterApiExitFrame(int stack_space,
int argc) {
- EnterExitFramePrologue(mode);
+ EnterExitFramePrologue();
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
- EnterExitFrameEpilogue(mode, argc);
+ EnterExitFrameEpilogue(argc);
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Restore the memory copy of the registers by digging them out from
- // the stack. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // It's okay to clobber register ebx below because we don't need
- // the function pointer after this.
- const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
- lea(ebx, Operand(ebp, kOffset));
- CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
- }
-#endif
-
+void MacroAssembler::LeaveExitFrame() {
// Get the return address from the stack and restore the frame pointer.
mov(ecx, Operand(ebp, 1 * kPointerSize));
mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -1040,6 +951,25 @@
}
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // The assert checks that the constant for the maximum number of digits
+ // for an array index cached in the hash field does not conflict with the
+ // number of bits reserved for it.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in the index register. kArrayIndexValueMask
+ // has zeros in the low kHashShift bits.
+ and_(hash, String::kArrayIndexValueMask);
+ STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
+ if (String::kHashShift > kSmiTagSize) {
+ shr(hash, String::kHashShift - kSmiTagSize);
+ }
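+ // Copy the result to the index register if the caller passed a
+ // different register.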
+ if (!index.is(hash)) {
+ mov(index, hash);
+ }
+}
+
+
void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
CallRuntime(Runtime::FunctionForId(id), num_arguments);
}
@@ -1369,6 +1299,30 @@
}
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ mov(function, Operand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map) {
+ // Load the initial map. The global functions all have initial maps.
+ mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (FLAG_debug_code) {
+ Label ok, fail;
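+ // Maps are heap objects whose own map is the meta map, so comparing
+ // against meta_map verifies that this really is a map.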
+ CheckMap(map, Factory::meta_map(), &fail, false);
+ jmp(&ok);
+ bind(&fail);
+ Abort("Global functions must have initial map");
+ bind(&ok);
+ }
+}
+
void MacroAssembler::Ret() {
ret(0);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index e5abfb4..5e850c0 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -99,13 +99,6 @@
// ---------------------------------------------------------------------------
// Debugger Support
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void PushRegistersFromMemory(RegList regs);
- void PopRegistersToMemory(RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
void DebugBreak();
#endif
@@ -128,18 +121,25 @@
// Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
- void EnterExitFrame(ExitFrame::Mode mode);
+ void EnterExitFrame();
- void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
+ void EnterApiExitFrame(int stack_space, int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
- void LeaveExitFrame(ExitFrame::Mode mode);
+ void LeaveExitFrame();
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global function with the given index.
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same.
+ void LoadGlobalFunctionInitialMap(Register function, Register map);
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -267,6 +267,9 @@
// Abort execution if argument is a smi. Used in debug code.
void AbortIfSmi(Register object);
+ // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -396,6 +399,12 @@
// occurred.
void IllegalOperation(int num_arguments);
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - receives the array index on exit. May be the same register as hash.
+ void IndexFromHash(Register hash, Register index);
+
// ---------------------------------------------------------------------------
// Runtime calls
@@ -564,8 +573,8 @@
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
- void EnterExitFramePrologue(ExitFrame::Mode mode);
- void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
+ void EnterExitFramePrologue();
+ void EnterExitFrameEpilogue(int argc);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index a7930fb..2aab7a8 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -31,11 +31,9 @@
#include "unicode.h"
#include "log.h"
-#include "ast.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
-#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
namespace v8 {
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index c6c65f0..7fc3f81 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -257,16 +257,8 @@
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
- // Load the global or builtins object from the current context.
- __ mov(prototype, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- __ mov(prototype,
- FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- __ mov(prototype, Operand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ mov(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadGlobalFunction(index, prototype);
+ __ LoadGlobalFunctionInitialMap(prototype, prototype);
// Load the prototype from the initial map.
__ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index ff9132c..5f1e1e4 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -1143,9 +1143,9 @@
// and receiver on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
// Duplicate the function before preparing the frame.
- PushElementAt(arg_count + 1);
+ PushElementAt(arg_count);
Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill args and receiver.
+ PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
function.ToRegister(edi);
// Constructors are called with the number of arguments in register