Merge V8 at 3.8.9.11
Bug: 5688872
Change-Id: Ie3b1dd67a730ec5e82686b7b37dba26f6a9bb24f
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 5f67077..ef10922 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -30,13 +30,15 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
#define V8_IA32_ASSEMBLER_IA32_INL_H_
+#include "ia32/assembler-ia32.h"
+
#include "cpu.h"
#include "debug.h"
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 322ba44..a42f632 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -350,7 +350,7 @@
}
#endif
- // Setup buffer pointers.
+ // Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -377,7 +377,7 @@
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Setup code descriptor.
+ // Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -388,8 +388,91 @@
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
+ int mask = m - 1;
+ int addr = pc_offset();
+ Nop((m - (addr & mask)) & mask);
+}
+
+
+bool Assembler::IsNop(Address addr) {
+  Address a = addr;
+  // Skip over any 0x66 operand-size prefixes.
+  while (*a == 0x66) a++;
+  // Accept the one-byte nop (possibly prefixed) and the 0F 1F
+  // multi-byte nop family.
+  if (*a == 0x90) return true;
+  if (a[0] == 0xf && a[1] == 0x1f) return true;
+  return false;
+}
+
+
+void Assembler::Nop(int bytes) {
+ EnsureSpace ensure_space(this);
+
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ // Older CPUs that do not support SSE2 may not support multibyte NOP
+ // instructions.
+ for (; bytes > 0; bytes--) {
+ EMIT(0x90);
+ }
+ return;
+ }
+
+ // Multi-byte NOPs from http://support.amd.com/us/Processor_TechDocs/40546.pdf
+ while (bytes > 0) {
+ switch (bytes) {
+ case 2:
+ EMIT(0x66);
+ case 1:
+ EMIT(0x90);
+ return;
+ case 3:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0);
+ return;
+ case 4:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x40);
+ EMIT(0);
+ return;
+ case 6:
+ EMIT(0x66);
+ case 5:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x44);
+ EMIT(0);
+ EMIT(0);
+ return;
+ case 7:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x80);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ return;
+ default:
+ case 11:
+ EMIT(0x66);
+ bytes--;
+ case 10:
+ EMIT(0x66);
+ bytes--;
+ case 9:
+ EMIT(0x66);
+ bytes--;
+ case 8:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x84);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ bytes -= 8;
+ }
}
}
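The padding Align emits is (m - (pc_offset & mask)) & mask, so an already-aligned offset emits nothing. Nop then decomposes the request greedily: for example, a 14-byte request becomes an 11-byte NOP (three 0x66 prefixes on the 8-byte 0F 1F form) followed by a 3-byte NOP. A minimal scalar sketch of the padding arithmetic (illustration only, not V8 code):

    // Padding needed to align pc_offset to m (m a power of 2): e.g. for
    // m == 16, offset 13 -> 3 bytes, offset 16 -> 0 bytes.
    int PaddingBytes(int pc_offset, int m) {
      int mask = m - 1;
      return (m - (pc_offset & mask)) & mask;
    }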
@@ -463,13 +546,6 @@
}
-void Assembler::push(Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0x68);
- emit(handle);
-}
-
-
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
@@ -1644,6 +1720,27 @@
}
+void Assembler::f2xm1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF0);
+}
+
+
+void Assembler::fscale() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFD);
+}
+
+
+void Assembler::fninit() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE3);
+}
+
+
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
@@ -1957,6 +2054,16 @@
}
+void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x2E);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2162,6 +2269,19 @@
}
+void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x17);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
+
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2341,7 +2461,7 @@
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
- // Setup new buffer.
+ // Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index d798f81..9ed46fc 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -659,6 +659,7 @@
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -673,7 +674,6 @@
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
- void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -926,6 +926,9 @@
void fsin();
void fptan();
void fyl2x();
+ void f2xm1();
+ void fscale();
+ void fninit();
void fadd(int i);
void fsub(int i);
@@ -983,6 +986,7 @@
void andpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, const Operand& src);
enum RoundingMode {
kRoundToNearest = 0x0,
@@ -1017,6 +1021,7 @@
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src);
+ void extractps(Register dst, XMMRegister src, byte imm8);
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
@@ -1080,7 +1085,7 @@
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
- static bool IsNop(Address addr) { return *addr == 0x90; }
+ static bool IsNop(Address addr);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index e12e79a..28c97f0 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -333,7 +333,7 @@
__ push(ebx);
__ push(ebx);
- // Setup pointer to last argument.
+ // Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
@@ -537,7 +537,7 @@
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
+ // Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
@@ -1238,37 +1238,42 @@
false,
&prepare_generic_code_call);
__ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ push(eax);
- // eax: JSArray
+ __ push(ebx);
+ __ mov(ebx, Operand(esp, kPointerSize));
// ebx: argc
// edx: elements_array_end (untagged)
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
// Location of the last argument
- __ lea(edi, Operand(esp, 2 * kPointerSize));
+ int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
+ __ lea(edi, Operand(esp, last_arg_offset));
// Location of the first array element (Parameter fill_with_holes to
- // AllocateJSArrayis false, so the FixedArray is returned in ecx).
+ // AllocateJSArray is false, so the FixedArray is returned in ecx).
__ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
+ Label has_non_smi_element;
+
// ebx: argc
// edx: location of the first array element
// edi: location of the last argument
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
Label loop, entry;
__ mov(ecx, ebx);
__ jmp(&entry);
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(eax, &has_non_smi_element);
+ }
__ mov(Operand(edx, 0), eax);
__ add(edx, Immediate(kPointerSize));
__ bind(&entry);
@@ -1278,13 +1283,21 @@
// Remove caller arguments from the stack and return.
// ebx: argc
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
+ __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
__ pop(eax);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- __ ret(0);
+ __ pop(ebx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size,
+ last_arg_offset - kPointerSize));
+ __ jmp(ecx);
+
+ __ bind(&has_non_smi_element);
+ // Throw away the array that's only been partially constructed.
+ __ pop(eax);
+ __ UndoAllocationInNewSpace(eax);
// Restore argc and constructor before running the generic code.
__ bind(&prepare_generic_code_call);
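With the JSArray, argc and (for construct calls) the constructor now kept on the stack, the distance from esp to the last argument depends on the call kind. A sketch of the offset computation mirroring last_arg_offset above (assuming kPointerSize == 4 on ia32):

    // Stack below the arguments at the copy loop (ia32):
    //   esp[0]: JSArray, esp[4]: argc, [esp[8]: constructor,] then the
    //   return address, then the last argument.
    const int kPointerSize = 4;  // ia32

    int LastArgOffset(bool construct_call) {
      // Four slots above the last argument for construct calls, three otherwise.
      return (construct_call ? 4 : 3) * kPointerSize;
    }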
@@ -1296,6 +1309,40 @@
}
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected initial map for InternalArray function");
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, "Unexpected initial map for InternalArray function");
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ ArrayNativeCode(masm, false, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ jmp(array_code, RelocInfo::CODE_TARGET);
+}
+
+
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1597,6 +1644,7 @@
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ call(edx);
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 3a286f0..eded335 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -128,14 +128,14 @@
// Get the function from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
Factory* factory = masm->isolate()->factory();
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
factory->function_context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
__ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
@@ -179,7 +179,7 @@
// Get the serialized scope info from the stack.
__ mov(ebx, Operand(esp, 2 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
Factory* factory = masm->isolate()->factory();
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
factory->block_context_map());
@@ -202,7 +202,7 @@
__ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
__ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
__ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
@@ -749,7 +749,7 @@
// Exponent word in scratch, exponent part of exponent word in scratch2.
// Zero in ecx.
// We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
@@ -2938,157 +2938,263 @@
void MathPowStub::Generate(MacroAssembler* masm) {
- // Registers are used as follows:
- // edx = base
- // eax = exponent
- // ecx = temporary, result
-
CpuFeatures::Scope use_sse2(SSE2);
- Label allocate_return, call_runtime;
-
- // Load input parameters.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Save 1 in xmm3 - we need this several times later on.
- __ mov(ecx, Immediate(1));
- __ cvtsi2sd(xmm3, ecx);
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(eax, &exponent_nonsmi);
- __ JumpIfNotSmi(edx, &base_nonsmi);
-
- // Optimized version when both exponent and base are smis.
- Label powi;
- __ SmiUntag(edx);
- __ cvtsi2sd(xmm0, edx);
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
+ const Register exponent = eax;
+ const Register base = edx;
+ const Register scratch = ecx;
+ const XMMRegister double_result = xmm3;
+ const XMMRegister double_base = xmm2;
+ const XMMRegister double_exponent = xmm1;
+ const XMMRegister double_scratch = xmm4;
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ Label call_runtime, done, exponent_not_smi, int_exponent;
- // Optimized version of pow if exponent is a smi.
- // xmm0 contains the base.
- __ bind(&powi);
- __ SmiUntag(eax);
+ // Save 1 in double_result - we need this several times later on.
+ __ mov(scratch, Immediate(1));
+ __ cvtsi2sd(double_result, scratch);
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ mov(edx, eax);
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack.
+ __ mov(base, Operand(esp, 2 * kPointerSize));
+ __ mov(exponent, Operand(esp, 1 * kPointerSize));
+
+ __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+ __ cmp(FieldOperand(base, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(not_equal, &call_runtime);
+
+ __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent, Label::kNear);
+
+ __ bind(&base_is_smi);
+ __ SmiUntag(base);
+ __ cvtsi2sd(double_base, base);
+
+ __ bind(&unpack_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(not_equal, &call_runtime);
+ __ movdbl(double_exponent,
+ FieldOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ movdbl(double_exponent,
+ FieldOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ Label fast_power;
+ // Detect integer exponents stored as double.
+ __ cvttsd2si(exponent, Operand(double_exponent));
+ // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cmp(exponent, Immediate(0x80000000u));
+ __ j(equal, &call_runtime);
+ __ cvtsi2sd(double_scratch, exponent);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_exponent, double_scratch);
+ __ j(equal, &int_exponent);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label continue_sqrt, continue_rsqrt, not_plus_half;
+ // Test for 0.5.
+ // Load double_scratch with 0.5.
+ __ mov(scratch, Immediate(0x3F000000u));
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, ¬_plus_half, Label::kNear);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, single-precision -Infinity has the highest
+ // 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000u);
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ __ ucomisd(double_base, double_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_sqrt, Label::kNear);
+ __ j(carry, &continue_sqrt, Label::kNear);
+
+ // Set result to Infinity in the special case.
+ __ xorps(double_result, double_result);
+ __ subsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&continue_sqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_scratch, double_scratch);
+ __ addsd(double_scratch, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ // Test for -0.5.
+ __ bind(¬_plus_half);
+ // Load double_scratch with -0.5 by subtracting 1.
+ __ subsd(double_scratch, double_result);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &fast_power, Label::kNear);
+
+ // Calculates reciprocal of square root of base. Check for the special
+ // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, single-precision -Infinity has the highest
+ // 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000u);
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ __ ucomisd(double_base, double_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_rsqrt, Label::kNear);
+ __ j(carry, &continue_rsqrt, Label::kNear);
+
+ // Set result to 0 in the special case.
+ __ xorps(double_result, double_result);
+ __ jmp(&done);
+
+ __ bind(&continue_rsqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_exponent, double_exponent);
+ __ addsd(double_exponent, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_exponent, double_exponent);
+ __ divsd(double_result, double_exponent);
+ __ jmp(&done);
+ }
+
+ // Using FPU instructions to calculate power.
+ Label fast_power_failed;
+ __ bind(&fast_power);
+ __ fnclex(); // Clear flags to catch exceptions later.
+ // Transfer (B)ase and (E)xponent onto the FPU register stack.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movdbl(Operand(esp, 0), double_exponent);
+ __ fld_d(Operand(esp, 0)); // E
+ __ movdbl(Operand(esp, 0), double_base);
+ __ fld_d(Operand(esp, 0)); // B, E
+
+ // Exponent is in st(1) and base is in st(0)
+ // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+ // FYL2X calculates st(1) * log2(st(0))
+ __ fyl2x(); // X
+ __ fld(0); // X, X
+ __ frndint(); // rnd(X), X
+ __ fsub(1); // rnd(X), X-rnd(X)
+ __ fxch(1); // X - rnd(X), rnd(X)
+ // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+ __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
+ __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
+ __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ // FSCALE calculates st(0) * 2^st(1)
+ __ fscale(); // 2^X, rnd(X)
+ __ fstp(1);
+ // Bail out to runtime in case of exceptions in the status word.
+ __ fnstsw_ax();
+ __ test_b(eax, 0x5F); // We check for all but precision exception.
+ __ j(not_zero, &fast_power_failed, Label::kNear);
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(double_result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&done);
+
+ __ bind(&fast_power_failed);
+ __ fninit();
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&call_runtime);
+ }
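The fast-power path above computes B^E as 2^X with X = E * log2(B), splitting X into an integer part (handled by FSCALE) and a fractional part in (-1, 1) (F2XM1 is only defined on that range). A scalar sketch of the same split, ignoring the NaN/infinity special cases the stub handles (illustration only):

    #include <cmath>

    // Scalar sketch of the FPU sequence: B^E = 2^X, X = E * log2(B),
    // with X split so that the fraction fed to f2xm1 is in (-1, 1).
    double FastPow(double base, double exponent) {
      double x = exponent * std::log2(base);       // fyl2x
      double r = std::nearbyint(x);                // frndint
      double m = std::exp2(x - r);                 // f2xm1 + fld1 + faddp
      return std::scalbn(m, static_cast<int>(r));  // fscale
    }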
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+ const XMMRegister double_scratch2 = double_exponent;
+ __ mov(scratch, exponent); // Back up exponent.
+ __ movsd(double_scratch, double_base); // Back up base.
+ __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg;
- __ cmp(eax, 0);
- __ j(greater_equal, &no_neg, Label::kNear);
- __ neg(eax);
+ Label no_neg, while_true, no_multiply;
+ __ test(scratch, scratch);
+ __ j(positive, &no_neg, Label::kNear);
+ __ neg(scratch);
__ bind(&no_neg);
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
__ bind(&while_true);
- __ shr(eax, 1);
+ __ shr(scratch, 1);
__ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(xmm1, xmm0);
+ __ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
- __ mulsd(xmm0, xmm0);
+
+ __ mulsd(double_scratch, double_scratch);
__ j(not_zero, &while_true);
- // base has the original value of the exponent - if the exponent is
- // negative return 1/result.
- __ test(edx, edx);
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ mov(ecx, Immediate(0x7FB00000));
- __ movd(xmm0, ecx);
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- __ j(equal, &call_runtime);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
+ // scratch has the original value of the exponent - if the exponent is
+ // negative, return 1/result.
+ __ test(exponent, exponent);
+ __ j(positive, &done);
+ __ divsd(double_scratch2, double_result);
+ __ movsd(double_result, double_scratch2);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ xorps(double_scratch2, double_scratch2);
+ __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
+ // double_exponent aliased as double_scratch2 has already been overwritten
+ // and may not have contained the exponent value in the first place when the
+ // exponent is a smi. We reset it with exponent value before bailing out.
+ __ j(not_equal, &done);
+ __ cvtsi2sd(double_exponent, exponent);
- // exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- __ j(parity_even, &call_runtime);
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
- __ SmiUntag(edx);
- __ cvtsi2sd(xmm0, edx);
- __ jmp(&handle_special_cases, Label::kNear);
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ bind(&done);
+ __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(2 * kPointerSize);
+ } else {
+ __ bind(&call_runtime);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(4, scratch);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 4);
+ }
+ // Return value is in st(0) on ia32.
+ // Store it into the (fixed) result register.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(double_result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
- __ bind(&base_not_smi);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ and_(ecx, HeapNumber::kExponentMask);
- __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- __ j(greater_equal, &call_runtime);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ mov(ecx, Immediate(0xBF000000));
- __ movd(xmm2, ecx);
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, ¬_minus_half, Label::kNear);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(¬_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &call_runtime);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- __ bind(&allocate_return);
- __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
- __ mov(eax, ecx);
- __ ret(2 * kPointerSize);
-
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(0);
+ }
}
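The int_exponent path is exponentiation by squaring on |exponent|, with a reciprocal at the end for negative exponents and a bail-out when the reciprocal underflows, since x^-y and (1/x)^y can differ for subnormal results. A scalar equivalent, omitting the bail-out (sketch only):

    // Scalar sketch of the int_exponent loop (INT_MIN overflow ignored).
    double PowInt(double base, int exponent) {
      unsigned bits = static_cast<unsigned>(exponent < 0 ? -exponent : exponent);
      double result = 1.0;
      while (bits != 0) {
        if (bits & 1) result *= base;  // carry set by shr -> multiply
        base *= base;                  // square for the next exponent bit
        bits >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }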
@@ -3273,7 +3379,7 @@
__ mov(FieldOperand(eax, i), edx);
}
- // Setup the callee in-object property.
+ // Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ mov(edx, Operand(esp, 4 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
@@ -3286,7 +3392,7 @@
Heap::kArgumentsLengthIndex * kPointerSize),
ecx);
- // Setup the elements pointer in the allocated arguments object.
+ // Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
@@ -3465,7 +3571,7 @@
// Get the parameters pointer from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
- // Setup the elements pointer in the allocated arguments object and
+ // Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
@@ -3617,7 +3723,7 @@
kShortExternalStringMask);
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ascii string. None of the following
+ // Any other flat string must be a flat ASCII string. None of the following
// string type tests will succeed if subject is not a string or a short
// external string.
__ and_(ebx, Immediate(kIsNotStringMask |
@@ -3666,16 +3772,16 @@
kStringRepresentationMask | kStringEncodingMask);
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be sequential ascii or external.
+ // Any other flat string must be sequential ASCII or external.
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
kStringRepresentationMask);
__ j(not_zero, &external_string);
__ bind(&seq_ascii_string);
- // eax: subject string (flat ascii)
+ // eax: subject string (flat ASCII)
// ecx: RegExp data (FixedArray)
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is ascii.
+ __ Set(ecx, Immediate(1)); // Type is ASCII.
__ jmp(&check_code, Label::kNear);
__ bind(&seq_two_byte_string);
@@ -3692,7 +3798,7 @@
// eax: subject string
// edx: code
- // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
+ // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ mov(ebx, Operand(esp, kPreviousIndexOffset));
@@ -3701,7 +3807,7 @@
// eax: subject string
// ebx: previous index
// edx: code
- // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+ // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3741,7 +3847,7 @@
// esi: original subject string
// eax: underlying subject string
// ebx: previous index
- // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+ // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
// edx: code
// Argument 4: End of string data
// Argument 3: Start of string data
@@ -4369,7 +4475,7 @@
__ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
&check_unequal_objects);
- // Inline comparison of ascii strings.
+ // Inline comparison of ASCII strings.
if (cc_ == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
edx,
@@ -4844,7 +4950,7 @@
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
- // Setup frame.
+ // Set up frame.
__ push(ebp);
__ mov(ebp, esp);
@@ -5322,7 +5428,7 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ascii char code.
+ // At this point code register contains smi tagged ASCII char code.
__ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -5369,7 +5475,7 @@
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
+ Label call_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
@@ -5378,14 +5484,14 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfSmi(eax, &string_add_runtime);
+ __ JumpIfSmi(eax, &call_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
+ __ j(above_equal, &call_runtime);
// First argument is a string, test second.
- __ JumpIfSmi(edx, &string_add_runtime);
+ __ JumpIfSmi(edx, &call_runtime);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
+ __ j(above_equal, &call_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
@@ -5436,15 +5542,14 @@
__ add(ebx, ecx);
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
- __ j(overflow, &string_add_runtime);
+ __ j(overflow, &call_runtime);
// Use the symbol table when adding two one character strings, as it
// helps later optimizations to return a symbol here.
__ cmp(ebx, Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
- // Check that both strings are non-external ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
- &string_add_runtime);
+ // Check that both strings are non-external ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
// Get the two characters forming the new string.
__ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
@@ -5469,11 +5574,7 @@
__ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
__ bind(&make_two_character_string_no_reload);
__ IncrementCounter(counters->string_add_make_two_char(), 1);
- __ AllocateAsciiString(eax, // Result.
- 2, // Length.
- edi, // Scratch 1.
- edx, // Scratch 2.
- &string_add_runtime);
+ __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
// Pack both characters in ebx.
__ shl(ecx, kBitsPerByte);
__ or_(ebx, ecx);
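The two-character fast path packs both ASCII codes into one register and stores them with a single 16-bit write; on little-endian ia32 the first character must land in the low byte. A sketch of the packing:

    #include <cstdint>

    // After the shl/or_ above: low byte = first char, high byte = second,
    // so one 16-bit store writes them in string order on little-endian.
    uint16_t PackTwoAsciiChars(uint8_t first, uint8_t second) {
      return static_cast<uint16_t>(first | (second << 8));
    }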
@@ -5484,11 +5585,11 @@
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+ __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
__ j(below, &string_add_flat_result);
// If result is not supposed to be flat, allocate a cons string object. If both
- // strings are ascii the result is an ascii cons string.
+ // strings are ASCII the result is an ASCII cons string.
Label non_ascii, allocated, ascii_data;
__ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
@@ -5500,8 +5601,8 @@
__ test(ecx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
__ bind(&ascii_data);
- // Allocate an acsii cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
+ // Allocate an ASCII cons string.
+ __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
@@ -5515,7 +5616,7 @@
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
+ // to contain only ASCII characters.
// ecx: first instance type AND second instance type.
// edi: second instance type.
__ test(ecx, Immediate(kAsciiDataHintMask));
@@ -5528,64 +5629,93 @@
__ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
__ jmp(&allocated);
- // Handle creating a flat result. First check that both strings are not
- // external strings.
+ // We cannot encounter sliced strings or cons strings here since:
+ STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+ // Handle creating a flat result from either external or sequential strings.
+ // Locate each string's first character.
// eax: first string
// ebx: length of resulting flat string as a smi
// edx: second string
+ Label first_prepared, second_prepared;
+ Label first_is_sequential, second_is_sequential;
__ bind(&string_add_flat_result);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- // We cannot encounter sliced strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
- // Now check if both strings are ascii strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
- __ j(zero, &non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
- __ j(zero, &string_add_runtime);
+ // ecx: instance type of first string
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(ecx, kStringRepresentationMask);
+ __ j(zero, &first_is_sequential, Label::kNear);
+ // Rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ test_b(ecx, kShortExternalStringMask);
+ __ j(not_zero, &call_runtime);
+ __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ jmp(&first_prepared, Label::kNear);
+ __ bind(&first_is_sequential);
+ __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ bind(&first_prepared);
- // Both strings are ascii strings. As they are short they are both flat.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
+ // Check whether both strings have same encoding.
+ // edi: instance type of second string
+ __ xor_(ecx, edi);
+ __ test_b(ecx, kStringEncodingMask);
+ __ j(not_zero, &call_runtime);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(edi, kStringRepresentationMask);
+ __ j(zero, &second_is_sequential, Label::kNear);
+ // Rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ test_b(edi, kShortExternalStringMask);
+ __ j(not_zero, &call_runtime);
+ __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ jmp(&second_prepared, Label::kNear);
+ __ bind(&second_is_sequential);
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ bind(&second_prepared);
+
+ // Push the addresses of both strings' first characters onto the stack.
+ __ push(edx);
+ __ push(eax);
+
+ Label non_ascii_string_add_flat_result, call_runtime_drop_two;
+ // edi: instance type of second string
+ // First string and second string have the same encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ test_b(edi, kStringEncodingMask);
+ __ j(zero, &non_ascii_string_add_flat_result);
+
+ // Both strings are ASCII strings.
// ebx: length of resulting flat string as a smi
__ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
__ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
+ // Load first argument's length and first character location. Account for
+ // values currently on the stack when fetching arguments from it.
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ pop(edx);
// eax: result string
// ecx: first character of result
// edx: first char of first argument
// edi: length of first argument
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
+ // Load second argument's length and first character location. Account for
+ // values currently on the stack when fetching arguments from it.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ pop(edx);
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5599,34 +5729,30 @@
// ebx: length of resulting flat string as a smi
// edx: second string
__ bind(&non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
+ // Both strings are two byte strings.
__ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(ecx,
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load second argument's length and first character location. Account for
+ // values currently on the stack when fetching arguments from it.
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx,
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ pop(edx);
// eax: result string
// ecx: first character of result
// edx: first char of first argument
// edi: length of first argument
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
+ // Load second argument's length and first character location. Account for
+ // values currently on the stack when fetching arguments from it.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ pop(edx);
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5635,8 +5761,11 @@
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
+ // Recover stack pointer before jumping to runtime.
+ __ bind(&call_runtime_drop_two);
+ __ Drop(2);
// Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
+ __ bind(&call_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
@@ -5872,7 +6001,7 @@
__ push(mask);
Register temp = mask;
- // Check that the candidate is a non-external ascii string.
+ // Check that the candidate is a non-external ASCII string.
__ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(
@@ -5905,10 +6034,25 @@
Register hash,
Register character,
Register scratch) {
- // hash = character + (character << 10);
- __ mov(hash, character);
- __ shl(hash, 10);
- __ add(hash, character);
+ // hash = (seed + character) + ((seed + character) << 10);
+ if (Serializer::enabled()) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+ __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+ __ mov(scratch, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
+ __ SmiUntag(scratch);
+ __ add(scratch, character);
+ __ mov(hash, scratch);
+ __ shl(scratch, 10);
+ __ add(hash, scratch);
+ } else {
+ int32_t seed = masm->isolate()->heap()->HashSeed();
+ __ lea(scratch, Operand(character, seed));
+ __ shl(scratch, 10);
+ __ lea(hash, Operand(scratch, character, times_1, seed));
+ }
// hash ^= hash >> 6;
__ mov(scratch, hash);
__ shr(scratch, 6);
@@ -5949,14 +6093,12 @@
__ shl(scratch, 15);
__ add(hash, scratch);
- uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
- __ and_(hash, kHashShiftCutOffMask);
+ __ and_(hash, String::kHashBitMask);
// if (hash == 0) hash = 27;
Label hash_not_zero;
- __ test(hash, hash);
__ j(not_zero, &hash_not_zero, Label::kNear);
- __ mov(hash, Immediate(27));
+ __ mov(hash, Immediate(StringHasher::kZeroHash));
__ bind(&hash_not_zero);
}
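Taken together, the hash stubs implement a seeded Jenkins one-at-a-time hash: per character hash += c; hash += hash << 10; hash ^= hash >> 6, then the finalization above, masked to the hash field width, with kZeroHash substituted for an all-zero result. A scalar sketch (the 30-bit mask width is an assumption for illustration; the stub uses String::kHashBitMask):

    #include <cstddef>
    #include <cstdint>

    // Scalar sketch of the seeded string hash the stubs compute.
    uint32_t HashSequentialString(const uint8_t* chars, size_t length,
                                  uint32_t seed) {
      uint32_t hash = seed;
      for (size_t i = 0; i < length; i++) {
        hash += chars[i];      // per-character step (GenerateHashInit above)
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;       // GenerateHashGetHash finalization
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= (1u << 30) - 1;  // String::kHashBitMask (width assumed)
      return hash == 0 ? 27 : hash;  // StringHasher::kZeroHash
    }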
@@ -5988,20 +6130,23 @@
__ JumpIfNotSmi(edx, &runtime);
__ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label return_eax;
- __ j(equal, &return_eax);
+ Label not_original_string;
+ __ j(not_equal, ¬_original_string, Label::kNear);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+ __ bind(¬_original_string);
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache.
- __ SmiUntag(ecx); // Result length is no longer smi.
- __ cmp(ecx, 2);
+ __ cmp(ecx, Immediate(Smi::FromInt(2)));
__ j(greater, &result_longer_than_two);
__ j(less, &runtime);
// Sub string of length 2 requested.
// eax: string
// ebx: instance type
- // ecx: sub string length (value is 2)
+ // ecx: sub string length (smi, value is 2)
// edx: from index (smi)
__ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
@@ -6012,66 +6157,73 @@
FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
// Try to look up the two-character string in the symbol table.
- Label make_two_character_string;
+ Label combine_two_char, save_two_char;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi,
- &make_two_character_string, &make_two_character_string);
+ masm, ebx, ecx, eax, edx, edi, &combine_two_char, &save_two_char);
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
- __ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ bind(&combine_two_char);
+ __ shl(ecx, kBitsPerByte);
+ __ or_(ebx, ecx);
+ __ bind(&save_two_char);
+ __ AllocateAsciiString(eax, 2, ecx, edx, &runtime);
+ __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&result_longer_than_two);
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into edi.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ test(ebx, Immediate(kIsIndirectStringMask));
+ __ j(zero, &seq_or_external_string, Label::kNear);
+
+ Factory* factory = masm->isolate()->factory();
+ __ test(ebx, Immediate(kSlicedNotConsMask));
+ __ j(not_zero, &sliced_string, Label::kNear);
+ // Cons string. Check whether it is flat, then fetch first part.
+ // Flat cons strings have an empty second part.
+ __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+ factory->empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ Set(ecx, Immediate(2));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and adjust start index by offset.
+ __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mov(edi, eax);
+
+ __ bind(&underlying_unpacked);
if (FLAG_string_slices) {
Label copy_routine;
- // If coming from the make_two_character_string path, the string
- // is too short to be sliced anyways.
- STATIC_ASSERT(2 < SlicedString::kMinLength);
- __ jmp(©_routine);
- __ bind(&result_longer_than_two);
-
- // eax: string
- // ebx: instance type
- // ecx: sub string length
- // edx: from index (smi)
- Label allocate_slice, sliced_string, seq_or_external_string;
- __ cmp(ecx, SlicedString::kMinLength);
+ // edi: underlying subject string
+ // ebx: instance type of underlying subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
// Short slice. Copy instead of slicing.
__ j(less, ©_routine);
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ test(ebx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- Factory* factory = masm->isolate()->factory();
- __ test(ebx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
- factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ mov(edi, eax);
-
- __ bind(&allocate_slice);
- // edi: underlying subject string
- // ebx: instance type of original subject string
- // edx: offset
- // ecx: length
// Allocate new sliced string. At this point we do not reload the instance
// type including the string encoding because we simply rely on the info
// provided by the original string. It does not matter if the original
@@ -6088,27 +6240,49 @@
__ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
__ bind(&set_slice_header);
__ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
- __ SmiTag(ecx);
__ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
__ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
__ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ jmp(&return_eax);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
__ bind(©_routine);
- } else {
- __ bind(&result_longer_than_two);
}
- // eax: string
- // ebx: instance type
- // ecx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
+ // edi: underlying subject string
+ // ebx: instance type of underlying subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ Label two_byte_sequential, runtime_drop_two, sequential_string;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(ebx, kExternalStringTag);
+ __ j(zero, &sequential_string);
- // Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ test_b(ebx, kShortExternalStringMask);
+ __ j(not_zero, &runtime);
+ __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&sequential_string);
+ // Stash away (adjusted) index and (underlying) string.
+ __ push(edx);
+ __ push(edi);
+ __ SmiUntag(ecx);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ __ test_b(ebx, kStringEncodingMask);
+ __ j(zero, &two_byte_sequential);
+
+ // Sequential ASCII string. Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@@ -6117,11 +6291,10 @@
__ mov(edi, eax);
__ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ pop(esi);
+ __ pop(ebx);
__ SmiUntag(ebx);
- __ add(esi, ebx);
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
// eax: result string
// ecx: result length
@@ -6130,20 +6303,12 @@
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
__ mov(esi, edx); // Restore esi.
- Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
- __ bind(&non_ascii_flat);
- // eax: string
- // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
- // ecx: result string length
- // Check for flat two byte string
- __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+ __ bind(&two_byte_sequential);
+ // Sequential two-byte string. Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@@ -6153,14 +6318,13 @@
__ add(edi,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ pop(esi);
+ __ pop(ebx);
// As from is a smi, it is 2 times the value, which matches the size of a
// two-byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, ebx);
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
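Because kSmiTagSize + kSmiShiftSize == 1, an ia32 smi is the integer shifted left by one bit, so a smi index already equals the byte offset of that character in two-byte string data, which is what the lea above relies on. Sketch:

    #include <cassert>
    #include <cstdint>

    // ia32 smi encoding: the integer shifted left one bit, tag bit == 0.
    inline int32_t SmiFromInt(int32_t value) { return value << 1; }

    int main() {
      int index = 5;
      // Byte offset of character 5 in two-byte data is 10 == the raw smi.
      assert(2 * index == SmiFromInt(index));
    }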
// eax: result string
// ecx: result length
@@ -6169,11 +6333,13 @@
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
__ mov(esi, edx); // Restore esi.
-
- __ bind(&return_eax);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
+ // Drop pushed values on the stack before tail call.
+ __ bind(&runtime_drop_two);
+ __ Drop(2);
+
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
@@ -6328,10 +6494,10 @@
__ bind(¬_same);
- // Check that both objects are sequential ascii strings.
+ // Check that both objects are sequential ASCII strings.
__ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
- // Compare flat ascii strings.
+ // Compare flat ASCII strings.
// Drop arguments from the stack.
__ pop(ecx);
__ add(esp, Immediate(2 * kPointerSize));
@@ -6565,33 +6731,45 @@
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- // Save the registers.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ecx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+ __ cmp(ebx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ sub(eax, edx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
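In scalar terms, the fast path emitted above is roughly (a hedged paraphrase, not generated code):
  // if (is_smi(eax) || is_smi(edx)) goto miss;   // tag bits AND to zero
  // if (map(eax) != known_map_ ||
  //     map(edx) != known_map_) goto miss;
  // return eax - edx;                            // zero iff same object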
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
+ __ push(edx); // Preserve edx and eax.
+ __ push(eax);
+ __ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op_)));
__ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+ __ pop(eax);
+ __ pop(edx);
}
- // Compute the entry point of the rewritten stub.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
-
- // Restore registers.
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-
// Do a tail call to the rewritten stub.
__ jmp(edi);
}
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 57e66df..9eabb2a 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -41,7 +41,7 @@
namespace v8 {
namespace internal {
-void CPU::Setup() {
+void CPU::SetUp() {
CpuFeatures::Probe();
}
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 2649560..b37b54b 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -258,9 +258,7 @@
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
- __ nop();
- }
+ __ Nop(Assembler::kDebugBreakSlotLength);
ASSERT_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index eeee4f2..14f2675 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -231,8 +231,8 @@
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
+ *(call_target_address - 3) = 0x66; // 2 byte nop part 1
+ *(call_target_address - 2) = 0x90; // 2 byte nop part 2
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -250,8 +250,8 @@
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
+ ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
+ *(call_target_address - 2) == 0x90 && // 2 byte nop part 2
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
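A minimal sketch of the two byte states the stack-check site toggles between (illustrative only, not part of the patch; offsets are relative to call_target_address, which points at the call's 32-bit displacement):
  //            addr-3  addr-2  addr-1  addr..addr+3
  // unpatched:  0x73    0x07    0xe8   <disp32>  // jae +7; call <stack check>
  // patched:    0x66    0x90    0xe8   <disp32>  // 2-byte nop; call <replacement>
  //
  // Using 66 90 instead of the former 90 90 pair makes the patched site a
  // canonical 2-byte nop that the assembler now emits and recognizes.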
@@ -299,12 +299,13 @@
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
+ iterator.Next(); // Drop JS frames count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+ ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@@ -340,9 +341,7 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
-#ifdef DEBUG
- output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+ output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -406,7 +405,7 @@
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
- // Setup the frame pointer and the context pointer.
+ // Set up the frame pointer and the context pointer.
// All OSR stack frames are dynamically aligned to an 8-byte boundary.
int frame_pointer = input_->GetRegister(ebp.code());
if ((frame_pointer & 0x4) == 0) {
@@ -437,13 +436,112 @@
}
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+ // Arguments adaptor frames cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ uint32_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
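A sketch of the adaptor frame materialized above, from high to low addresses (editorial; slot labels follow the trace output):
  //   [ translated parameters ]   height slots, filled by DoTranslateCommand
  //   [ caller's pc           ]
  //   [ caller's fp           ]   <- output_frame->GetFp()
  //   [ adaptor sentinel      ]   Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)
  //   [ function              ]
  //   [ argc = height - 1     ]   <- output_frame->GetTop()
  //
  // The frame's pc is the adaptor trampoline at the recorded
  // arguments_adaptor_deopt_pc_offset, so execution resumes inside the
  // trampoline as if the adaptor frame had been built normally.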
+
+
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@@ -463,9 +561,7 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
- output_frame->SetKind(Code::FUNCTION);
-#endif
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index da22390..b5ddcca 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -763,10 +763,13 @@
case 0xEB: mnem = "fldpi"; break;
case 0xED: mnem = "fldln2"; break;
case 0xEE: mnem = "fldz"; break;
+ case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFC: mnem = "frndint"; break;
+ case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
default: UnimplementedInstruction();
@@ -788,6 +791,8 @@
has_register = true;
} else if (modrm_byte == 0xE2) {
mnem = "fclex";
+ } else if (modrm_byte == 0xE3) {
+ mnem = "fninit";
} else {
UnimplementedInstruction();
}
@@ -987,7 +992,7 @@
break;
case 0x0F:
- { byte f0byte = *(data+1);
+ { byte f0byte = data[1];
const char* f0mnem = F0Mnem(f0byte);
if (f0byte == 0x18) {
int mod, regop, rm;
@@ -995,6 +1000,25 @@
const char* suffix[] = {"nta", "1", "2", "3"};
AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
data += PrintRightOperand(data);
+ } else if (f0byte == 0x1F && data[2] == 0) {
+ AppendToBuffer("nop"); // 3 byte nop.
+ data += 3;
+ } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
+ AppendToBuffer("nop"); // 4 byte nop.
+ data += 4;
+ } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
+ data[4] == 0) {
+ AppendToBuffer("nop"); // 5 byte nop.
+ data += 5;
+ } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0) {
+ AppendToBuffer("nop"); // 7 byte nop.
+ data += 7;
+ } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0 &&
+ data[7] == 0) {
+ AppendToBuffer("nop"); // 8 byte nop.
+ data += 8;
} else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
@@ -1130,8 +1154,12 @@
break;
case 0x66: // prefix
- data++;
- if (*data == 0x8B) {
+ while (*data == 0x66) data++;
+ if (*data == 0xf && data[1] == 0x1f) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x90) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x8B) {
data++;
data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
} else if (*data == 0x89) {
@@ -1185,6 +1213,16 @@
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x17) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("extractps %s,%s,%d",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
@@ -1258,6 +1296,9 @@
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x90) {
+ data++;
+ AppendToBuffer("nop"); // 2 byte nop.
} else if (*data == 0xF3) {
data++;
int mod, regop, rm;
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 45b847a..9e51857 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -95,9 +95,11 @@
class StandardFrameConstants : public AllStatic {
public:
+ // The fixed part of the frame consists of the return address, caller fp,
+ // context and function.
// StandardFrame::IterateExpressions assumes that kContextOffset is the last
// object pointer.
- static const int kFixedFrameSize = 4; // Currently unused.
+ static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@@ -123,6 +125,8 @@
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
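With these constants the adaptor frame size works out as follows (editorial note, kPointerSize == 4 on ia32):
  // kFrameSize = 4 * kPointerSize   // return address, caller fp,
  //                                 // context marker, function
  //            + kPointerSize       // argument count slot
  //            = 20 bytes
  //
  // This is the fixed_frame_size consumed by
  // Deoptimizer::DoComputeArgumentsAdaptorFrame above.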
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index ef4f0c5..ede810c 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -106,7 +106,7 @@
// formal parameter count expected by the function.
//
// The live registers are:
-// o edi: the JS function object being called (ie, ourselves)
+// o edi: the JS function object being called (i.e. ourselves)
// o esi: our context
// o ebp: our caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -227,7 +227,7 @@
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
__ SafePush(Immediate(Smi::FromInt(num_parameters)));
- // Arguments to ArgumentsAccessStub and/or New...:
+ // Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
@@ -967,7 +967,7 @@
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- // Setup the four remaining stack slots.
+ // Set up the four remaining stack slots.
__ push(eax); // Map.
__ push(edx); // Enumeration cache.
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
@@ -2883,7 +2883,7 @@
VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(SSE2)) {
- MathPowStub stub;
+ MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
@@ -3571,7 +3571,7 @@
// One-character separator case
__ bind(&one_char_separator);
- // Replace separator with its ascii character value.
+ // Replace separator with its ASCII character value.
__ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
__ mov_b(separator_operand, scratch);
@@ -3787,7 +3787,7 @@
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->true_value());
} else {
- __ push(isolate()->factory()->true_value());
+ __ Push(isolate()->factory()->true_value());
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
@@ -3795,7 +3795,7 @@
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->false_value());
} else {
- __ push(isolate()->factory()->false_value());
+ __ Push(isolate()->factory()->false_value());
}
__ bind(&done);
}
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index e93353e..3a93790 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -473,7 +473,6 @@
Counters* counters = isolate->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
-
__ bind(&check_number_dictionary);
__ mov(ebx, eax);
__ SmiUntag(ebx);
@@ -535,14 +534,34 @@
__ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift);
__ xor_(ecx, edi);
- __ and_(ecx, KeyedLookupCache::kCapacityMask);
+ __ and_(ecx, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
// Load the key (consisting of map and symbol) from the cache and
// check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(masm->isolate());
- __ mov(edi, ecx);
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ mov(edi, ecx);
+ __ shl(edi, kPointerSizeLog2 + 1);
+ if (i != 0) {
+ __ add(edi, Immediate(kPointerSize * i * 2));
+ }
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &try_next_entry);
+ __ add(edi, Immediate(kPointerSize));
+ __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(equal, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ __ lea(edi, Operand(ecx, 1));
__ shl(edi, kPointerSizeLog2 + 1);
+ __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(edi, Immediate(kPointerSize));
@@ -556,13 +575,25 @@
// ecx : lookup cache index
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
- __ mov(edi,
- Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, ecx);
- __ j(above_equal, &property_array_property);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ if (i != 0) {
+ __ add(ecx, Immediate(i));
+ }
+ __ mov(edi,
+ Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+ __ sub(edi, ecx);
+ __ j(above_equal, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
// Load in-object property.
+ __ bind(&load_in_object_property);
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, edi);
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
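A hedged sketch of the unrolled probe loop above (names illustrative; the actual layout is defined by KeyedLookupCache):
  // int index = (map_hash ^ name_hash) & kCapacityMask & kHashMask;
  // for (int i = 0; i < kEntriesPerBucket; i++) {
  //   // keys[] stores (map, symbol) pairs, two pointers per entry
  //   if (keys[index + i].map == receiver_map &&
  //       keys[index + i].symbol == name) {
  //     // hit_on_nth_entry[i]: property offset is field_offsets[index + i]
  //   }
  // }
  // // no entry matched: fall through to &slow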
@@ -1374,10 +1405,10 @@
// -- esp[0] : return address
// -----------------------------------
//
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
// (currently anything except for external arrays, which means anything with
- // elements of FixedArray type.), but currently is restricted to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
Label miss;
@@ -1399,6 +1430,13 @@
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss);
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
// Check that value is a smi.
__ JumpIfNotSmi(value, &miss);
@@ -1625,6 +1663,9 @@
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 33adc21..46a35b6 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -341,24 +341,21 @@
}
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ Handle<Object> literal = chunk_->LookupLiteral(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ return literal;
+}
+
+
double LCodeGen::ToDouble(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
return value->Number();
}
-Immediate LCodeGen::ToImmediate(LOperand* op) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Immediate(static_cast<int32_t>(literal->Number()));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- }
- ASSERT(r.IsTagged());
- return Immediate(literal);
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}
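The removed ToImmediate() silently mixed integer and tagged constants; the split forces each caller to choose, roughly (editorial sketch mirroring the gap-resolver change later in this patch):
  // LConstantOperand* c = LConstantOperand::cast(op);
  // if (cgen->IsInteger32(c)) {
  //   __ Set(dst, cgen->ToInteger32Immediate(c));  // raw int32 immediate
  // } else {
  //   __ LoadObject(dst, cgen->ToHandle(c));       // tagged; new-space
  // }                                              // objects go via a cell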
@@ -397,7 +394,11 @@
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
+ if (environment->is_arguments_adaptor()) {
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ } else {
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ }
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -518,7 +519,7 @@
} else if (context->IsConstantOperand()) {
Handle<Object> literal =
chunk_->LookupLiteral(LConstantOperand::cast(context));
- LoadHeapObject(esi, Handle<Context>::cast(literal));
+ __ LoadHeapObject(esi, Handle<Context>::cast(literal));
} else {
UNREACHABLE();
}
@@ -546,10 +547,14 @@
// |>------------ translation_size ------------<|
int frame_count = 0;
+ int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
+ if (!e->is_arguments_adaptor()) {
+ ++jsframe_count;
+ }
}
- Translation translation(&translations_, frame_count);
+ Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
@@ -1160,7 +1165,7 @@
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ sub(ToOperand(left), ToImmediate(right));
+ __ sub(ToOperand(left), ToInteger32Immediate(right));
} else {
__ sub(ToRegister(left), ToOperand(right));
}
@@ -1219,7 +1224,7 @@
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
if (handle->IsHeapObject()) {
- LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
+ __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
__ Set(reg, Immediate(handle));
}
@@ -1299,7 +1304,7 @@
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToImmediate(right));
+ __ add(ToOperand(left), ToInteger32Immediate(right));
} else {
__ add(ToRegister(left), ToOperand(right));
}
@@ -1571,9 +1576,9 @@
__ j(parity_even, chunk_->GetAssemblyLabel(false_block));
} else {
if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left), ToImmediate(right));
+ __ cmp(ToRegister(left), ToInteger32Immediate(right));
} else if (left->IsConstantOperand()) {
- __ cmp(ToOperand(right), ToImmediate(left));
+ __ cmp(ToOperand(right), ToInteger32Immediate(left));
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
} else {
@@ -1834,7 +1839,7 @@
// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
+// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String> class_name,
@@ -1842,7 +1847,8 @@
Register temp,
Register temp2) {
ASSERT(!input.is(temp));
- ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
@@ -1901,12 +1907,7 @@
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
- if (input.is(temp)) {
- // Swap.
- Register swapper = temp;
- temp = temp2;
- temp2 = swapper;
- }
+
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2031,7 +2032,7 @@
// the stub.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
- __ mov(InstanceofStub::right(), Immediate(instr->function()));
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 13;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
@@ -2118,26 +2119,20 @@
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register object = ToRegister(instr->TempAt(0));
- Register address = ToRegister(instr->TempAt(1));
- Register value = ToRegister(instr->InputAt(0));
- ASSERT(!value.is(object));
- Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
-
- int offset = JSGlobalPropertyCell::kValueOffset;
- __ mov(object, Immediate(cell_handle));
+ Register value = ToRegister(instr->value());
+ Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(FieldOperand(object, offset), factory()->the_hole_value());
+ __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
- __ mov(FieldOperand(object, offset), value);
+ __ mov(Operand::Cell(cell_handle), value);
// Cells are always rescanned, so no write barrier here.
}
@@ -2159,13 +2154,38 @@
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ mov(result, ContextOperand(context, instr->slot_index()));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(result, factory()->the_hole_value());
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ Label is_not_hole;
+ __ j(not_equal, &is_not_hole, Label::kNear);
+ __ mov(result, factory()->undefined_value());
+ __ bind(&is_not_hole);
+ }
+ }
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ mov(ContextOperand(context, instr->slot_index()), value);
+
+ Label skip_assignment;
+
+ Operand target = ContextOperand(context, instr->slot_index());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(target, factory()->the_hole_value());
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &skip_assignment, Label::kNear);
+ }
+ }
+
+ __ mov(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
@@ -2180,6 +2200,8 @@
EMIT_REMEMBERED_SET,
check_needed);
}
+
+ __ bind(&skip_assignment);
}
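Together these give context slots hole-aware semantics; in scalar terms, roughly (editorial sketch):
  // load:  v = context[slot];
  //        if (v == the_hole) deopt or yield undefined, per
  //        DeoptimizesOnHole()
  // store: c = context[slot];
  //        deopting mode:     if (c == the_hole) Deoptimize(); else write
  //        non-deopting mode: write only if (c == the_hole), i.e. only
  //                           the initializing store takes effect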
@@ -2201,7 +2223,7 @@
Handle<String> name) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() &&
+ ASSERT(lookup.IsFound() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
if (lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2217,7 +2239,24 @@
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
+ }
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+ ASSERT(!operand->IsDoubleRegister());
+ if (operand->IsConstantOperand()) {
+ Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ if (object->IsSmi()) {
+ __ Push(Handle<Smi>::cast(object));
+ } else {
+ __ PushHeapObject(Handle<HeapObject>::cast(object));
+ }
+ } else if (operand->IsRegister()) {
+ __ push(ToRegister(operand));
+ } else {
+ __ push(ToOperand(operand));
}
}
@@ -2627,17 +2666,13 @@
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
- if (argument->IsConstantOperand()) {
- __ push(ToImmediate(argument));
- } else {
- __ push(ToOperand(argument));
- }
+ EmitPushTaggedOperand(argument);
}
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2673,41 +2708,53 @@
int arity,
LInstruction* instr,
CallKind call_kind) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- } else {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
-
- // Set eax to arguments count if adaption is not needed. Assumes that eax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(eax, arity);
- }
+ bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+ function->shared()->formal_parameter_count() == arity;
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- // Invoke function.
- __ SetCallKind(ecx, call_kind);
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
+ if (can_invoke_directly) {
+ __ LoadHeapObject(edi, function);
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+
+ if (change_context) {
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ } else {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+
+ // Set eax to arguments count if adaption is not needed. Assumes that eax
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ mov(eax, arity);
+ }
+
+ // Invoke function directly.
+ __ SetCallKind(ecx, call_kind);
+ if (*function == *info()->closure()) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ // We need to adapt arguments.
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ }
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- __ mov(edi, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -2922,72 +2969,90 @@
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done, sqrt;
+ // Check base for -Infinity. According to IEEE-754, single-precision
+ // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000);
+ __ movd(xmm_scratch, scratch);
+ __ cvtss2sd(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &sqrt, Label::kNear);
+ __ j(carry, &sqrt, Label::kNear);
+ // If input is -Infinity, return Infinity.
+ __ xorps(input_reg, input_reg);
+ __ subsd(input_reg, xmm_scratch);
+ __ jmp(&done, Label::kNear);
+
+ // Square root.
+ __ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
+ __ bind(&done);
}
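The new prologue implements ES5 15.8.2.13 semantics that a bare sqrtsd would miss; in plain C++ the intent is roughly (editorial sketch):
  // double PowHalf(double base) {
  //   if (base == -V8_INFINITY) return V8_INFINITY;  // pow(-inf, 0.5)
  //   return sqrt(base + 0.0);  // +0.0 folds -0 into +0 before sqrtsd
  // }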
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(eax));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
- if (exponent_type.IsDouble()) {
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
- } else if (exponent_type.IsInteger32()) {
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- ASSERT(!ToRegister(right).is(ebx));
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
- __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
- 4);
- } else {
- ASSERT(exponent_type.IsTagged());
- CpuFeatures::Scope scope(SSE2);
- Register right_reg = ToRegister(right);
-
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
- __ SmiUntag(right_reg);
- __ cvtsi2sd(result_reg, Operand(right_reg));
- __ jmp(&call);
-
- __ bind(&non_smi);
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- ASSERT(!right_reg.is(ebx));
- __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(eax, &no_deopt);
+ __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
DeoptimizeIf(not_equal, instr->environment());
- __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
- __ bind(&call);
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
+}
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+
+void LCodeGen::DoRandom(LRandom* instr) {
+ // Having marked this instruction as a call we can use any
+ // registers.
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+
+ __ PrepareCallCFunction(1, ebx);
+ __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // Convert 32 random bits in eax to 0.(32 random bits) in a double
+ // by computing:
+ // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+ __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(xmm2, ebx);
+ __ movd(xmm1, eax);
+ __ cvtss2sd(xmm2, xmm2);
+ __ xorps(xmm1, xmm2);
+ __ subsd(xmm1, xmm2);
}
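The movd/xorps/subsd sequence is the usual bits-to-[0,1) trick; a hedged portable equivalent (memcpy punning stands in for the xmm register reuse):
  // double ToDouble01(uint32_t bits) {
  //   uint64_t base = 0x4130000000000000ull;  // 1.0 x 2^20 as a double
  //   uint64_t merged = base ^ bits;          // 1.(20 zeros)(bits) x 2^20
  //   double d;
  //   memcpy(&d, &merged, sizeof(d));
  //   return d - 1048576.0;                   // subtract 1.0 x 2^20
  // }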
@@ -3060,9 +3125,6 @@
case kMathSqrt:
DoMathSqrt(instr);
break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
case kMathCos:
DoMathCos(instr);
break;
@@ -3147,7 +3209,6 @@
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- __ mov(edi, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3229,7 +3290,7 @@
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->index()->IsConstantOperand()) {
__ cmp(ToOperand(instr->length()),
- ToImmediate(LConstantOperand::cast(instr->index())));
+ Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
DeoptimizeIf(below_equal, instr->environment());
} else {
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
@@ -3283,13 +3344,6 @@
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- // conversion, so it deopts in that case.
- if (instr->hydrogen()->ValueNeedsSmiCheck()) {
- __ test(value, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
- }
-
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3512,16 +3566,8 @@
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- if (instr->left()->IsConstantOperand()) {
- __ push(ToImmediate(instr->left()));
- } else {
- __ push(ToOperand(instr->left()));
- }
- if (instr->right()->IsConstantOperand()) {
- __ push(ToImmediate(instr->right()));
- } else {
- __ push(ToOperand(instr->right()));
- }
+ EmitPushTaggedOperand(instr->left());
+ EmitPushTaggedOperand(instr->right());
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3671,8 +3717,10 @@
void LCodeGen::EmitNumberUntagD(Register input_reg,
+ Register temp_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env) {
Label load_smi, done;
@@ -3701,6 +3749,15 @@
}
// Heap number to XMM conversion.
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(result_reg, xmm_scratch);
+ __ j(not_zero, &done, Label::kNear);
+ __ movmskpd(temp_reg, result_reg);
+ __ test_b(temp_reg, 1);
+ DeoptimizeIf(not_zero, env);
+ }
__ jmp(&done, Label::kNear);
// Smi to XMM conversion
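ucomisd alone cannot distinguish -0.0 from +0.0 (they compare equal), hence the extra sign-bit probe above; a hedged scalar equivalent:
  // if (value == 0.0 &&          // ucomisd against a zeroed xmm_scratch
  //     std::signbit(value)) {   // movmskpd + test_b of bit 0
  //   Deoptimize(env);
  // }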
@@ -3823,14 +3880,23 @@
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
+ LOperand* temp = instr->TempAt(0);
+ ASSERT(temp == NULL || temp->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg, result_reg,
+ bool deoptimize_on_minus_zero =
+ instr->hydrogen()->deoptimize_on_minus_zero();
+ Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+
+ EmitNumberUntagD(input_reg,
+ temp_reg,
+ result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
+ deoptimize_on_minus_zero,
instr->environment());
}
@@ -4020,19 +4086,29 @@
__ cmp(reg, Operand::Cell(cell));
} else {
Operand operand = ToOperand(instr->value());
- __ cmp(operand, instr->hydrogen()->target());
+ __ cmp(operand, target);
}
DeoptimizeIf(not_equal, instr->environment());
}
+void LCodeGen::DoCheckMapCommon(Register reg,
+ Handle<Map> map,
+ CompareMapMode mode,
+ LEnvironment* env) {
+ Label success;
+ __ CompareMap(reg, map, &success, mode);
+ DeoptimizeIf(not_equal, env);
+ __ bind(&success);
+}
+
+
void LCodeGen::DoCheckMap(LCheckMap* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- instr->hydrogen()->map());
- DeoptimizeIf(not_equal, instr->environment());
+ Handle<Map> map = instr->hydrogen()->map();
+ DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
}
@@ -4084,17 +4160,6 @@
}
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- __ mov(result, Operand::Cell(cell));
- } else {
- __ mov(result, object);
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->TempAt(0));
@@ -4102,39 +4167,53 @@
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
+ DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
+ DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ // Deopt if the array literal boilerplate ElementsKind is of a type
+ // different from the expected one. The check isn't necessary if the
+ // boilerplate has already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Load the map's "bit field 2". We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(ebx, Map::kElementsKindMask);
+ __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
- // Setup the parameters to the stub/runtime call.
+ // Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_elements));
+ // Boilerplate already exists, constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -4150,9 +4229,9 @@
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4167,7 +4246,7 @@
ASSERT(!result.is(ecx));
if (FLAG_debug_code) {
- LoadHeapObject(ecx, object);
+ __ LoadHeapObject(ecx, object);
__ cmp(source, ecx);
__ Assert(equal, "Unexpected object literal boilerplate");
}
@@ -4197,10 +4276,10 @@
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
__ mov(FieldOperand(result, total_offset), ecx);
- LoadHeapObject(source, value_object);
+ __ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
- LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+ __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ mov(FieldOperand(result, total_offset), ecx);
} else {
__ mov(FieldOperand(result, total_offset), Immediate(value));
@@ -4225,7 +4304,7 @@
__ bind(&allocated);
int offset = 0;
- LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
+ __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
ASSERT_EQ(size, offset);
}
@@ -4236,7 +4315,7 @@
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- // Setup the parameters to the stub/runtime call.
+ // Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
@@ -4347,11 +4426,7 @@
void LCodeGen::DoTypeof(LTypeof* instr) {
LOperand* input = instr->InputAt(1);
- if (input->IsConstantOperand()) {
- __ push(ToImmediate(input));
- } else {
- __ push(ToOperand(input));
- }
+ EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -4475,9 +4550,7 @@
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- while (padding_size-- > 0) {
- __ nop();
- }
+ __ Nop(padding_size);
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
@@ -4501,11 +4574,7 @@
LOperand* obj = instr->object();
LOperand* key = instr->key();
__ push(ToOperand(obj));
- if (key->IsConstantOperand()) {
- __ push(ToImmediate(key));
- } else {
- __ push(ToOperand(key));
- }
+ EmitPushTaggedOperand(key);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
@@ -4602,16 +4671,8 @@
void LCodeGen::DoIn(LIn* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- __ push(ToImmediate(key));
- } else {
- __ push(ToOperand(key));
- }
- if (obj->IsConstantOperand()) {
- __ push(ToImmediate(obj));
- } else {
- __ push(ToOperand(obj));
- }
+ EmitPushTaggedOperand(key);
+ EmitPushTaggedOperand(obj);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 9d1a4f7..d86d48c 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -78,7 +78,13 @@
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
- Immediate ToImmediate(LOperand* op);
+
+ bool IsInteger32(LConstantOperand* op) const;
+ Immediate ToInteger32Immediate(LOperand* op) const {
+ return Immediate(ToInteger32(LConstantOperand::cast(op)));
+ }
+
+ Handle<Object> ToHandle(LConstantOperand* op) const;
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
@@ -104,6 +110,9 @@
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
+ void DoCheckMapCommon(Register reg, Handle<Map> map,
+ CompareMapMode mode, LEnvironment* env);
+
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
@@ -207,8 +216,6 @@
LInstruction* instr,
CallKind call_kind);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -227,6 +234,7 @@
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
+
double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
@@ -239,7 +247,6 @@
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
@@ -261,8 +268,10 @@
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
+ Register temp,
XMMRegister result,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@@ -306,6 +315,10 @@
void EnsureSpaceForLazyDeopt();
+ // Emits code for pushing either a tagged constant, a (non-double)
+ // register, or a stack slot operand.
+ void EmitPushTaggedOperand(LOperand* operand);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -372,7 +385,7 @@
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
- void SetExit(Label *exit) { external_exit_ = exit; }
+ void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index fcf1f91..510d9f1 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -303,14 +303,24 @@
}
} else if (source->IsConstantOperand()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Immediate src = cgen_->ToImmediate(source);
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ Set(dst, src);
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
} else {
+ ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ Set(dst, src);
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ } else {
+ Register tmp = EnsureTempRegister();
+ __ LoadObject(tmp, cgen_->ToHandle(constant_source));
+ __ mov(dst, tmp);
+ }
}
} else if (source->IsDoubleRegister()) {
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 4e5f278..c81aca8 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -298,6 +298,12 @@
}
+void LMathPowHalf::PrintDataTo(StringStream* stream) {
+ stream->Add("/pow_half ");
+ InputAt(0)->PrintTo(stream);
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -1007,15 +1013,17 @@
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
+ hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
+ int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1024,13 +1032,17 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new(zone()) LArgument((*argument_index_accumulator)++);
+ op = new(zone()) LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
+ if (!hydrogen_env->is_arguments_adaptor()) {
+ *argument_index_accumulator = argument_index;
+ }
+
return result;
}
@@ -1041,22 +1053,31 @@
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- ASSERT(v->IsConstant());
- ASSERT(!v->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ HValue* value = instr->value();
+ if (value->EmitAtUses()) {
+ ASSERT(value->IsConstant());
+ ASSERT(!value->representation().IsDouble());
+ HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new(zone()) LGoto(successor->block_id());
}
+
+ // Untagged integers or doubles, smis and booleans don't require a
+ // deoptimization environment or a temp register.
+ Representation rep = value->representation();
+ HType type = value->type();
+ if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
+ return new(zone()) LBranch(UseRegister(value), NULL);
+ }
+
ToBooleanStub::Types expected = instr->expected_input_types();
// We need a temporary register when we have to access the map *or* we have
// no type info yet, in which case we handle all cases (including the ones
// involving maps).
bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(new(zone()) LBranch(UseRegister(v), temp));
+ return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
}
@@ -1184,6 +1205,11 @@
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
+ if (op == kMathPowHalf) {
+ LOperand* temp = TempRegister();
+ LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+ return DefineSameAsFirst(result);
+ }
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
switch (op) {
@@ -1195,8 +1221,6 @@
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
default:
UNREACHABLE();
return NULL;
@@ -1379,7 +1403,11 @@
temp = TempRegister();
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
- return AssignEnvironment(DefineSameAsFirst(mul));
+ if (instr->CheckFlag(HValue::kCanOverflow) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineSameAsFirst(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
@@ -1437,9 +1465,9 @@
// We need to use fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm1);
+ LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm2) :
+ UseFixedDouble(instr->right(), xmm1) :
UseFixed(instr->right(), eax);
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
@@ -1447,6 +1475,15 @@
}
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->global_object()->representation().IsTagged());
+ LOperand* global_object = UseFixed(instr->global_object(), eax);
+ LRandom* result = new(zone()) LRandom(global_object);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@@ -1579,9 +1616,9 @@
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LClassOfTestAndBranch(UseTempRegister(instr->value()),
- TempRegister(),
- TempRegister());
+ return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
}
@@ -1607,7 +1644,7 @@
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return AssignEnvironment(DefineSameAsFirst(result));
+ return DefineSameAsFirst(result);
}
@@ -1651,7 +1688,11 @@
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
+ // Temp register only necessary for minus zero check.
+ LOperand* temp = instr->deoptimize_on_minus_zero()
+ ? TempRegister()
+ : NULL;
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@@ -1847,9 +1888,7 @@
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
- new(zone()) LStoreGlobalCell(UseTempRegister(instr->value()),
- TempRegister(),
- TempRegister());
+ new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1866,7 +1905,9 @@
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1881,7 +1922,8 @@
value = UseRegister(instr->value());
temp = NULL;
}
- return new(zone()) LStoreContextSlot(context, value, temp);
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1946,7 +1988,8 @@
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
}
@@ -1965,12 +2008,11 @@
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() &&
+ (instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
@@ -2013,8 +2055,7 @@
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new(zone()) LStoreKeyedFastElement(obj, key, val));
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
}
@@ -2034,13 +2075,12 @@
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() &&
+ (instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -2344,6 +2384,7 @@
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@@ -2354,7 +2395,8 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 5170647..67bf937 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -123,6 +123,7 @@
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathPowHalf) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -134,6 +135,7 @@
V(OuterContext) \
V(Parameter) \
V(Power) \
+ V(Random) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -582,6 +584,24 @@
};
+class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -1024,6 +1044,17 @@
};
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRandom(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+ DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1250,16 +1281,16 @@
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+ LOperand* value() { return inputs_[0]; }
};
@@ -1593,10 +1624,11 @@
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LNumberUntagD(LOperand* value) {
+ explicit LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index fcae7a2..d0d9e19 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -357,6 +357,14 @@
}
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+  // See ROOT_ACCESSOR macro in factory.h.
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
@@ -479,15 +487,48 @@
}
+void MacroAssembler::CompareMap(Register obj,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode) {
+ cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+ if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_fast_element_map));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_double_map));
+ }
+ }
+}
+
+
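Under ALLOW_ELEMENT_TRANSITION_MAPS the sequence above accepts up to three
maps. Written as a plain predicate it looks roughly like the following sketch;
MapMatches is a hypothetical name, while LookupElementsTransitionMap is the
call used in the patch:

    // Hypothetical predicate mirroring the CompareMap sequence above.
    bool MapMatches(Map* candidate, Map* map) {
      if (candidate == map) return true;
      // Also accept 'map's FAST_ELEMENTS and FAST_DOUBLE_ELEMENTS
      // transition maps, when those transitions exist.
      Map* to_fast = map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
      if (to_fast != NULL && candidate == to_fast) return true;
      Map* to_double =
          map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
      return to_double != NULL && candidate == to_double;
    }
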
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type) {
+ SmiCheckType smi_check_type,
+ CompareMapMode mode) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+
+ Label success;
+ CompareMap(obj, map, &success, mode);
j(not_equal, fail);
+ bind(&success);
}
@@ -608,7 +649,7 @@
void MacroAssembler::EnterExitFramePrologue() {
- // Setup the frame structure on the stack.
+ // Set up the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
@@ -660,7 +701,7 @@
void MacroAssembler::EnterExitFrame(bool save_doubles) {
EnterExitFramePrologue();
- // Setup argc and argv in callee-saved registers.
+ // Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
mov(edi, eax);
lea(esi, Operand(ebp, eax, times_4, offset));
@@ -755,7 +796,7 @@
// Push the state and the code object.
push(Immediate(state));
- push(CodeObject());
+ Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@@ -951,6 +992,50 @@
}
+// Compute the hash code from the untagged key. This must be kept in sync
+// with ComputeIntegerHash in utils.h.
+//
+// Note: r0 will contain the hash code.
+void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
+ // Xor original key with a seed.
+ if (Serializer::enabled()) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+ mov(scratch,
+ Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+ SmiUntag(scratch);
+ xor_(r0, scratch);
+ } else {
+ int32_t seed = isolate()->heap()->HashSeed();
+ xor_(r0, Immediate(seed));
+ }
+
+ // hash = ~hash + (hash << 15);
+ mov(scratch, r0);
+ not_(r0);
+ shl(scratch, 15);
+ add(r0, scratch);
+ // hash = hash ^ (hash >> 12);
+ mov(scratch, r0);
+ shr(scratch, 12);
+ xor_(r0, scratch);
+ // hash = hash + (hash << 2);
+ lea(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ mov(scratch, r0);
+ shr(scratch, 4);
+ xor_(r0, scratch);
+ // hash = hash * 2057;
+ imul(r0, r0, 2057);
+ // hash = hash ^ (hash >> 16);
+ mov(scratch, r0);
+ shr(scratch, 16);
+ xor_(r0, scratch);
+}
+
+
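Since the comment insists this stay in sync with ComputeIntegerHash in
utils.h, the whole sequence is worth seeing as ordinary C++. A standalone
mirror of the assembler above; SeededIntegerHash is a hypothetical name, not
part of the patch:

    #include <cstdint>
    #include <cstdio>

    uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;   // Xor original key with a seed.
      hash = ~hash + (hash << 15);  // mov/not/shl/add
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);    // lea(r0, Operand(r0, r0, times_4, 0))
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // imul(r0, r0, 2057)
      hash = hash ^ (hash >> 16);
      return hash;
    }

    int main() {
      std::printf("%u\n", SeededIntegerHash(42, 0xdeadbeef));
      return 0;
    }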
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
@@ -976,33 +1061,10 @@
Label done;
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- mov(r1, r0);
- not_(r0);
- shl(r1, 15);
- add(r0, r1);
- // hash = hash ^ (hash >> 12);
- mov(r1, r0);
- shr(r1, 12);
- xor_(r0, r1);
- // hash = hash + (hash << 2);
- lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- mov(r1, r0);
- shr(r1, 4);
- xor_(r0, r1);
- // hash = hash * 2057;
- imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- mov(r1, r0);
- shr(r1, 16);
- xor_(r0, r1);
+ GetNumberHash(r0, r1);
// Compute capacity mask.
- mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
+ mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
shr(r1, kSmiTagSize); // convert smi to int
dec(r1);
@@ -1013,19 +1075,19 @@
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
- add(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
+ add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(r2, r1);
// Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
- NumberDictionary::kElementsStartOffset));
+ SeededNumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
j(equal, &done);
} else {
@@ -1036,7 +1098,7 @@
bind(&done);
  // Check that the value is a normal property.
const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
@@ -1044,7 +1106,7 @@
// Get the value at the masked, scaled index.
const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
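
For a concrete picture of the probe schedule the unrolled loop emits, here is
a standalone sketch. The i + i*i offset follows the comment in the loop above
(the production offset comes from SeededNumberDictionary::GetProbeOffset), and
kProbes == 4 is an assumption:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t capacity = 16;   // Dictionary capacity, a power of two.
      const uint32_t mask = capacity - 1;
      const uint32_t hash = 0x2057u;  // Stand-in for the GetNumberHash result.
      for (uint32_t i = 0; i < 4; i++) {
        // Masked index per the comment above: (hash + i + i * i) & mask.
        uint32_t slot = (hash + i + i * i) & mask;
        // Entries are kEntrySize == 3 pointers wide: key, value, details.
        std::printf("probe %u -> slot %u (element index %u)\n",
                    i, slot, slot * 3);
      }
      return 0;
    }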
@@ -1325,7 +1387,7 @@
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
- // Allocate ascii string in new space.
+ // Allocate ASCII string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
times_1,
scratch1,
@@ -1353,7 +1415,7 @@
Label* gc_required) {
ASSERT(length > 0);
- // Allocate ascii string in new space.
+ // Allocate ASCII string in new space.
AllocateInNewSpace(SeqAsciiString::SizeFor(length),
result,
scratch1,
@@ -1871,11 +1933,13 @@
Handle<Code> code_constant,
const Operand& code_operand,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_near,
const CallWrapper& call_wrapper,
CallKind call_kind) {
bool definitely_matches = false;
+ *definitely_mismatches = false;
Label invoke;
if (expected.is_immediate()) {
ASSERT(actual.is_immediate());
@@ -1891,6 +1955,7 @@
// arguments.
definitely_matches = true;
} else {
+ *definitely_mismatches = true;
mov(ebx, expected.immediate());
}
}
@@ -1928,7 +1993,9 @@
SetCallKind(ecx, call_kind);
call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
- jmp(done, done_near);
+ if (!*definitely_mismatches) {
+ jmp(done, done_near);
+ }
} else {
SetCallKind(ecx, call_kind);
jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -1948,20 +2015,23 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
+ bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, flag, Label::kNear, call_wrapper,
- call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(ecx, call_kind);
- call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code);
+ &done, &definitely_mismatches, flag, Label::kNear,
+ call_wrapper, call_kind);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(ecx, call_kind);
+ call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(ecx, call_kind);
+ jmp(code);
+ }
+ bind(&done);
}
- bind(&done);
}
@@ -1977,19 +2047,22 @@
Label done;
Operand dummy(eax, 0);
- InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
- call_wrapper, call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code, rmode));
- SetCallKind(ecx, call_kind);
- call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code, rmode);
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
+ flag, Label::kNear, call_wrapper, call_kind);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code, rmode));
+ SetCallKind(ecx, call_kind);
+ call(code, rmode);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(ecx, call_kind);
+ jmp(code, rmode);
+ }
+ bind(&done);
}
- bind(&done);
}
@@ -2022,7 +2095,7 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
  // Get the function and set up the context.
- mov(edi, Immediate(function));
+ LoadHeapObject(edi, function);
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -2151,6 +2224,29 @@
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ mov(result, Operand::Cell(cell));
+ } else {
+ mov(result, object);
+ }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ push(Operand::Cell(cell));
+ } else {
+ Push(object);
+ }
+}
+
+
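LoadHeapObject and PushHeapObject exist because new-space objects move on
every scavenge, so their addresses cannot be baked into generated code;
instead the code embeds the address of a stable property cell and loads
through it. A plain-C++ sketch of that indirection, with hypothetical names
throughout:

    #include <cstdio>

    struct Cell { const void* value; };  // Stands in for JSGlobalPropertyCell.

    // The GC rewrites cell->value when the object moves; code that embedded
    // the cell's stable address keeps working without being repatched.
    const void* LoadThroughCell(const Cell* cell) { return cell->value; }

    int main() {
      int object = 42;           // Pretend this lives in new space.
      Cell cell = { &object };
      std::printf("%p\n", LoadThroughCell(&cell));
      return 0;
    }
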
void MacroAssembler::Ret() {
ret(0);
}
@@ -2182,11 +2278,6 @@
}
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, value);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2384,7 +2475,7 @@
movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
- // Check that both are flat ascii strings.
+ // Check that both are flat ASCII strings.
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 03ec28a..0fcb94f 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -237,10 +237,21 @@
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void PushHeapObject(Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ Set(result, Immediate(object));
+ }
+ }
+
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Setup call kind marking in ecx. The method takes ecx as an
+ // Set up call kind marking in ecx. The method takes ecx as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
@@ -305,8 +316,9 @@
void SafeSet(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
- // Compare a register against a known root, e.g. undefined, null, true, ...
+ // Compare against a known root, e.g. undefined, null, true, ...
void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
@@ -344,13 +356,24 @@
Label* fail,
bool specialize_for_processor);
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set
+  // with the result of the map compare. If multiple map compares are
+  // required, the compare sequence branches to early_success.
+ void CompareMap(Register obj,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
- // heap object)
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type);
+ SmiCheckType smi_check_type,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
@@ -474,6 +497,7 @@
Register scratch,
Label* miss);
+ void GetNumberHash(Register r0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
@@ -718,10 +742,8 @@
// Move if the registers are not identical.
void Move(Register target, Register source);
- void Move(Register target, Handle<Object> value);
-
// Push a handle value.
- void Push(Handle<Object> handle) { push(handle); }
+ void Push(Handle<Object> handle) { push(Immediate(handle)); }
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
@@ -769,7 +791,7 @@
// ---------------------------------------------------------------------------
// String utilities.
- // Check whether the instance type represents a flat ascii string. Jump to the
+ // Check whether the instance type represents a flat ASCII string. Jump to the
// label if not. If the instance type can be scratched specify same register
// for both instance type and scratch.
void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
@@ -805,6 +827,7 @@
Handle<Code> code_constant,
const Operand& code_operand,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_distance,
const CallWrapper& call_wrapper = NullCallWrapper(),
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index dbf01ab..e613a06 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -210,7 +210,7 @@
bool check_end_of_string) {
#ifdef DEBUG
// If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ascii character.
+ // match contains a non-ASCII character.
if (mode_ == ASCII) {
ASSERT(String::IsAscii(str.start(), str.length()));
}
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 722d718..f6f4241 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -429,7 +429,7 @@
// -----------------------------------
  // Get the function and set up the context.
Handle<JSFunction> function = optimization.constant_function();
- __ mov(edi, Immediate(function));
+ __ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Pass the additional arguments.
@@ -695,13 +695,9 @@
Register name_reg,
Register scratch,
Label* miss_label) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver_reg, miss_label);
-
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, miss_label);
+ __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+ miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -878,13 +874,10 @@
if (in_new_space) {
// Save the map in scratch1 for later.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Immediate(current_map));
- } else {
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(current_map));
}
- // Branch on the result of the map check.
- __ j(not_equal, miss);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -916,9 +909,8 @@
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(holder->map())));
- __ j(not_equal, miss);
+ __ CheckMap(reg, Handle<Map>(holder->map()),
+ miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1000,7 +992,7 @@
__ push(scratch3); // Restore return address.
- // 3 elements array for v8::Agruments::values_, handler for name and pointer
+  // 3-element array for v8::Arguments::values_, handler for name and pointer
   // to the values (it is considered a smi by the GC).
const int kStackSpace = 5;
const int kApiArgc = 2;
@@ -1025,7 +1017,7 @@
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1036,7 +1028,7 @@
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(eax, value);
+ __ LoadHeapObject(eax, value);
__ ret(0);
}
@@ -1061,7 +1053,7 @@
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
@@ -2338,7 +2330,7 @@
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
- // Setup the context (function already in edi).
+ // Set up the context (function already in edi).
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
@@ -2403,13 +2395,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss);
+ __ CheckMap(edx, Handle<Map>(object->map()),
+ &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2453,13 +2441,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss);
+ __ CheckMap(edx, Handle<Map>(receiver->map()),
+ &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2715,7 +2699,7 @@
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -2877,7 +2861,7 @@
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver