Merge V8 at r7668: Initial merge by Git.
Change-Id: I1703c8b4f5c63052451a22cf3fb878abc9a0ec75
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 1da3f81..a9247f4 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -225,9 +225,9 @@
StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(this);
+ StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(this);
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
@@ -237,7 +237,7 @@
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(this);
+ StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
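
Note: the hunks above thread the owning Heap* through the static-visitor callbacks instead of letting each callback rediscover it, presumably via Isolate::Current(), so visitation no longer depends on the thread's notion of a current isolate once several isolates (and heaps) share a process. A freestanding miniature of the pattern — Heap, RelocInfo, and the visitor are stand-ins here, not the real V8 declarations:

    #include <cstdio>

    struct Heap { const char* owner; };  // stand-in for v8::internal::Heap
    struct RelocInfo {};                 // stand-in for v8::internal::RelocInfo

    struct StaticVisitor {
      // The caller already knows which heap it is scanning and passes it
      // down; no thread-local singleton lookup per callback.
      static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
        std::printf("code target visited for heap of %s\n", heap->owner);
      }
    };

    int main() {
      Heap heap = { "isolate A" };
      RelocInfo rinfo;
      StaticVisitor::VisitCodeTarget(&heap, &rinfo);
      return 0;
    }
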
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index e6d245e..9273037 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -48,24 +48,37 @@
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-CpuFeatures::CpuFeatures()
- : supported_(0),
- enabled_(0),
- found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+uint64_t CpuFeatures::supported_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
-void CpuFeatures::Probe(bool portable) {
- ASSERT(HEAP->HasBeenSetup());
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
ASSERT(supported_ == 0);
- if (portable && Serializer::enabled()) {
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+ if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
- Assembler assm(NULL, 0);
+ const int kBufferSize = 4 * KB;
+ VirtualMemory* memory = new VirtualMemory(kBufferSize);
+ if (!memory->IsReserved()) {
+ delete memory;
+ return;
+ }
+ ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+ if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
+ delete memory;
+ return;
+ }
+
+ Assembler assm(NULL, memory->address(), kBufferSize);
Label cpuid, done;
#define __ assm.
// Save old esp, since we are going to modify the stack.
@@ -119,27 +132,15 @@
__ ret(0);
#undef __
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code;
- { MaybeObject* maybe_code =
- assm.isolate()->heap()->CreateCode(desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>::null());
- if (!maybe_code->ToObject(&code)) return;
- }
- if (!code->IsCode()) return;
-
- PROFILE(ISOLATE,
- CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
supported_ = probe();
found_by_runtime_probing_ = supported_;
uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
supported_ |= os_guarantees;
- found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+ found_by_runtime_probing_ &= ~os_guarantees;
+
+ delete memory;
}
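
Note: two things about the rewritten Probe(). It no longer needs a live V8 heap (the old code asserted HEAP->HasBeenSetup() and built the stub with Heap::CreateCode; the new code commits a page of executable VirtualMemory and even passes a NULL isolate to the Assembler), and any failure to reserve or commit memory is still silent and safe. A runnable model of the reserve/commit/execute/release dance, using raw POSIX mmap in place of V8's VirtualMemory — the emitted stub just returns a constant instead of running CPUID, and strict W^X platforms may refuse the writable+executable mapping:

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      const size_t kBufferSize = 4096;
      void* memory = mmap(NULL, kBufferSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (memory == MAP_FAILED) return 0;  // fail silently, as Probe() does

      // ia32/x86-64 machine code for: mov eax, 42; ret
      const uint8_t stub[] = { 0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3 };
      std::memcpy(memory, stub, sizeof(stub));

      typedef uint32_t (*F0)();
      F0 probe = reinterpret_cast<F0>(memory);  // like FUNCTION_CAST<F0>(...)
      std::printf("probe() = %u\n", probe());   // prints 42

      munmap(memory, kBufferSize);
      return 0;
    }
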
@@ -297,8 +298,8 @@
static void InitCoverageLog();
#endif
-Assembler::Assembler(void* buffer, int buffer_size)
- : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
@@ -386,7 +387,7 @@
void Assembler::cpuid() {
- ASSERT(isolate()->cpu_features()->IsEnabled(CPUID));
+ ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@@ -747,7 +748,7 @@
void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
- ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@@ -758,7 +759,7 @@
void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
- ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@@ -769,7 +770,7 @@
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: 0f 40 + cc /r.
@@ -1450,7 +1451,7 @@
void Assembler::rdtsc() {
- ASSERT(isolate()->cpu_features()->IsEnabled(RDTSC));
+ ASSERT(CpuFeatures::IsEnabled(RDTSC));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@@ -1856,7 +1857,7 @@
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDB);
@@ -1865,7 +1866,7 @@
void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDD);
@@ -2134,7 +2135,7 @@
void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@@ -2145,7 +2146,7 @@
void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2156,7 +2157,7 @@
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2167,7 +2168,7 @@
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@@ -2178,7 +2179,7 @@
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2189,7 +2190,7 @@
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2200,7 +2201,7 @@
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2211,7 +2212,7 @@
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2222,7 +2223,7 @@
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2233,7 +2234,7 @@
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2264,7 +2265,7 @@
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2275,7 +2276,7 @@
void Assembler::movmskpd(Register dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2286,7 +2287,7 @@
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2298,7 +2299,7 @@
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@@ -2308,7 +2309,7 @@
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2319,7 +2320,7 @@
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2330,7 +2331,7 @@
void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@@ -2341,7 +2342,7 @@
void Assembler::movdqu(XMMRegister dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@@ -2352,7 +2353,7 @@
void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2364,7 +2365,7 @@
void Assembler::movntdq(const Operand& dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2400,7 +2401,7 @@
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@@ -2411,7 +2412,7 @@
void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@@ -2422,7 +2423,7 @@
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2433,7 +2434,7 @@
void Assembler::movss(const Operand& dst, XMMRegister src ) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3); // float
@@ -2444,7 +2445,7 @@
void Assembler::movss(XMMRegister dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3); // float
@@ -2455,7 +2456,7 @@
void Assembler::movss(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@@ -2466,7 +2467,7 @@
void Assembler::movd(XMMRegister dst, const Operand& src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2477,7 +2478,7 @@
void Assembler::movd(const Operand& dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2488,7 +2489,7 @@
void Assembler::pand(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2499,7 +2500,7 @@
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2510,7 +2511,7 @@
void Assembler::por(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2521,7 +2522,7 @@
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2533,7 +2534,7 @@
void Assembler::psllq(XMMRegister reg, int8_t shift) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2545,7 +2546,7 @@
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2556,7 +2557,7 @@
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2568,7 +2569,7 @@
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2579,7 +2580,7 @@
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2591,7 +2592,7 @@
void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2604,7 +2605,7 @@
void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
- ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
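
Note: the long run of mechanical edits above — ASSERT(isolate()->cpu_features()->IsEnabled(f)) becoming ASSERT(CpuFeatures::IsEnabled(f)) — follows from supported_ and found_by_runtime_probing_ turning into process-wide statics. The bits the probe stub gathers come from CPUID leaf 1; for reference, a portable spot-check of the same feature bits using GCC/Clang's <cpuid.h> instead of hand-emitted code:

    #include <cpuid.h>  // GCC/Clang wrapper around the CPUID instruction
    #include <cstdio>

    int main() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 1;
      std::printf("CMOV:%u SSE2:%u SSE3:%u SSE4.1:%u RDTSC:%u\n",
                  (edx >> 15) & 1,   // EDX bit 15: CMOV
                  (edx >> 26) & 1,   // EDX bit 26: SSE2
                  ecx & 1,           // ECX bit 0:  SSE3
                  (ecx >> 19) & 1,   // ECX bit 19: SSE4.1
                  (edx >> 4) & 1);   // EDX bit 4:  TSC (RDTSC)
      return 0;
    }
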
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 8e0c762..079dca7 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -446,16 +446,15 @@
// } else {
// // Generate standard x87 floating point code.
// }
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
public:
- // Detect features of the target CPU. If the portable flag is set,
- // the method sets safe defaults if the serializer is enabled
- // (snapshots must be portable).
- void Probe(bool portable);
- void Clear() { supported_ = 0; }
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
// Check whether a feature is supported by the target CPU.
- bool IsSupported(CpuFeature f) const {
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
@@ -463,46 +462,85 @@
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
+
+#ifdef DEBUG
// Check whether a feature is currently enabled.
- bool IsEnabled(CpuFeature f) const {
- return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+ static bool IsEnabled(CpuFeature f) {
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ uint64_t enabled = isolate->enabled_cpu_features();
+ return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
}
+#endif
+
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(CpuFeature f)
- : cpu_features_(Isolate::Current()->cpu_features()),
- isolate_(Isolate::Current()) {
+ explicit Scope(CpuFeature f) {
uint64_t mask = static_cast<uint64_t>(1) << f;
- ASSERT(cpu_features_->IsSupported(f));
+ ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
- (cpu_features_->found_by_runtime_probing_ & mask) == 0);
- old_enabled_ = cpu_features_->enabled_;
- cpu_features_->enabled_ |= mask;
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = isolate_->enabled_cpu_features();
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
}
~Scope() {
- ASSERT_EQ(Isolate::Current(), isolate_);
- cpu_features_->enabled_ = old_enabled_;
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
}
private:
- uint64_t old_enabled_;
- CpuFeatures* cpu_features_;
Isolate* isolate_;
+ uint64_t old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
+ }
+ }
+
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
+
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const uint64_t old_supported_;
+ };
+
private:
- CpuFeatures();
-
- uint64_t supported_;
- uint64_t enabled_;
- uint64_t found_by_runtime_probing_;
-
- friend class Isolate;
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+ static uint64_t supported_;
+ static uint64_t found_by_runtime_probing_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
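
Note: the reworked class keeps a single process-wide supported_ mask, while the debug-only "enabled" overlay moves into the isolate (IsEnabled() consults the current isolate's enabled_cpu_features() and falls back to IsSupported() when no isolate is entered). TryForceFeatureScope temporarily widens supported_ itself, which is only safe when Serializer::enabled() guarantees a single isolate — the situation Generate_OnStackReplacement exploits below. The RAII shape in a compilable miniature, with stand-in types and none of the serializer or assertion plumbing:

    #include <cassert>
    #include <cstdint>

    enum CpuFeature { SSE2 = 0, SSE3 = 1, CMOV = 2 };

    struct Features {
      static uint64_t supported_;  // filled once by a Probe() equivalent
      static bool IsSupported(CpuFeature f) {
        return (supported_ & (uint64_t{1} << f)) != 0;
      }
    };
    uint64_t Features::supported_ = uint64_t{1} << CMOV;

    // Temporarily pretend a feature exists; restore the old mask on exit.
    class TryForceFeatureScope {
     public:
      explicit TryForceFeatureScope(CpuFeature f)
          : old_supported_(Features::supported_) {
        Features::supported_ |= uint64_t{1} << f;
      }
      ~TryForceFeatureScope() { Features::supported_ = old_supported_; }
     private:
      const uint64_t old_supported_;
    };

    int main() {
      assert(!Features::IsSupported(SSE2));
      {
        TryForceFeatureScope force(SSE2);
        assert(Features::IsSupported(SSE2));  // visible only inside the scope
      }
      assert(!Features::IsSupported(SSE2));
      return 0;
    }
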
@@ -535,7 +573,8 @@
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ // TODO(vitalyr): the assembler does not need an isolate.
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 2970a0e..29c67b5 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -1523,12 +1523,8 @@
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // We shouldn't be performing on-stack replacement in the first
- // place if the CPU features we need for the optimized Crankshaft
- // code aren't supported.
- CpuFeatures* cpu_features = masm->isolate()->cpu_features();
- cpu_features->Probe(false);
- if (!cpu_features->IsSupported(SSE2)) {
+ CpuFeatures::TryForceFeatureScope scope(SSE2);
+ if (!CpuFeatures::IsSupported(SSE2)) {
__ Abort("Unreachable code: Cannot optimize without SSE2 support.");
return;
}
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 96faae9..275e8e2 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -291,166 +291,6 @@
}
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ mov(right_arg, right);
- } else if (right.is(right_arg)) {
- __ mov(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ mov(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ mov(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ mov(right_arg, right);
- __ mov(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(
- masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(Immediate(right));
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (left.is(left_arg)) {
- __ mov(right_arg, Immediate(right));
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ mov(left_arg, Immediate(right));
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ mov(left_arg, left);
- __ mov(right_arg, Immediate(right));
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(
- masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(Immediate(left));
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (right.is(right_arg)) {
- __ mov(left_arg, Immediate(left));
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ mov(right_arg, Immediate(left));
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ mov(right_arg, right);
- __ mov(left_arg, Immediate(left));
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
@@ -534,762 +374,6 @@
};
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- if (HasArgsInRegisters()) {
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ mov(right, Operand(esp, 1 * kPointerSize));
- __ mov(left, Operand(esp, 2 * kPointerSize));
- }
-
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- __ or_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_AND) {
- __ and_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_XOR) {
- __ xor_(right, Operand(left));
- GenerateReturn(masm);
- return;
- }
- }
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, Operand(left));
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left));
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis, not_taken);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging. These two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- break;
-
- case Token::SUB:
- __ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &not_smis, not_taken);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax.
- GenerateReturn(masm);
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::SHL: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- } else {
- ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
- __ jmp(slow);
- }
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- GenerateReturn(masm);
- } else {
- ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
- __ jmp(slow);
- }
- break;
- }
-
- default:
- break;
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
- Label slow;
- if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- }
-
- // Generate fast case smi code if requested. This flag is set when the fast
- // case smi code is not generated by the caller. Generating it here will speed
- // up common operations.
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
-
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(edx);
- __ AbortIfNotSmi(eax);
- }
- FloatingPointHelper::LoadSSE2Smis(masm, ecx);
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm);
- }
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- } else { // SSE2 not available, use FPU.
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- } else {
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- }
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- GenerateReturn(masm);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // Try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm,
- static_operands_type_,
- use_sse3_,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- GenerateReturn(masm);
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
- // result. If arguments were passed in registers, now place them on the
- // stack in the correct order below the return address.
-
- // Avoid hitting the string ADD code below when allocation fails in
- // the floating point code above.
- if (op_ != Token::ADD) {
- __ bind(&call_runtime);
- }
-
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Test for string arguments before calling runtime.
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in edx, eax
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
- if (HasArgsReversed()) {
- lhs = eax;
- rhs = edx;
- } else {
- lhs = edx;
- rhs = eax;
- }
-
- // Test if left operand is a string.
- NearLabel lhs_not_string;
- __ test(lhs, Immediate(kSmiTagMask));
- __ j(zero, &lhs_not_string);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &lhs_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- __ TailCallStub(&string_add_left_stub);
-
- NearLabel call_runtime_with_args;
- // Left operand is not a string, test right.
- __ bind(&lhs_not_string);
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime_with_args);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime_with_args);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
- __ bind(&call_runtime_with_args);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, Operand(ebx));
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, Operand(edx));
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- // If arguments are not passed in registers read them from the stack.
- ASSERT(!HasArgsInRegisters());
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(ecx);
- if (HasArgsReversed()) {
- __ push(eax);
- __ push(edx);
- } else {
- __ push(edx);
- __ push(eax);
- }
- __ push(ecx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- __ pop(ecx); // Save return address.
-
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
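
Note: most of the deletion above is GenericBinaryOpStub's smi fast path, superseded by the type-recording stubs that follow. One detail worth keeping in mind when reading either version: ia32 smis are 31-bit signed values stored shifted left by one (the deleted code STATIC_ASSERTs kSmiTag == 0 and kSmiTagSize == 1), so the 0xc0000000 tests check whether the top two bits of a result still agree. The same two checks restated in plain C++ (my own restatement, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Signed case (SHL result): matches `cmp left, 0xc0000000; j(sign, ...)`.
    // Subtracting 0xc0000000 is adding 0x40000000 mod 2^32, so the sign flag
    // stays clear exactly for values in [-2^30, 2^30 - 1].
    bool SignedFitsInSmi(int32_t v) {
      return static_cast<uint32_t>(v) + 0x40000000u < 0x80000000u;
    }

    // Unsigned case (SHR result): matches `test left, 0xc0000000;
    // j(not_zero, slow)`. Either high bit set means the value cannot
    // survive smi tagging.
    bool UnsignedFitsInSmi(uint32_t v) {
      return (v & 0xc0000000u) == 0;
    }

    int main() {
      assert(SignedFitsInSmi((1 << 30) - 1) && !SignedFitsInSmi(1 << 30));
      assert(SignedFitsInSmi(-(1 << 30)) && !SignedFitsInSmi(-(1 << 30) - 1));
      assert(UnsignedFitsInSmi(1u << 29) && !UnsignedFitsInSmi(1u << 31));
      return 0;
    }
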
@@ -1362,6 +446,9 @@
case TRBinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
+ case TRBinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
case TRBinaryOpIC::STRING:
GenerateStringStub(masm);
break;
@@ -1660,7 +747,7 @@
// number in eax.
__ AllocateHeapNumber(eax, ecx, ebx, slow);
// Store the result in the HeapNumber and return.
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(left));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -1705,7 +792,7 @@
break;
}
__ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
switch (op_) {
@@ -1825,6 +912,38 @@
}
+void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+
+ // Test if left operand is a string.
+ __ test(left, Immediate(kSmiTagMask));
+ __ j(zero, &call_runtime);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime);
+
+ // Test if right operand is a string.
+ __ test(right, Immediate(kSmiTagMask));
+ __ j(zero, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime);
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
+
+
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
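
Note: GenerateBothStringStub backs the new BOTH_STRING state added to the dispatch switch earlier. As long as both incoming operands check out as strings, ADD tail-calls the string-add stub with no further type checks; the first non-string operand triggers a type transition instead. The control flow modeled in freestanding C++ — the real stub patches generated code, whereas here the "transition" is just a flag:

    #include <cassert>
    #include <string>
    #include <variant>

    using Value = std::variant<double, std::string>;

    Value AddBothString(const Value& left, const Value& right,
                        bool* transitioned) {
      if (std::holds_alternative<std::string>(left) &&
          std::holds_alternative<std::string>(right)) {
        return std::get<std::string>(left) + std::get<std::string>(right);
      }
      *transitioned = true;  // corresponds to GenerateTypeTransition(masm)
      return 0.0;            // a more generic stub takes over from here
    }

    int main() {
      bool transitioned = false;
      Value v = AddBothString(Value{std::string("foo")},
                              Value{std::string("bar")}, &transitioned);
      assert(!transitioned && std::get<std::string>(v) == "foobar");
      AddBothString(Value{1.0}, Value{std::string("bar")}, &transitioned);
      assert(transitioned);
      return 0;
    }
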
@@ -1837,7 +956,7 @@
case Token::DIV: {
Label not_floats;
Label not_int32;
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
@@ -1958,7 +1077,7 @@
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2036,23 +1155,25 @@
GenerateAddStrings(masm);
}
+ Factory* factory = masm->isolate()->factory();
+
// Convert odd ball arguments to numbers.
NearLabel check, done;
- __ cmp(edx, FACTORY->undefined_value());
+ __ cmp(edx, factory->undefined_value());
__ j(not_equal, &check);
if (Token::IsBitOp(op_)) {
__ xor_(edx, Operand(edx));
} else {
- __ mov(edx, Immediate(FACTORY->nan_value()));
+ __ mov(edx, Immediate(factory->nan_value()));
}
__ jmp(&done);
__ bind(&check);
- __ cmp(eax, FACTORY->undefined_value());
+ __ cmp(eax, factory->undefined_value());
__ j(not_equal, &done);
if (Token::IsBitOp(op_)) {
__ xor_(eax, Operand(eax));
} else {
- __ mov(eax, Immediate(FACTORY->nan_value()));
+ __ mov(eax, Immediate(factory->nan_value()));
}
__ bind(&done);
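
Note: the FACTORY uses above become lookups through a cached local. If memory serves, FACTORY was a convenience macro along the following lines (quoted as an assumption about src/isolate.h of this era, not from this diff), so each use paid an Isolate::Current() TLS lookup that the local now pays only once:

    // Assumed definition of the macro being replaced:
    #define FACTORY (v8::internal::Isolate::Current()->factory())
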
@@ -2070,7 +1191,7 @@
case Token::MUL:
case Token::DIV: {
Label not_floats;
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
@@ -2173,7 +1294,7 @@
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2275,7 +1396,7 @@
case Token::MUL:
case Token::DIV: {
Label not_floats;
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
@@ -2373,7 +1494,7 @@
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2572,7 +1693,7 @@
__ bind(&loaded);
} else { // UNTAGGED.
- if (masm->isolate()->cpu_features()->IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
__ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
@@ -2826,8 +1947,7 @@
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
- if (type_info.IsInteger32() &&
- masm->isolate()->cpu_features()->IsEnabled(SSE2)) {
+ if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
return;
@@ -3375,7 +2495,7 @@
IntegerConvert(masm,
eax,
TypeInfo::Unknown(),
- masm->isolate()->cpu_features()->IsSupported(SSE3),
+ CpuFeatures::IsSupported(SSE3),
&slow);
// Do the bitwise operation and check if the result fits in a smi.
@@ -3398,7 +2518,7 @@
__ AllocateHeapNumber(ebx, edx, edi, &slow);
__ mov(eax, Operand(ebx));
}
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ecx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -4270,7 +3390,7 @@
FixedArray::kHeaderSize));
__ test(probe, Immediate(kSmiTagMask));
__ j(zero, not_found);
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
@@ -4509,7 +3629,7 @@
if (include_number_compare_) {
Label non_number_comparison;
Label unordered;
- if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
CpuFeatures::Scope use_cmov(CMOV);
@@ -6455,8 +5575,7 @@
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or SSE2 or CMOV is unsupported.
- CpuFeatures* cpu_features = masm->isolate()->cpu_features();
- if (cpu_features->IsSupported(SSE2) && cpu_features->IsSupported(CMOV)) {
+ if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope scope1(SSE2);
CpuFeatures::Scope scope2(CMOV);
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 31fa645..cf73682 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -72,161 +72,6 @@
};
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type)
- : op_(op),
- mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
- name_(NULL) {
- if (static_operands_type_.IsSmi()) {
- mode_ = NO_OVERWRITE;
- }
- use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
- name_(NULL) {
- }
-
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- bool ArgsInRegistersSupported() {
- return op_ == Token::ADD || op_ == Token::SUB
- || op_ == Token::MUL || op_ == Token::DIV;
- }
-
- void SetArgsInRegisters() {
- ASSERT(ArgsInRegistersSupported());
- args_in_registers_ = true;
- }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
- bool use_sse3_;
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
- }
-#endif
-
- // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
- class ArgsReversedBits: public BitField<bool, 11, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class StaticTypeInfoBits: public BitField<int, 13, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
-
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-
- virtual void FinishCode(Code* code) {
- code->set_binary_op_type(runtime_operands_type_);
- }
-
- friend class CodeGenerator;
-};
-
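The MinorKey encoding being deleted above relies on BitField<Type, shift, size> helpers that each own a disjoint bit range of a single integer, so encoded fields can be OR-ed together and decoded independently. A self-contained two-field illustration of that round trip (simplified, not the 18-bit layout above):

#include <cassert>

// Simplified stand-in for V8's BitField<T, shift, size>: each field owns
// a disjoint bit range of the key.
template <class T, int shift, int size>
struct BitFieldSketch {
  static const unsigned kMask = ((1u << size) - 1) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

typedef BitFieldSketch<int, 0, 2> ModeBitsSketch;  // 2 bits at offset 0
typedef BitFieldSketch<int, 2, 7> OpBitsSketch;    // 7 bits at offset 2

int main() {
  unsigned key = OpBitsSketch::encode(42) | ModeBitsSketch::encode(3);
  assert(OpBitsSketch::decode(key) == 42);
  assert(ModeBitsSketch::decode(key) == 3);
  return 0;
}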
-
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
@@ -235,7 +80,7 @@
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
- use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -308,6 +153,7 @@
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
diff --git a/src/ia32/codegen-ia32-inl.h b/src/ia32/codegen-ia32-inl.h
deleted file mode 100644
index 49c706d..0000000
--- a/src/ia32/codegen-ia32-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_IA32_CODEGEN_IA32_INL_H_
-#define V8_IA32_CODEGEN_IA32_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index cf990a0..572c36c 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,81 +29,15 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "codegen-inl.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int action = registers_[i];
- if (action == kPush) {
- __ push(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
- __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
- }
- }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
- // Restore registers in reverse order due to the stack.
- for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
- int action = registers_[i];
- if (action == kPush) {
- __ pop(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore) {
- action &= ~kSyncedFlag;
- __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
- }
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
- frame_state_.Restore(masm_);
-}
-
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- frame_state_->Restore(masm);
-}
-
-
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
@@ -114,10069 +48,21 @@
}
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- destination_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
- ControlDestination* destination)
- : owner_(owner),
- destination_(destination),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- state_(NULL),
- loop_nesting_(0),
- in_safe_int32_mode_(false),
- safe_int32_mode_enabled_(true),
- function_return_is_shadowed_(false),
- in_spilled_code_(false),
- jit_cookie_((FLAG_mask_constants_with_cookie) ?
- V8::RandomPrivate(Isolate::Current()) : 0) {
-}
-
-
-// Calling conventions:
-// ebp: caller's frame pointer
-// esp: stack pointer
-// edi: called JS function
-// esi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- set_in_spilled_code(false);
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- masm()->isolate()->set_jump_target_compiling_deferred_code(false);
-
- {
- CodeGenState state(this);
-
- // Entry:
- // Stack: receiver, arguments, return address.
- // ebp: caller's frame pointer
- // esp: stack pointer
- // edi: called JS function
- // esi: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ int3();
- }
-#endif
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
- }
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ mov(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(FACTORY->the_hole_value());
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
- CheckStack();
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = info->isolate()->bootstrapper()->IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- CodeForReturnPosition(info->function());
- frame_->PrepareForReturn();
- Result undefined(FACTORY->undefined_value());
- if (function_return_.is_bound()) {
- function_return_.Jump(&undefined);
- } else {
- function_return_.Bind(&undefined);
- GenerateReturnSequence(&undefined);
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- Result return_value;
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
- }
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- info->isolate()->set_jump_target_compiling_deferred_code(true);
- ProcessDeferred();
- info->isolate()->set_jump_target_compiling_deferred_code(false);
- }
-
- // There is no need to delete the register allocator, it is a
- // stack-allocated local.
- allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(esi)); // do not overwrite context register
- Register context = esi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
-        // (In fact this mov may never be needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return Operand(eax);
- }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = esi;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- }
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, dest);
- Visit(expr);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
- }
-
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- ToBoolean(dest);
- }
-
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
- BreakTarget* unsafe_bailout) {
- set_unsafe_bailout(unsafe_bailout);
- set_in_safe_int32_mode(true);
- Load(expr);
- Result value = frame_->Pop();
- ASSERT(frame_->HasNoUntaggedInt32Elements());
- if (expr->GuaranteedSmiResult()) {
- ConvertInt32ResultToSmi(&value);
- } else {
- ConvertInt32ResultToNumber(&value);
- }
- set_in_safe_int32_mode(false);
- set_unsafe_bailout(NULL);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
- set_safe_int32_mode_enabled(false);
- Load(expr);
- set_safe_int32_mode_enabled(true);
-}
-
-
-void CodeGenerator::ConvertInt32ResultToSmi(Result* value) {
- ASSERT(value->is_untagged_int32());
- if (value->is_register()) {
- __ add(value->reg(), Operand(value->reg()));
- } else {
- ASSERT(value->is_constant());
- ASSERT(value->handle()->IsSmi());
- }
- value->set_untagged_int32(false);
- value->set_type_info(TypeInfo::Smi());
-}
-
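In ConvertInt32ResultToSmi above, add(value->reg(), Operand(value->reg())) is the smi-tagging idiom: with kSmiTag == 0 and a 31-bit payload held in bits 1..31, tagging an integer is just doubling it. A worked model (helper names are illustrative):

#include <cassert>
#include <cstdint>

// ia32 smi encoding in miniature: the tag (bit 0) is 0 and the payload
// occupies bits 1..31, so tagging is a left shift by one -- which the
// single-instruction add reg, reg computes.
static int32_t SmiTagSketch(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
static int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }

int main() {
  assert(SmiTagSketch(5) == 10);                   // tag bit stays clear
  assert((SmiTagSketch(123) & 1) == 0);            // kSmiTagMask sees a smi
  assert(SmiUntagSketch(SmiTagSketch(-7)) == -7);  // round trip, negative too
  return 0;
}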
-
-void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
- ASSERT(value->is_untagged_int32());
- if (value->is_register()) {
- Register val = value->reg();
- JumpTarget done;
- __ add(val, Operand(val));
- done.Branch(no_overflow, value);
- __ sar(val, 1);
- // If there was an overflow, bits 30 and 31 of the original number disagree.
- __ xor_(val, 0x80000000u);
- if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ cvtsi2sd(xmm0, Operand(val));
- } else {
- // Move val to ST[0] in the FPU
- // Push and pop are safe with respect to the virtual frame because
- // all synced elements are below the actual stack pointer.
- __ push(val);
- __ fild_s(Operand(esp, 0));
- __ pop(val);
- }
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_register());
- Label allocation_failed;
- __ AllocateHeapNumber(val, scratch.reg(),
- no_reg, &allocation_failed);
- VirtualFrame* clone = new VirtualFrame(frame_);
- scratch.Unuse();
- if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
- }
- done.Jump(value);
-
- // Establish the virtual frame, cloned from where AllocateHeapNumber
- // jumped to allocation_failed.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&allocation_failed);
- if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- // Pop the value from the floating point stack.
- __ fstp(0);
- }
- unsafe_bailout_->Jump();
-
- done.Bind(value);
- } else {
- ASSERT(value->is_constant());
- }
- value->set_untagged_int32(false);
- value->set_type_info(TypeInfo::Integer32());
-}
-
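The sar(val, 1) / xor_(val, 0x80000000u) recovery in ConvertInt32ResultToNumber works because doubling overflows exactly when bits 30 and 31 of the original disagree: the arithmetic shift then reproduces every bit of the original except a flipped sign bit, and the xor undoes the flip. Checked numerically:

#include <cassert>
#include <cstdint>

// After a doubling overflow, (doubled >> 1) has every bit of the original
// except a flipped sign bit; xor-ing 0x80000000 restores it exactly.
static int32_t RecoverAfterOverflow(int32_t doubled) {
  return (doubled >> 1) ^ static_cast<int32_t>(0x80000000u);
}

int main() {
  int32_t x = 0x50000000;  // bit 30 set, bit 31 clear: doubling overflows
  int32_t doubled = static_cast<int32_t>(static_cast<uint32_t>(x) << 1);
  assert(RecoverAfterOverflow(doubled) == x);

  x = static_cast<int32_t>(0x90000000u);  // negative case, also overflows
  doubled = static_cast<int32_t>(static_cast<uint32_t>(x) << 1);
  assert(RecoverAfterOverflow(doubled) == x);
  return 0;
}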
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
-
- // If the expression should be a side-effect-free 32-bit int computation,
- // compile that SafeInt32 path, and a bailout path.
- if (!in_safe_int32_mode() &&
- safe_int32_mode_enabled() &&
- expr->side_effect_free() &&
- expr->num_bit_ops() > 2 &&
- masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- BreakTarget unsafe_bailout;
- JumpTarget done;
- unsafe_bailout.set_expected_height(frame_->height());
- LoadInSafeInt32Mode(expr, &unsafe_bailout);
- done.Jump();
-
- if (unsafe_bailout.is_linked()) {
- unsafe_bailout.Bind();
- LoadWithSafeInt32ModeDisabled(expr);
- }
- done.Bind();
- } else {
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(FACTORY->false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(FACTORY->true_value());
- loaded.Bind();
- }
-
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(FACTORY->true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(FACTORY->false_value());
- loaded.Bind();
- }
-
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (eg, the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(FACTORY->true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(FACTORY->false_value());
- }
- loaded.Bind();
- }
- }
- }
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObjectOperand());
- } else {
- Result temp = allocator_->Allocate();
- __ mov(temp.reg(), GlobalObjectOperand());
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ mov(reg, GlobalObjectOperand());
- __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
- // In strict mode there is no need for shadow arguments.
- ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-
- // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the arguments marker value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(FACTORY->arguments_marker());
- } else {
- ArgumentsAccessStub stub(is_strict_mode()
- ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
-
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
- scope()->is_strict_mode());
-
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
- // has a local variable named 'arguments'.
- LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !probe.handle()->IsArgumentsMarker();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
- probe.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- if (shadow != NULL) {
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
- }
- return frame_->Pop();
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
-
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If eax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into eax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(eax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-
- in_spilled_code_ = was_in_spilled_code;
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
-
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
-
- if (value.is_integer32()) { // Also takes Smi case.
- Comment cmnt(masm_, "ONLY_INTEGER_32");
- if (FLAG_debug_code) {
- Label ok;
- __ AbortIfNotNumber(value.reg());
- __ test(value.reg(), Immediate(kSmiTagMask));
- __ j(zero, &ok);
- __ fldz();
- __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
- __ FCmp();
- __ j(not_zero, &ok);
- __ Abort("Smi was wrapped in HeapNumber in output from bitop");
- __ bind(&ok);
- }
- // In the integer32 case there are no Smis hidden in heap numbers, so we
- // need only test for Smi zero.
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- value.Unuse();
- dest->Split(not_zero);
- } else if (value.is_number()) {
- Comment cmnt(masm_, "ONLY_NUMBER");
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- __ test(value.reg(), Immediate(kSmiTagMask));
- dest->true_target()->Branch(zero);
- __ fldz();
- __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
- __ FCmp();
- value.Unuse();
- dest->Split(not_zero);
- } else {
- // Fast case checks.
- // 'false' => false.
- __ cmp(value.reg(), FACTORY->false_value());
- dest->false_target()->Branch(equal);
-
- // 'true' => true.
- __ cmp(value.reg(), FACTORY->true_value());
- dest->true_target()->Branch(equal);
-
- // 'undefined' => false.
- __ cmp(value.reg(), FACTORY->undefined_value());
- dest->false_target()->Branch(equal);
-
- // Smi => false iff zero.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- __ test(value.reg(), Immediate(kSmiTagMask));
- dest->true_target()->Branch(zero);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ test(temp.reg(), Operand(temp.reg()));
- temp.Unuse();
- dest->Split(not_equal);
- }
-}
-
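The fast checks in ToBoolean above cover the common ECMA-262 cases before deferring to ToBooleanStub: 'false' and 'undefined' are false, 'true' is true, and a smi is truthy iff non-zero (tagged smi zero is the machine word 0). A condensed model, with a purely illustrative value enum:

#include <cassert>

// Condensed model of the fast ToBoolean dispatch above; the real code
// also compares heap numbers against 0.0 (fldz/FCmp) before the stub.
enum SketchValue { kFalse, kTrue, kUndefined, kSmiZero, kSmiNonZero, kOther };

static bool ToBooleanFast(SketchValue v, bool* handled) {
  *handled = true;
  switch (v) {
    case kFalse:
    case kUndefined:
    case kSmiZero:    return false;  // smi zero is the word 0
    case kTrue:
    case kSmiNonZero: return true;
    default:
      *handled = false;              // fall back to ToBooleanStub
      return false;
  }
}

int main() {
  bool handled;
  assert(ToBooleanFast(kSmiNonZero, &handled) && handled);
  assert(!ToBooleanFast(kUndefined, &handled) && handled);
  ToBooleanFast(kOther, &handled);
  assert(!handled);  // stub case
  return 0;
}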
-
-// Perform or call the specialized stub for a binary operation. Requires the
-// three registers left, right and dst to be distinct and spilled. This
-// deferred operation has up to three entry points: The main one calls the
-// runtime system. The second is for when the result is a non-Smi. The
-// third is for when at least one of the inputs is non-Smi and we have SSE2.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right),
- left_info_(left_info), right_info_(right_info), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
- ASSERT(!left.is(right));
- }
-
- virtual void Generate();
-
- // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
- // Exit().
- virtual bool AutoSaveAndRestore() { return false; }
-
- void JumpToAnswerOutOfRange(Condition cond);
- void JumpToConstantRhs(Condition cond, Smi* smi_value);
- Label* NonSmiInputLabel();
-
- private:
- void GenerateAnswerOutOfRange();
- void GenerateNonSmiInput();
-
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- TypeInfo left_info_;
- TypeInfo right_info_;
- OverwriteMode mode_;
- Label answer_out_of_range_;
- Label non_smi_input_;
- Label constant_rhs_;
- Smi* smi_value_;
-};
-
-
-Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
- if (Token::IsBitOp(op_) &&
- masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- return &non_smi_input_;
- } else {
- return entry_label();
- }
-}
-
-
-void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
- __ j(cond, &answer_out_of_range_);
-}
-
-
-void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
- Smi* smi_value) {
- smi_value_ = smi_value;
- __ j(cond, &constant_rhs_);
-}
-
-
-void DeferredInlineBinaryOperation::Generate() {
- // Registers are not saved implicitly for this stub, so we should not
- // tread on the registers that were not passed to us.
- if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
- ((op_ == Token::ADD) ||
- (op_ == Token::SUB) ||
- (op_ == Token::MUL) ||
- (op_ == Token::DIV))) {
- CpuFeatures::Scope use_sse2(SSE2);
- Label call_runtime, after_alloc_failure;
- Label left_smi, right_smi, load_right, do_op;
- if (!left_info_.IsSmi()) {
- __ test(left_, Immediate(kSmiTagMask));
- __ j(zero, &left_smi);
- if (!left_info_.IsNumber()) {
- __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- __ j(not_equal, &call_runtime);
- }
- __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(dst_, left_);
- }
- __ jmp(&load_right);
-
- __ bind(&left_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left_);
- }
- __ SmiUntag(left_);
- __ cvtsi2sd(xmm0, Operand(left_));
- __ SmiTag(left_);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
-
- __ bind(&load_right);
- if (!right_info_.IsSmi()) {
- __ test(right_, Immediate(kSmiTagMask));
- __ j(zero, &right_smi);
- if (!right_info_.IsNumber()) {
- __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- __ j(not_equal, &call_runtime);
- }
- __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
- __ jmp(&do_op);
-
- __ bind(&right_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right_);
- }
- __ SmiUntag(right_);
- __ cvtsi2sd(xmm1, Operand(right_));
- __ SmiTag(right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
-
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- Exit();
-
-
- __ bind(&after_alloc_failure);
- __ pop(left_);
- __ bind(&call_runtime);
- }
- // Register spilling is not done implicitly for this stub.
- // We can't postpone it any more now though.
- SaveRegisters();
-
- GenericBinaryOpStub stub(op_,
- mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(left_info_, right_info_));
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
- RestoreRegisters();
- Exit();
-
- if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
- GenerateNonSmiInput();
- }
- if (answer_out_of_range_.is_linked()) {
- GenerateAnswerOutOfRange();
- }
-}
-
-
-void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
- // We know at least one of the inputs was not a Smi.
- // This is a third entry point into the deferred code.
- // We may not overwrite left_ because we want to be able
-  // to call the handling code for a non-smi answer and it
- // might want to overwrite the heap number in left_.
- ASSERT(!right_.is(dst_));
- ASSERT(!left_.is(dst_));
- ASSERT(!left_.is(right_));
- // This entry point is used for bit ops where the right hand side
- // is a constant Smi and the left hand side is a heap object. It
- // is also used for bit ops where both sides are unknown, but where
- // at least one of them is a heap object.
- bool rhs_is_constant = constant_rhs_.is_linked();
- // We can't generate code for both cases.
- ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
-
- if (FLAG_debug_code) {
- __ int3(); // We don't fall through into this code.
- }
-
- __ bind(&non_smi_input_);
-
- if (rhs_is_constant) {
- __ bind(&constant_rhs_);
- // In this case the input is a heap object and it is in the dst_ register.
- // The left_ and right_ registers have not been initialized yet.
- __ mov(right_, Immediate(smi_value_));
- __ mov(left_, Operand(dst_));
- if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- __ jmp(entry_label());
- return;
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- __ JumpIfNotNumber(dst_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_);
- }
- } else {
- // We know we have SSE2 here because otherwise the label is not linked (see
- // NonSmiInputLabel).
- CpuFeatures::Scope use_sse2(SSE2);
- // Handle the non-constant right hand side situation:
- if (left_info_.IsSmi()) {
- // Right is a heap object.
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, dst_, right_info_, entry_label());
- __ mov(dst_, Operand(left_));
- __ SmiUntag(dst_);
- } else if (right_info_.IsSmi()) {
- // Left is a heap object.
- __ JumpIfNotNumber(left_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_);
- } else {
- // Here we don't know if it's one or both that is a heap object.
- Label only_right_is_heap_object, got_both;
- __ mov(dst_, Operand(left_));
- __ SmiUntag(dst_, &only_right_is_heap_object);
- // Left was a heap object.
- __ JumpIfNotNumber(left_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_, &got_both);
- // Both were heap objects.
- __ rcl(right_, 1); // Put tag back.
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
- __ jmp(&got_both);
- __ bind(&only_right_is_heap_object);
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
- __ bind(&got_both);
- }
- }
- ASSERT(op_ == Token::BIT_AND ||
- op_ == Token::BIT_OR ||
- op_ == Token::BIT_XOR ||
- right_.is(ecx));
- switch (op_) {
- case Token::BIT_AND: __ and_(dst_, Operand(right_)); break;
- case Token::BIT_OR: __ or_(dst_, Operand(right_)); break;
- case Token::BIT_XOR: __ xor_(dst_, Operand(right_)); break;
- case Token::SHR: __ shr_cl(dst_); break;
- case Token::SAR: __ sar_cl(dst_); break;
- case Token::SHL: __ shl_cl(dst_); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check that the *unsigned* result fits in a smi. Neither of
- // the two high-order bits can be set:
- // * 0x80000000: high bit would be lost when smi tagging.
- // * 0x40000000: this number would convert to negative when smi
- // tagging.
- __ test(dst_, Immediate(0xc0000000));
- __ j(not_zero, &answer_out_of_range_);
- } else {
- // Check that the *signed* result fits in a smi.
- __ cmp(dst_, 0xc0000000);
- __ j(negative, &answer_out_of_range_);
- }
- __ SmiTag(dst_);
- Exit();
-}
-
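The test(dst_, Immediate(0xc0000000)) above encodes the comment's rule directly: an unsigned shift result fits the 31-bit smi payload only if bits 30 and 31 are both clear, i.e. only if it is below 0x40000000. In plain C++:

#include <cassert>
#include <cstdint>

// An unsigned result fits a 31-bit smi payload iff it is below
// 0x40000000, i.e. iff (value & 0xc0000000) == 0 -- the test above.
static bool UnsignedFitsSmi(uint32_t value) {
  return (value & 0xc0000000u) == 0;
}

int main() {
  assert(UnsignedFitsSmi(0x3fffffffu));   // largest smi-safe value
  assert(!UnsignedFitsSmi(0x40000000u));  // would turn negative when tagged
  assert(!UnsignedFitsSmi(0x80000000u));  // high bit lost when tagged
  return 0;
}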
-
-void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
- Label after_alloc_failure2;
- Label allocation_ok;
- __ bind(&after_alloc_failure2);
- // We have to allocate a number, causing a GC, while keeping hold of
- // the answer in dst_. The answer is not a Smi. We can't just call the
- // runtime shift function here because we already threw away the inputs.
- __ xor_(left_, Operand(left_));
- __ shl(dst_, 1); // Put top bit in carry flag and Smi tag the low bits.
- __ rcr(left_, 1); // Rotate with carry.
- __ push(dst_); // Smi tagged low 31 bits.
- __ push(left_); // 0 or 0x80000000, which is Smi tagged in both cases.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- if (!left_.is(eax)) {
- __ mov(left_, eax);
- }
- __ pop(right_); // High bit.
- __ pop(dst_); // Low 31 bits.
- __ shr(dst_, 1); // Put 0 in top bit.
- __ or_(dst_, Operand(right_));
- __ jmp(&allocation_ok);
-
- // This is the second entry point to the deferred code. It is used only by
- // the bit operations.
- // The dst_ register has the answer. It is not Smi tagged. If mode_ is
- // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
- // or a Smi.
- // Put a heap number pointer in left_.
- __ bind(&answer_out_of_range_);
- SaveRegisters();
- if (mode_ == OVERWRITE_LEFT) {
- __ test(left_, Immediate(kSmiTagMask));
- __ j(not_zero, &allocation_ok);
- }
- // This trashes right_.
- __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
- __ bind(&allocation_ok);
- if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
- op_ != Token::SHR) {
- CpuFeatures::Scope use_sse2(SSE2);
- ASSERT(Token::IsBitOp(op_));
- // Signed conversion.
- __ cvtsi2sd(xmm0, Operand(dst_));
- __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
- } else {
- if (op_ == Token::SHR) {
- __ push(Immediate(0)); // High word of unsigned value.
- __ push(dst_);
- __ fild_d(Operand(esp, 0));
- __ Drop(2);
- } else {
- ASSERT(Token::IsBitOp(op_));
- __ push(dst_);
- __ fild_s(Operand(esp, 0)); // Signed conversion.
- __ pop(dst_);
- }
- __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
- }
- __ mov(dst_, left_);
- RestoreRegisters();
- Exit();
-}
-
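The shl/rcr sequence in GenerateAnswerOutOfRange keeps a raw 32-bit answer alive across the GC-triggering Runtime::kNumberAlloc call by splitting it into two smi-safe words: the smi-tagged low 31 bits, and a word that is either 0 or 0x80000000 (both valid smis). A C++ model of that split and of the shr/or_ rejoin:

#include <cassert>
#include <cstdint>

// Split a raw 32-bit value into two smi-safe words and rejoin them,
// mirroring the shl/rcr (split) and shr/or (rejoin) sequences above.
static void Split(uint32_t value, uint32_t* low_tagged, uint32_t* high_bit) {
  *low_tagged = value << 1;         // smi-tagged low 31 bits
  *high_bit = value & 0x80000000u;  // 0 or 0x80000000, both smi-tagged
}
static uint32_t Rejoin(uint32_t low_tagged, uint32_t high_bit) {
  return (low_tagged >> 1) | high_bit;  // shr puts 0 in the top bit
}

int main() {
  uint32_t low, high;
  Split(0xdeadbeefu, &low, &high);
  assert(Rejoin(low, high) == 0xdeadbeefu);
  Split(0x7fffffffu, &low, &high);
  assert(high == 0 && Rejoin(low, high) == 0x7fffffffu);
  return 0;
}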
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // Rely on the fact that smis have a 31 bit payload on ia32.
- STATIC_ASSERT(kSmiValueSize == 31);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_AND: {
- // Anding with positive Smis will give you a Smi.
- if (right.is_constant() && right.handle()->IsSmi() &&
- Smi::cast(*right.handle())->value() >= 0) {
- return TypeInfo::Smi();
- } else if (left.is_constant() && left.handle()->IsSmi() &&
- Smi::cast(*left.handle())->value() >= 0) {
- return TypeInfo::Smi();
- }
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- }
- case Token::BIT_OR: {
- // Oring with negative Smis will give you a Smi.
- if (right.is_constant() && right.handle()->IsSmi() &&
- Smi::cast(*right.handle())->value() < 0) {
- return TypeInfo::Smi();
- } else if (left.is_constant() && left.handle()->IsSmi() &&
- Smi::cast(*left.handle())->value() < 0) {
- return TypeInfo::Smi();
- }
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- }
- case Token::BIT_XOR:
- // Result is always a 32 bit integer. Smi property of inputs is preserved.
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- case Token::SAR:
- if (left.is_smi()) return TypeInfo::Smi();
- // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
- // Shift amount is masked with 0x1F (ECMA standard 11.7.2).
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- case Token::SHR:
- // Result is a smi if we shift by a constant >= 2, an integer32 if
- // we shift by 1, and an unsigned 32-bit integer if we shift by 0.
- if (right.is_constant() && right.handle()->IsSmi()) {
- int shift_amount = Smi::cast(*right.handle())->value() & 0x1F;
- if (shift_amount > 1) {
- return TypeInfo::Smi();
- } else if (shift_amount > 0) {
- return TypeInfo::Integer32();
- }
- }
- return TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsSmi()) {
- // The Integer32 range is big enough to take the sum of any two Smis.
- return TypeInfo::Integer32();
- } else if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SHL:
- return TypeInfo::Integer32();
- case Token::SUB:
- // The Integer32 range is big enough to take the difference of any two
- // Smis.
- return (operands_type.IsSmi()) ?
- TypeInfo::Integer32() :
- TypeInfo::Number();
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
-
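The BIT_AND and BIT_OR rules in CalculateTypeInfo above both reduce to range arguments: AND with a non-negative smi can only clear bits, so the result lands in [0, smi]; OR with a negative smi keeps bits 30 and 31 set, pinning the result inside [-2^30, -1]. Checked numerically:

#include <cassert>
#include <cstdint>

int main() {
  int32_t any = static_cast<int32_t>(0xfeedf00du);  // arbitrary 32-bit value

  // AND with a non-negative smi keeps only that smi's bits.
  assert((any & 0x7fff) >= 0 && (any & 0x7fff) <= 0x7fff);

  // A negative smi lies in [-2^30, -1], so bits 30 and 31 are both set;
  // OR preserves set bits, keeping the result inside [-2^30, -1].
  assert((any | -4) < 0 && (any | -4) >= -(1 << 30));
  return 0;
}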
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
-
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- }
- } else if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
- return;
- }
- // Neither operand is known to be a string.
- }
-
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
-
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
- }
-
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
-
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- }
- }
-
- answer.set_type_info(result_type);
- frame_->Push(&answer);
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right) {
- if (stub->ArgsInRegistersSupported()) {
- stub->SetArgsInRegisters();
- return frame_->CallStub(stub, left, right);
- } else {
- frame_->Push(left);
- frame_->Push(right);
- return frame_->CallStub(stub, 2);
- }
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = HEAP->undefined_value();
- switch (op) {
- case Token::ADD:
- if (Smi::IsValid(left + right)) {
- answer_object = Smi::FromInt(left + right);
- }
- break;
- case Token::SUB:
- if (Smi::IsValid(left - right)) {
- answer_object = Smi::FromInt(left - right);
- }
- break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left >= 0 && right >= 0)) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
- break;
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
- break;
- case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
- break;
- case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
- break;
-
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
- }
- break;
- }
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
- break;
- }
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
- // Perform arithmetic shift of a negative number by
- // complementing number, logical shifting, complementing again.
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
- } else {
- unsigned_left >>= shift_amount;
- }
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (answer_object->IsUndefined()) {
- return false;
- }
- frame_->Push(Handle<Object>(answer_object));
- return true;
-}
-
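FoldConstantSmis above folds Token::SAR on a negative input by complementing, shifting logically, and complementing again; the complement turns the sign-extension ones into zeros and back, so the sequence is exactly an arithmetic shift without relying on implementation-defined >> of negative ints. A quick check:

#include <cassert>
#include <cstdint>

// Portable arithmetic shift right via complement / logical shift /
// complement, the same trick the Token::SAR case above uses.
static int32_t ArithmeticShiftRight(int32_t value, int shift_amount) {
  uint32_t bits = static_cast<uint32_t>(value);
  if (value < 0) {
    bits = ~bits;           // sign-extension ones become zeros
    bits >>= shift_amount;  // logical shift is now safe
    bits = ~bits;           // restore the ones
  } else {
    bits >>= shift_amount;
  }
  return static_cast<int32_t>(bits);
}

int main() {
  assert(ArithmeticShiftRight(-8, 2) == -2);
  assert(ArithmeticShiftRight(-1, 5) == -1);  // sign bits stay set
  assert(ArithmeticShiftRight(40, 3) == 5);
  return 0;
}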
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi) {
- TypeInfo left_info = left->type_info();
- TypeInfo right_info = right->type_info();
- if (left_info.IsDouble() || left_info.IsString() ||
- right_info.IsDouble() || right_info.IsString()) {
- // We know that left and right are not both smi. Don't do any tests.
- return;
- }
-
- if (left->reg().is(right->reg())) {
- if (!left_info.IsSmi()) {
- __ test(left->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), left->reg());
- __ or_(temp.reg(), Operand(right->reg()));
- __ test(temp.reg(), Immediate(kSmiTagMask));
- temp.Unuse();
- both_smi->Branch(zero);
- } else {
- __ test(left->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- if (!right_info.IsSmi()) {
- __ test(right->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- JumpIfNotBothSmiUsingTypeInfo(left,
- right,
- scratch,
- left_info,
- right_info,
- deferred->entry_label());
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- Label* on_not_smi) {
- if (left.is(right)) {
- if (!left_info.IsSmi()) {
- __ test(left, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- __ mov(scratch, left);
- __ or_(scratch, Operand(right));
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- __ test(left, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- if (!right_info.IsSmi()) {
- __ test(right, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- }
-}
-
-
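// Why a single OR suffices in the helpers above to test two values at
// once: smis are stored shifted left by one with a zero tag bit
// (kSmiTag == 0, kSmiTagSize == 1), so a word is a smi iff its low bit is
// clear.  A sketch in plain C++:
#include <cstdint>

static inline bool BothSmi(intptr_t a, intptr_t b) {
  // (a | b) has its low bit set iff at least one operand has a set low
  // bit, i.e. iff at least one of them is not a smi.
  return ((a | b) & 1) == 0;  // 1 plays the role of kSmiTagMask
}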
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need eax as the quotient register, edx as the remainder
- // register, neither left nor right in eax or edx, and left copied
- // to eax.
- Result quotient;
- Result remainder;
- bool left_is_in_eax = false;
- // Step 1: get eax for quotient.
- if ((left->is_register() && left->reg().is(eax)) ||
- (right->is_register() && right->reg().is(eax))) {
- // One or both of them are in eax. Use a fresh non-edx register for
- // them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(edx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(eax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_eax = true;
- }
- if (right->is_register() && right->reg().is(eax)) {
- quotient = *right;
- *right = fresh;
- }
- __ mov(fresh.reg(), eax);
- } else {
- // Neither left nor right is in eax.
- quotient = allocator_->Allocate(eax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(eax));
- ASSERT(!(left->is_register() && left->reg().is(eax)));
- ASSERT(!(right->is_register() && right->reg().is(eax)));
-
- // Step 2: get edx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(edx)) ||
- (right->is_register() && right->reg().is(edx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(edx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(edx)) {
- remainder = *right;
- *right = fresh;
- }
- __ mov(fresh.reg(), edx);
- } else {
- // Neither left nor right is in edx.
- remainder = allocator_->Allocate(edx);
- }
- }
- ASSERT(remainder.is_register() && remainder.reg().is(edx));
- ASSERT(!(left->is_register() && left->reg().is(edx)));
- ASSERT(!(right->is_register() && right->reg().is(edx)));
-
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(eax);
- frame_->Spill(edx);
- // DeferredInlineBinaryOperation requires all the registers that it is
- // told about to be spilled and distinct.
- Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
-
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? eax : edx,
- left->reg(),
- distinct_right.reg(),
- left_type_info,
- right_type_info,
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
- left_type_info, right_type_info, deferred);
- if (!left_is_in_eax) {
- __ mov(eax, left->reg());
- }
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ test(right->reg(), Operand(right->reg()));
- deferred->Branch(zero);
- // Divide edx:eax by the right operand.
- __ idiv(right->reg());
-
- // Complete the operation.
- if (op == Token::DIV) {
- // Check for negative zero result. If result is zero, and divisor
- // is negative, return a floating point negative zero. The
- // virtual frame is unchanged in this block, so local control flow
- // can use a Label rather than a JumpTarget. If the context of this
- // expression will treat -0 like 0, do not do this test.
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(left->reg(), Operand(left->reg()));
- __ j(not_zero, &non_zero_result);
- __ test(right->reg(), Operand(right->reg()));
- deferred->Branch(negative);
- __ bind(&non_zero_result);
- }
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by
- // the idiv instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- deferred->Branch(equal);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- deferred->Branch(not_zero);
- // Tag the result and store it in the quotient register.
- __ SmiTag(eax);
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
- } else {
- ASSERT(op == Token::MOD);
- // Check for a negative zero result. If the result is zero, and
- // the dividend is negative, return a floating point negative
- // zero. The frame is unchanged in this block, so local control
- // flow can use a Label rather than a JumpTarget.
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(edx, Operand(edx));
- __ j(not_zero, &non_zero_result, taken);
- __ test(left->reg(), Operand(left->reg()));
- deferred->Branch(negative);
- __ bind(&non_zero_result);
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
- }
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of ecx if necessary.
- if (left->is_register() && left->reg().is(ecx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ mov(left->reg(), ecx);
- }
- right->ToRegister(ecx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(ecx));
- ASSERT(right->is_register() && right->reg().is(ecx));
- if (left_type_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- }
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- }
-
- // We will modify right, so it must be spilled.
- frame_->Spill(ecx);
- // DeferredInlineBinaryOperation requires all the registers that it is told
- // about to be spilled and distinct. We know that right is ecx and left is
- // not ecx.
- frame_->Spill(left->reg());
-
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- ecx,
- left_type_info,
- right_type_info,
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info,
- deferred->NonSmiInputLabel());
-
- // Untag both operands.
- __ mov(answer.reg(), left->reg());
- __ SmiUntag(answer.reg());
- __ SmiUntag(right->reg()); // Right is ecx.
-
- // Perform the operation.
- ASSERT(right->reg().is(ecx));
- switch (op) {
- case Token::SAR: {
- __ sar_cl(answer.reg());
- if (!left_type_info.IsSmi()) {
- // Check that the *signed* result fits in a smi.
- __ cmp(answer.reg(), 0xc0000000);
- deferred->JumpToAnswerOutOfRange(negative);
- }
- break;
- }
- case Token::SHR: {
- __ shr_cl(answer.reg());
- // Check that the *unsigned* result fits in a smi. Neither of
- // the two high-order bits can be set:
- // * 0x80000000: high bit would be lost when smi tagging.
- // * 0x40000000: this number would convert to negative when smi
- // tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi. If the answer cannot be represented by a
- // smi, restore the left and right arguments, and jump to slow
- // case. The low bit of the left argument may be lost, but only
- // in a case where it is dropped anyway.
- __ test(answer.reg(), Immediate(0xc0000000));
- deferred->JumpToAnswerOutOfRange(not_zero);
- break;
- }
- case Token::SHL: {
- __ shl_cl(answer.reg());
- // Check that the *signed* result fits in a smi.
- __ cmp(answer.reg(), 0xc0000000);
- deferred->JumpToAnswerOutOfRange(negative);
- break;
- }
- default:
- UNREACHABLE();
- }
- // Smi-tag the result in answer.
- __ SmiTag(answer.reg());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
- }
-
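// The 0xc0000000 comparisons in the shift cases above check an untagged
// 32-bit result against the 31-bit smi payload range in one instruction.
// Both checks restated in plain C++ (a sketch, assuming two's complement):
#include <cstdint>

static inline bool FitsSignedSmi(int32_t v) {
  // 'cmp v, 0xc0000000; js out_of_range' jumps iff v + 2^30 has bit 31
  // set, i.e. iff v lies outside [-2^30, 2^30 - 1].
  return ((static_cast<uint32_t>(v) + 0x40000000u) >> 31) == 0;
}

static inline bool FitsUnsignedSmi(uint32_t v) {
  // After a logical shift the result is unsigned, so neither of the two
  // top bits may be set ('test v, 0xc0000000; jnz out_of_range').
  return (v & 0xc0000000u) == 0;
}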
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // DeferredInlineBinaryOperation requires all the registers that it is told
- // about to be spilled.
- Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- distinct_right.reg(),
- left_type_info,
- right_type_info,
- overwrite_mode);
- Label non_smi_bit_op;
- if (op != Token::BIT_OR) {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info,
- deferred->NonSmiInputLabel());
- }
-
- __ mov(answer.reg(), left->reg());
- switch (op) {
- case Token::ADD:
- __ add(answer.reg(), Operand(right->reg()));
- deferred->Branch(overflow);
- break;
-
- case Token::SUB:
- __ sub(answer.reg(), Operand(right->reg()));
- deferred->Branch(overflow);
- break;
-
- case Token::MUL: {
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // Remove smi tag from the left operand (but keep sign).
- // Left-hand operand has been copied into answer.
- __ SmiUntag(answer.reg());
- // Do multiplication of smis, leaving result in answer.
- __ imul(answer.reg(), Operand(right->reg()));
- // Go slow on overflows.
- deferred->Branch(overflow);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case. The frame is unchanged
- // in this block, so local control flow can use a Label rather
- // than a JumpTarget.
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(answer.reg(), Operand(answer.reg()));
- __ j(not_zero, &non_zero_result, taken);
- __ mov(answer.reg(), left->reg());
- __ or_(answer.reg(), Operand(right->reg()));
- deferred->Branch(negative);
- __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
- __ bind(&non_zero_result);
- }
- break;
- }
-
- case Token::BIT_OR:
- __ or_(answer.reg(), Operand(right->reg()));
- __ test(answer.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, deferred->NonSmiInputLabel());
- break;
-
- case Token::BIT_AND:
- __ and_(answer.reg(), Operand(right->reg()));
- break;
-
- case Token::BIT_XOR:
- __ xor_(answer.reg(), Operand(right->reg()));
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
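// The div/mod fast path in LikelySmiBinaryOperation above bails out to
// the stub in three corner cases beyond the smi check.  Restated as a
// plain C++ predicate over untagged smi payloads (a sketch, not V8 code):
#include <cstdint>

static inline bool SmiDivFastPath(int32_t left, int32_t right,
                                  int32_t* quotient) {
  if (right == 0) return false;              // division by zero
  if (left == 0 && right < 0) return false;  // result would be -0.0
  // -2^30 / -1 == 2^30, one past the largest smi; this is the
  // 'cmp eax, 0x40000000' check, needed because idiv does not set the
  // overflow flag.
  if (left == -(1 << 30) && right == -1) return false;
  if (left % right != 0) return false;       // JS '/' is not integral
  *quotient = left / right;
  return true;
}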
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- TypeInfo type_info,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- type_info_(type_info),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src + value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ sub(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::ADD,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ sub(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::ADD,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src - value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative subtraction and call the
-// appropriate specialized stub for subtract. The result is left in
-// dst.
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
- // Undo the optimistic sub operation and call the shared stub.
- __ add(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::SUB,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
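// The deferred add/sub classes above rely on an "optimistic" pattern:
// since two tagged smis add to the tagged sum (the tag bits are zero),
// the code adds first and only undoes the addition when the overflow
// flag fires, so the slow-path stub sees the original operand.  A sketch
// using GCC/Clang overflow builtins; SlowPathAdd is a hypothetical
// stand-in for the GenericBinaryOpStub call:
#include <cstdint>

static int32_t SlowPathAdd(int32_t a, int32_t b);  // hypothetical

static inline int32_t OptimisticAdd(int32_t operand, int32_t tagged_value) {
  int32_t result;
  if (__builtin_add_overflow(operand, tagged_value, &result)) {
    // Mirrors '__ sub(Operand(dst_), Immediate(value_))': the original
    // operand, not the wrapped sum, is handed to the stub.
    return SlowPathAdd(operand, tagged_value);
  }
  return result;
}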
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
- } else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
- }
- }
-
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
-
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
-
- // Optimistically add. Call the specialized add stub if the
- // result is not a smi or overflows.
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- }
- __ add(Operand(operand->reg()), Immediate(value));
- deferred->Branch(overflow);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::SUB: {
- DeferredCode* deferred = NULL;
- if (reversed) {
- // The reversed case is only hit when the right operand is not a
- // constant.
- ASSERT(operand->is_register());
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- __ Set(answer.reg(), Immediate(value));
- deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- operand->type_info(),
- overwrite_mode);
- __ sub(answer.reg(), Operand(operand->reg()));
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- answer = *operand;
- deferred = new DeferredInlineSmiSub(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ sub(Operand(operand->reg()), Immediate(value));
- }
- deferred->Branch(overflow);
- if (!operand->type_info().IsSmi()) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- deferred->BindExit();
- operand->Unuse();
- break;
- }
-
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (!operand->type_info().IsSmi()) {
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- if (shift_value > 0) {
- __ sar(operand->reg(), shift_value);
- __ and_(operand->reg(), ~kSmiTagMask);
- }
- deferred->BindExit();
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- if (shift_value > 0) {
- __ sar(operand->reg(), shift_value);
- __ and_(operand->reg(), ~kSmiTagMask);
- }
- }
- answer = *operand;
- }
- break;
-
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- __ mov(answer.reg(), operand->reg());
- __ SmiUntag(answer.reg());
- __ shr(answer.reg(), shift_value);
- // A negative Smi shifted right by two or more is in the positive Smi range.
- if (shift_value < 2) {
- __ test(answer.reg(), Immediate(0xc0000000));
- deferred->Branch(not_zero);
- }
- operand->Unuse();
- __ SmiTag(answer.reg());
- deferred->BindExit();
- }
- break;
-
- case Token::SHL:
- if (reversed) {
- // Move operand into ecx and also into a second register.
- // If operand is already in a register, take advantage of that.
- // This lets us modify ecx, but still bail out to deferred code.
- Result right;
- Result right_copy_in_ecx;
- TypeInfo right_type_info = operand->type_info();
- operand->ToRegister();
- if (operand->reg().is(ecx)) {
- right = allocator()->Allocate();
- __ mov(right.reg(), ecx);
- frame_->Spill(ecx);
- right_copy_in_ecx = *operand;
- } else {
- right_copy_in_ecx = allocator()->Allocate(ecx);
- __ mov(ecx, operand->reg());
- right = *operand;
- }
- operand->Unuse();
-
- answer = allocator()->Allocate();
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- right.reg(),
- right_type_info,
- overwrite_mode);
- __ mov(answer.reg(), Immediate(int_value));
- __ sar(ecx, kSmiTagSize);
- if (!right_type_info.IsSmi()) {
- deferred->Branch(carry);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(right.reg());
- }
- __ shl_cl(answer.reg());
- __ cmp(answer.reg(), 0xc0000000);
- deferred->Branch(sign);
- __ SmiTag(answer.reg());
-
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- __ mov(answer.reg(), operand->reg());
- STATIC_ASSERT(kSmiTag == 0); // adjust code if not the case
- // We do no shifts, only the Smi conversion, if shift_value is 1.
- if (shift_value > 1) {
- __ shl(answer.reg(), shift_value - 1);
- }
- // Convert int result to Smi, checking that it is in int range.
- STATIC_ASSERT(kSmiTagSize == 1); // adjust code if not the case
- __ add(answer.reg(), Operand(answer.reg()));
- deferred->Branch(overflow);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- // DeferredInlineBinaryOperation requires all the registers that it is
- // told about to be spilled.
- frame_->Spill(operand->reg());
- DeferredInlineBinaryOperation* deferred = NULL;
- if (!operand->type_info().IsSmi()) {
- Result left = allocator()->Allocate();
- ASSERT(left.is_valid());
- Result right = allocator()->Allocate();
- ASSERT(right.is_valid());
- deferred = new DeferredInlineBinaryOperation(
- op,
- operand->reg(),
- left.reg(),
- right.reg(),
- operand->type_info(),
- TypeInfo::Smi(),
- overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->JumpToConstantRhs(not_zero, smi_value);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- if (op == Token::BIT_AND) {
- __ and_(Operand(operand->reg()), Immediate(value));
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ xor_(Operand(operand->reg()), Immediate(value));
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ or_(Operand(operand->reg()), Immediate(value));
- }
- }
- if (deferred != NULL) deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::DIV:
- if (!reversed && int_value == 2) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
-
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- // Check that the lowest log2(value) bits of the operand are zero,
- // and test the smi tag at the same time.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ test(operand->reg(), Immediate(3));
- deferred->Branch(not_zero); // Branch if non-smi or odd smi.
- __ sar(operand->reg(), 1);
- deferred->BindExit();
- answer = *operand;
- } else {
- // We cannot fall through to the default case because the MOD case
- // is in between, so we duplicate the default case here.
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- }
- break;
-
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- // Check for negative or non-Smi left hand side.
- __ test(operand->reg(), Immediate(kSmiTagMask | kSmiSignMask));
- deferred->Branch(not_zero);
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
- } else {
- __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
-
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
- }
- }
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
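// The power-of-two MOD fast path above operates directly on the tagged
// value: for a non-negative smi x (payload shifted left by one, zero tag
// bit) and a power-of-two modulus m, masking with ((m << 1) - 1) keeps
// the low log2(m) payload bits plus the zero tag bit, which is exactly
// the tagged value of x % m.  A sketch:
#include <cstdint>

static inline int32_t TaggedSmiMod(int32_t tagged_x, int32_t m) {
  // Assumes tagged_x >= 0 and m a power of two; the generated code
  // guarantees this by testing kSmiTagMask | kSmiSignMask first.
  return tagged_x & ((m << 1) - 1);
}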
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
- }
- UNREACHABLE();
- return equal;
-}
-
-
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
- bool inline_number_compare) {
- CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
- if (nan_info == kCantBothBeNaN) {
- flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
- }
- if (inline_number_compare) {
- flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
- }
- return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
-
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
- }
-
- if (left_side_constant_smi || right_side_constant_smi) {
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
- left_side_constant_smi, right_side_constant_smi,
- is_loop_condition);
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant 1 character string.
- // If left side is a constant 1-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- __ test(left_side.reg(), Immediate(kSmiTagMask));
- is_not_string.Branch(zero, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(left_side.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
- // Check if the left hand side has the same type as the right hand
- // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- STATIC_ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- __ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
- __ j(zero, ¬_a_symbol);
- // They are symbols, so do identity compare.
- __ cmp(left_side.reg(), right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
- __ bind(¬_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ascii string.
- __ and_(temp.reg(),
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
- temp.Unuse();
- is_string.Branch(equal, &left_side);
-
- // Set up and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ cmp(result.reg(), 0);
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- left_side = Result(left_reg);
- right_side = Result(right_val);
- // Test string equality and comparison.
- Label comparison_done;
- if (cc == equal) {
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- char_value);
- } else {
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- // If the length is 0 then the jump is taken and the flags
- // correctly represent being less than the one-character string.
- __ j(below, &comparison_done);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- char_value);
- __ j(not_equal, &comparison_done);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ bind(&comparison_done);
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string or constant null.
- // If either side is a non-smi constant, or known to be a heap number,
- // skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
-
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
-
- // Inline the number comparison, handling any combination of smis and
- // heap numbers, if:
- //  - the code is in a loop,
- //  - the compare operation is different from equal, and
- //  - the compare is not a for-loop condition.
- // Equal is excluded because it will most likely be done on smis (not
- // heap numbers), and the code for comparing smis is inlined separately.
- // The same reasoning applies to for-loop conditions, which will also
- // most likely be smi comparisons.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
- // Left and right needed in registers for the following code.
- left_side.ToRegister();
- right_side.ToRegister();
-
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- dest->Split(cc);
- } else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
-
- // In-line check for comparing two smis.
- JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
- if (has_valid_frame()) {
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags =
- ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- if (is_smi.is_linked()) {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
- } else {
- dest->Split(cc);
- }
- }
-
- if (is_smi.is_linked()) {
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
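// Reversing '>' and '<=' at the top of Comparison keeps ECMA-262's
// left-to-right conversion order while letting the rest of the code
// handle only less/equal/greater_equal.  A self-contained sketch of the
// reversal this relies on (mirroring the effect of ReverseCondition,
// not its implementation):
enum SketchCondition { kLess, kLessEqual, kEqual, kGreaterEqual, kGreater };

static inline SketchCondition Reverse(SketchCondition cc) {
  switch (cc) {
    case kLess:         return kGreater;       // a <  b  ==  b >  a
    case kGreater:      return kLess;
    case kLessEqual:    return kGreaterEqual;  // a <= b  ==  b >= a
    case kGreaterEqual: return kLessEqual;
    default:            return cc;             // equality is symmetric
  }
}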
-void CodeGenerator::ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* dest,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side->handle())->value();
- int right_value = Smi::cast(*right_side->handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result* temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side->ToRegister();
- Register left_reg = left_side->reg();
- Handle<Object> right_val = right_side->handle();
-
- if (left_side->is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_reg);
- }
- // Test smi equality and comparison by signed int comparison.
- if (IsUnsafeSmi(right_side->handle())) {
- right_side->ToRegister();
- __ cmp(left_reg, Operand(right_side->reg()));
- } else {
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- }
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- } else {
- // The only remaining case is the one where the left side may not be a smi.
- JumpTarget is_smi;
- if (cc == equal) {
- // We can do the equality comparison before the smi check.
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- dest->true_target()->Branch(equal);
- __ test(left_reg, Immediate(kSmiTagMask));
- dest->false_target()->Branch(zero);
- } else {
- // Do the smi check, then the comparison.
- __ test(left_reg, Immediate(kSmiTagMask));
- is_smi.Branch(zero, left_side, right_side);
- }
-
- // Jump or fall through to here if we are comparing a non-smi to a
- // constant smi. If the non-smi is a heap number and this is not
- // a loop condition, inline the floating point code.
- if (!is_loop_condition &&
- masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- CpuFeatures::Scope use_sse2(SSE2);
- JumpTarget not_number;
- __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(FACTORY->heap_number_map()));
- not_number.Branch(not_equal, left_side);
- __ movdbl(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ mov(temp.reg(), Immediate(value));
- __ cvtsi2sd(xmm0, Operand(temp.reg()));
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, left_side);
- left_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(left_side);
- }
-
- // Set up and call the compare stub.
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, left_side, right_side);
- result.ToRegister();
- __ test(result.reg(), Operand(result.reg()));
- result.Unuse();
- if (cc == equal) {
- dest->Split(cc);
- } else {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- // For performance, it is important that this case is at the end.
- is_smi.Bind(left_side, right_side);
- if (IsUnsafeSmi(right_side->handle())) {
- right_side->ToRegister();
- __ cmp(left_reg, Operand(right_side->reg()));
- } else {
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- }
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
- // Check that the comparison operand is a number. Jump to the not_numbers
- // jump target, passing the left and right results, if it is not.
-static void CheckComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- // Perform check if operand is not known to be a number.
- if (!operand->type_info().IsNumber()) {
- Label done;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &done);
- __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->heap_number_map()));
- not_numbers->Branch(not_equal, left_side, right_side, not_taken);
- __ bind(&done);
- }
-}
-
-
- // Load a comparison operand onto the FPU stack. This assumes that the
- // operand has already been checked and is a number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiUntag(operand->reg());
- __ push(operand->reg());
- __ fild_s(Operand(esp, 0));
- __ pop(operand->reg());
- __ SmiTag(operand->reg());
- } else {
- // Operand type not known, check for smi otherwise assume heap number.
- Label smi;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&smi);
- __ SmiUntag(operand->reg());
- __ push(operand->reg());
- __ fild_s(Operand(esp, 0));
- __ pop(operand->reg());
- __ SmiTag(operand->reg());
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
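// The FPU loader above and the SSE2 variant below share the same
// untag/convert/retag dance: the conversion instruction needs the
// untagged integer, but the register must still hold the tagged smi
// afterwards.  The conversion itself, in C++ terms (a sketch; assumes
// '>>' is an arithmetic shift, as the emitted 'sar' is):
#include <cstdint>

static inline double SmiToDouble(int32_t tagged) {
  return static_cast<double>(tagged >> 1);  // SmiUntag, then convert
}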
-
- // Load a comparison operand into an XMM register. Jump to the not_numbers
- // jump target, passing the left and right results, if the operand is not
- // a number.
-static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiUntag(operand->reg());
- __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
- __ SmiTag(operand->reg());
- } else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- if (!operand->type_info().IsNumber()) {
- __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->heap_number_map()));
- not_numbers->Branch(not_equal, left_side, right_side, taken);
- }
- __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&smi);
- // Convert the smi to a double and keep the original smi.
- __ SmiUntag(operand->reg());
- __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
- __ SmiTag(operand->reg());
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
-
- JumpTarget not_numbers;
- if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
-
- // Load the left and right operands into registers xmm0 and xmm1 and compare.
- LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
- ¬_numbers);
- LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
- ¬_numbers);
- __ ucomisd(xmm0, xmm1);
- } else {
- Label check_right, compare;
-
- // Make sure that both comparison operands are numbers.
- CheckComparisonOperand(masm_, left_side, left_side, right_side,
- ¬_numbers);
- CheckComparisonOperand(masm_, right_side, left_side, right_side,
- ¬_numbers);
-
- // Load the right and left operands onto the FPU stack and compare.
- LoadComparisonOperand(masm_, right_side);
- LoadComparisonOperand(masm_, left_side);
- __ FCmp();
- }
-
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side, not_taken);
-
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
-
- not_numbers.Bind(left_side, right_side);
-}
-
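// ucomisd and FCmp set the parity flag exactly when the comparison is
// unordered, i.e. when a NaN is involved, which is why the code above
// bails out to not_numbers on parity_even before splitting on the
// condition.  The same predicate in plain C++ (a sketch):
#include <cmath>

static inline bool UnorderedCompare(double a, double b) {
  return std::isnan(a) || std::isnan(b);  // PF set after ucomisd
}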
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- frame()->Dup();
- Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of frame at this point:
- // Frame[0]: arguments object of the current function or the hole.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- frame_->Dup();
- Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsArgumentsMarker();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
- probe.Unuse();
- __ j(not_equal, &slow);
- }
-
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // esp[0]: receiver
- // esp[1]: applicand.apply
- // esp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ mov(eax, Operand(esp, 0));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &build_args);
- __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
- __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code(masm()->isolate()->builtins()->builtin(
- Builtins::kFunctionApply));
- __ cmp(Operand(ecx), Immediate(apply_code));
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ mov(edi, Operand(esp, 2 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &build_args);
-
- // Copy the arguments to this function, possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(eax, Immediate(scope()->num_parameters()));
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but cap
- // the number copied to avoid stack overflow.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
- __ mov(ecx, Operand(eax));
- __ cmp(eax, kArgumentsLimit);
- __ j(above, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // ecx is a small non-negative integer, due to the test above.
- __ test(ecx, Operand(ecx));
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ push(eax);
- // Stack now has 1 element:
- // esp[0]: result
- __ jmp(&done);
-
- // Slow case: allocate the arguments object since we know it isn't
- // there, and fall through to the generic code below that calls
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we jumped here from a point where:
- // esp[0]: receiver
- // esp[1]: applicand.apply
- // esp[2]: applicand.
-
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
- }
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // esp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
- frame_->RestoreContextRegister();
-}
-
-
-class DeferredStackCheck: public DeferredCode {
- public:
- DeferredStackCheck() {
- set_comment("[ DeferredStackCheck");
- }
-
- virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
- StackCheckStub stub;
- __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm()->isolate());
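- // The stack grows downwards, so esp below the limit means we are out
- // of (or nearly out of) stack space; the deferred code then calls the
- // StackCheckStub out of line.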
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- deferred->Branch(below);
- deferred->BindExit();
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(esi); // The context is the first argument.
- frame_->EmitPush(Immediate(pairs));
- frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag())));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Sync the virtual frame eagerly
- // so we can simply push the arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Immediate(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Immediate(FACTORY->the_hole_value()));
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
- }
- Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(FACTORY->the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
- if (val != NULL) {
- {
- // Set the initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
- }
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- // Remove the lingering expression result from the top of stack.
- frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if
- // statement are present.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- JumpTarget then;
- JumpTarget else_;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Visit(node->else_statement());
-
- // We may have dangling jumps to the then part.
- if (then.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Visit(node->then_statement());
-
- if (else_.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- }
-
- } else if (has_then_stm) {
- ASSERT(!has_else_stm);
- JumpTarget then;
- ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // then part.
- if (then.is_linked()) {
- exit.Unuse();
- exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then label was bound.
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- ASSERT(!has_then_stm);
- JumpTarget else_;
- ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.true_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // else part.
- if (else_.is_linked()) {
- exit.Unuse();
- exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- } else {
- // The else label was bound.
- Visit(node->else_statement());
- }
-
- } else {
- ASSERT(!has_then_stm && !has_else_stm);
- // We only care about the condition's side effects (not its value
- // or control flow effect). LoadCondition is called without
- // forcing control flow.
- ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), &dest, false);
- if (!dest.is_used()) {
- // We got a value on the frame rather than (or in addition to)
- // control flow.
- frame_->Drop();
- }
- }
-
- if (exit.is_linked()) {
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- Result return_value = frame_->Pop();
- masm()->positions_recorder()->WriteRecordedPositions();
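- // (Flush any pending recorded source positions now, so the return is
- // attributed to the correct statement.)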
- if (function_return_is_shadowed_) {
- function_return_.Jump(&return_value);
- } else {
- frame_->PrepareForReturn();
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump(&return_value);
- } else {
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to eax. This is safe because the current frame does not
- // contain a reference to eax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
- return_value->ToRegister(eax);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
-
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, ecx);
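- // (ecx serves only as a scratch register here; Ret needs it in case
- // arguments_bytes does not fit into the ret instruction's immediate.)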
- DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- Result context;
- if (node->is_catch_block()) {
- context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
- }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
- // Update context local.
- frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- // Compile the switch value.
- Load(node->tag());
-
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- JumpTarget next_test;
- // Compile the case label expressions and comparisons. Exit early
- // if a comparison is unconditionally true. The target next_test is
- // bound before the loop in order to indicate control flow to the
- // first comparison.
- next_test.Bind();
- for (int i = 0; i < length && !next_test.is_unused(); i++) {
- CaseClause* clause = cases->at(i);
- // The default is not a test, but remember it for later.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- // We recycle the same target next_test for each test. Bind it if
- // the previous test has not done so and then unuse it for the
- // loop.
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- next_test.Unuse();
-
- // Duplicate the switch value.
- frame_->Dup();
-
- // Compile the label expression.
- Load(clause->label());
-
- // Compare and branch to the body if true or the next test if
- // false. Prefer the next test as a fall through.
- ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(node, equal, true, &dest);
-
- // If the comparison fell through to the true target, jump to the
- // actual body.
- if (dest.true_was_fall_through()) {
- clause->body_target()->Unuse();
- clause->body_target()->Jump();
- }
- }
-
- // If there was control flow to a next test from the last one
- // compiled, compile a jump to the default or break target.
- if (!next_test.is_unused()) {
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- // Drop the switch value.
- frame_->Drop();
- if (default_clause != NULL) {
- default_clause->body_target()->Jump();
- } else {
- node->break_target()->Jump();
- }
- }
-
- // The last instruction emitted was a jump, either to the default
- // clause or the break target, or else to a case body from the loop
- // that compiles the tests.
- ASSERT(!has_valid_frame());
- // Compile case bodies as needed.
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
-
- // There are two ways to reach the body: from the corresponding
- // test or as the fall through of the previous body.
- if (clause->body_target()->is_linked() || has_valid_frame()) {
- if (clause->body_target()->is_linked()) {
- if (has_valid_frame()) {
- // If we have both a jump to the test and a fall through, put
- // a jump on the fall through path to avoid the dropping of
- // the switch value on the test path. The exception is the
- // default which has already had the switch value dropped.
- if (clause->is_default()) {
- clause->body_target()->Bind();
- } else {
- JumpTarget body;
- body.Jump();
- clause->body_target()->Bind();
- frame_->Drop();
- body.Bind();
- }
- } else {
- // No fall through to worry about.
- clause->body_target()->Bind();
- if (!clause->is_default()) {
- frame_->Drop();
- }
- }
- } else {
- // Otherwise, we have only fall through.
- ASSERT(has_valid_frame());
- }
-
- // We are now prepared to compile the body.
- Comment cmnt(masm_, "[ Case body");
- VisitStatements(clause->statements());
- }
- clause->body_target()->Unuse();
- }
-
- // We may not have a valid frame here so bind the break target only
- // if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- ConditionAnalysis info = AnalyzeCondition(node->cond());
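- // AnalyzeCondition recognizes literal true/false conditions (and a
- // missing for-loop condition) as ALWAYS_TRUE or ALWAYS_FALSE;
- // everything else is DONT_KNOW.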
- // Label the top of the loop for the backward jump if necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // Use the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- // No need to label it.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- break;
- case DONT_KNOW:
- // Continue is the test, so use the backward body target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back
- // to the top and bind the break target at the exit.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case ALWAYS_FALSE:
- // We may have had continues or breaks in the body.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- }
-
- DecrementLoopNesting();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop with the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is the test at the bottom, no need to label the test
- // at the top. The body is a backward target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Label the test at the top as the continue target. The body
- // is a forward-only target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- }
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // The loop body has been labeled with the continue target.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom,
- // then it is the continue target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here and thus an invalid fall-through).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // If we have chosen not to recompile the test at the bottom,
- // jump back to the one at the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
- ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
- if (slot->type() == Slot::LOCAL) {
- frame_->SetTypeForLocalAt(slot->index(), info);
- } else {
- frame_->SetTypeForParamAt(slot->index(), info);
- }
- if (FLAG_debug_code && info.IsSmi()) {
- if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
- } else {
- frame_->PushParameterAt(slot->index());
- }
- Result var = frame_->Pop();
- var.ToRegister();
- __ AbortIfNotSmi(var.reg());
- }
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
-
- // Compile the init expression if present.
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything else.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
-
- // Target for backward edge if no test at the bottom, otherwise
- // unused.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
- // Target for backward edge if there is a test at the bottom,
- // otherwise used as target for test at the top.
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop.
- if (node->next() == NULL) {
- // Use the continue target if there is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // Otherwise use the backward loop target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is either the update expression or the test at the
- // bottom, no need to label the test at the top.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else if (node->next() == NULL) {
- // We are not recompiling the test at the bottom and there is no
- // update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // We are not recompiling the test at the bottom and there is an
- // update expression.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
-
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- // We know that the loop index is a smi if it is not modified in the
- // loop body and it is checked against a constant limit in the loop
- // condition. In this case, we reset the static type information of the
- // loop index to smi before compiling the body, the update expression, and
- // the bottom check of the loop condition.
- if (node->is_fast_smi_loop()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
- }
-
- Visit(node->body());
-
- // If there is an update expression, compile it if necessary.
- if (node->next() != NULL) {
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- // Control can reach the update by falling out of the body or by a
- // continue.
- if (has_valid_frame()) {
- // Record the source position of the statement: this code, which
- // comes after the code for the body, belongs to the loop
- // statement and not to the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- }
- }
-
- // Set the type of the loop variable to smi before compiling the test
- // expression if we are in a fast smi loop condition.
- if (node->is_fast_smi_loop() && has_valid_frame()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
- }
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- if (node->continue_target()->is_linked()) {
- // We can have dangling jumps to the continue target if there
- // was no update expression.
- node->continue_target()->Bind();
- }
- // Control can reach the test at the bottom by falling out of
- // the body, by a continue in the body, or from the update
- // expression.
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // Otherwise, jump back to the test at the top.
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
-
- // Both SpiderMonkey and kjs ignore null and undefined, in contrast
- // to the specification: section 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(eax);
-
- // eax: value to be iterated over
- __ cmp(eax, FACTORY->undefined_value());
- exit.Branch(equal);
- __ cmp(eax, FACTORY->null_value());
- exit.Branch(equal);
-
- // Stack layout in body:
- // [iteration counter (smi)] <- slot 0
- // [length of array] <- slot 1
- // [FixedArray] <- slot 2
- // [Map or 0] <- slot 3
- // [Object] <- slot 4
-
- // Check if enumerable is already a JSObject
- // eax: value to be iterated over
- __ test(eax, Immediate(kSmiTagMask));
- primitive.Branch(zero);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- jsobject.Branch(above_equal);
-
- primitive.Bind();
- frame_->EmitPush(eax);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
- // function call returns the value in eax, which is where we want it below
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // eax: value to be iterated over
- frame_->EmitPush(eax); // Push the object being iterated over.
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ mov(ecx, eax);
- loop.Bind();
- // Check that there are no elements.
- __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
- __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
- call_runtime.Branch(not_equal);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
- __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ cmp(Operand(edx), Immediate(FACTORY->empty_descriptor_array()));
- call_runtime.Branch(equal);
- // Check that there is an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- __ test(edx, Immediate(kSmiTagMask));
- call_runtime.Branch(zero);
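- // (A smi in that field is just the next enumeration index; a heap
- // object there is the bridge array holding the enum cache.)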
- // For all objects but the receiver, check that the cache is empty.
- __ cmp(ecx, Operand(eax));
- check_prototype.Branch(equal);
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
- call_runtime.Branch(not_equal);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(Operand(ecx), Immediate(FACTORY->null_value()));
- loop.Branch(not_equal);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- // eax: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ mov(edx, Operand(eax));
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, FACTORY->meta_map());
- fixed_array.Branch(not_equal);
-
- use_cache.Bind();
- // Get enum cache
- // eax: map (either the result of the call to
- // Runtime::kGetPropertyNamesFast or fetched directly from
- // the object)
- __ mov(ecx, Operand(eax));
-
- __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
- // Get the bridge array held in the enumeration index field.
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
- // Get the cache from the bridge array.
- __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(eax); // <- slot 3
- frame_->EmitPush(edx); // <- slot 2
- __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- frame_->EmitPush(eax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
- entry.Jump();
-
- fixed_array.Bind();
- // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
- frame_->EmitPush(eax); // <- slot 2
-
- // Push the length of the array and the initial index onto the stack.
- __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- frame_->EmitPush(eax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
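- // Both paths have now established the five-slot layout described at
- // the top of this function.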
-
- // Condition.
- entry.Bind();
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- __ mov(eax, frame_->ElementAt(0)); // load the current count
- __ cmp(eax, frame_->ElementAt(1)); // compare to the array length
- node->break_target()->Branch(above_equal);
-
- // Get the i'th entry of the array.
- __ mov(edx, frame_->ElementAt(2));
- __ mov(ebx, FixedArrayElementOperand(edx, eax));
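- // (eax holds the index as a smi; on ia32 a smi is the value shifted
- // left by one, so it scales directly to a byte offset in the array.)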
-
- // Get the expected map from the stack, or a zero map in the
- // permanent slow case.
- // eax: current iteration count
- // ebx: i'th entry of the enum cache
- __ mov(edx, frame_->ElementAt(3));
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- // eax: current iteration count
- // ebx: i'th entry of the enum cache
- // edx: expected map value
- __ mov(ecx, frame_->ElementAt(4));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, Operand(edx));
- end_del_check.Branch(equal);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(ebx); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
- __ mov(ebx, Operand(eax));
-
- // If the property has been removed while iterating, we just skip it.
- __ test(ebx, Operand(ebx));
- node->continue_target()->Branch(equal);
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in
- // the loop.
- // ebx: i'th entry of the enum cache (or string thereof)
- frame_->EmitPush(ebx);
- { Reference each(this, node->each());
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
- // Get the value (under the reference on the stack) from memory.
- frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2);
- } else {
- // If the reference was to a slot, we rely on the convenient property
- // that it doesn't matter whether a value (e.g., ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop();
- }
- }
- }
- // Unloading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
-
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(eax);
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- frame_->EmitPush(eax);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
-
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(eax);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- VisitStatementsAndSpill(node->catch_block()->statements());
- if (has_valid_frame()) {
- exit.Jump();
- }
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address,
- masm()->isolate());
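- // (k_handler_address is the isolate's thread-local pointer to the
- // innermost StackHandler on the stack.)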
-
- // Make sure that there's nothing left on the stack above the
- // handler structure.
- if (FLAG_debug_code) {
- __ mov(eax, Operand::StaticVariable(handler_address));
- __ cmp(esp, Operand(eax));
- __ Assert(equal, "stack pointer should point to top handler");
- }
-
- // If we can fall off the end of the try block, unlink from the try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing targets that
- // have been jumped to. Deallocate each shadow target.
- Result return_value;
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from try chain; be careful not to destroy the TOS if
- // there is one.
- if (i == kReturnShadowIndex) {
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(eax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (e.g., for...in) may have left stuff on the stack.
- __ mov(esp, Operand::StaticVariable(handler_address));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- if (!function_return_is_shadowed_) frame_->PrepareForReturn();
- shadows[i]->other_target()->Jump(&return_value);
- } else {
- shadows[i]->other_target()->Jump();
- }
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
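- // JUMPING serves as a base value: state JUMPING + i means control
- // left the try block through the i'th shadowing target.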
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(eax);
- // In case of thrown exceptions, this is where we continue.
- __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Isolate::k_handler_address,
- masm()->isolate());
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in ecx, then jump around the unlink blocks if any.
- frame_->EmitPush(Immediate(FACTORY->undefined_value()));
- __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // on the virtual frame. We must preserve it until it is
- // pushed.
- if (i == kReturnShadowIndex) {
- Result return_value;
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(eax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (e.g., for...in) may have left stuff on the
- // stack.
- __ mov(esp, Operand::StaticVariable(handler_address));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this target shadowed the function return, materialize
- // the return value on the stack.
- frame_->EmitPush(eax);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Immediate(FACTORY->undefined_value()));
- }
- __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(ecx);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(ecx);
- frame_->EmitPop(eax);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- BreakTarget* original = shadows[i]->other_target();
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
- if (i == kReturnShadowIndex) {
- // The return value is (already) in eax.
- Result return_value = allocator_->Allocate(eax);
- ASSERT(return_value.is_valid());
- if (function_return_is_shadowed_) {
- original->Branch(equal, &return_value);
- } else {
- // Branch around the preparation for return which may emit
- // code.
- JumpTarget skip;
- skip.Branch(not_equal);
- frame_->PrepareForReturn();
- original->Jump(&return_value);
- skip.Bind();
- }
- } else {
- original->Branch(equal);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
- exit.Branch(not_equal);
-
- // Rethrow exception.
- frame_->EmitPush(eax); // undo pop from above
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DebuggerStatement");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Spill everything, even constants, to the frame.
- frame_->SpillAll();
-
- frame_->DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-Result CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // The inevitable call will sync frame elements to memory anyway, so
- // we do it eagerly to allow us to push the arguments directly into
- // place.
- frame()->SyncRange(0, frame()->element_count() - 1);
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (!pretenure &&
- scope()->is_function_scope() &&
- function_info->num_literals() == 0) {
- FastNewClosureStub stub(
- function_info->strict_mode() ? kStrictMode : kNonStrictMode);
- frame()->EmitPush(Immediate(function_info));
- return frame()->CallStub(&stub, 1);
- } else {
- // Call the runtime to instantiate the function based on the
- // shared function info.
- frame()->EmitPush(esi);
- frame()->EmitPush(Immediate(function_info));
- frame()->EmitPush(Immediate(pretenure
- ? FACTORY->true_value()
- : FACTORY->false_value()));
- return frame()->CallRuntime(Runtime::kNewClosure, 3);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- Comment cmnt(masm_, "[ FunctionLiteral");
- ASSERT(!in_safe_int32_mode());
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- // Check for stack-overflow exception.
- if (function_info.is_null()) {
- SetStackOverflow();
- return;
- }
- Result result = InstantiateFunction(function_info, node->pretenure());
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- Result result = InstantiateFunction(node->shared_function_info(), false);
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- Comment cmnt(masm_, "[ Conditional");
- ASSERT(!in_safe_int32_mode());
- JumpTarget then;
- JumpTarget else_;
- JumpTarget exit;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Load(node->else_expression());
-
- if (then.is_linked()) {
- exit.Jump();
- then.Bind();
- Load(node->then_expression());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Load(node->then_expression());
-
- if (else_.is_linked()) {
- exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
- JumpTarget slow;
- JumpTarget done;
- Result value;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
-
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame()->SyncRange(0, frame()->element_count() - 1);
- frame()->EmitPush(esi);
- frame()->EmitPush(Immediate(slot->var()->name()));
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind(&value);
- frame_->Push(&value);
-
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- Label exit;
- __ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, FACTORY->the_hole_value());
- __ j(not_equal, &exit);
- __ mov(ecx, FACTORY->undefined_value());
- __ bind(&exit);
- frame()->EmitPush(ecx);
-
- } else if (slot->type() == Slot::PARAMETER) {
- frame()->PushParameterAt(slot->index());
-
- } else if (slot->type() == Slot::LOCAL) {
- frame()->PushLocalAt(slot->index());
-
- } else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
- frame()->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // If the loaded value is a constant, we know whether the arguments
- // object has been lazily loaded yet.
- Result result = frame()->Pop();
- if (result.is_constant()) {
- if (result.handle()->IsArgumentsMarker()) {
- result = StoreArgumentsObject(false);
- }
- frame()->Push(&result);
- return;
- }
- ASSERT(result.is_register());
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ cmp(Operand(result.reg()), Immediate(FACTORY->arguments_marker()));
- frame()->Push(&result);
- exit.Branch(not_equal);
-
- result = StoreArgumentsObject(false);
- frame()->SetElementAt(0, &result);
- result.Unuse();
- exit.Bind();
- return;
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- ASSERT(!in_safe_int32_mode());
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = esi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- // Load next context in chain.
- __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ mov(tmp.reg(), context);
- }
- __ bind(&next);
- // Terminate at global context.
- __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->global_context_map()));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- // Load next context in chain.
- __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- // The register allocator prefers eax if it is free, so the code generator
- // will load the global object directly into eax, which is where the LoadIC
- // expects it.
- frame_->Spill(eax);
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator()->Allocate();
- ASSERT(result->is_valid());
- __ mov(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(result->reg(), FACTORY->the_hole_value());
- done->Branch(not_equal, result);
- __ mov(result->reg(), FACTORY->undefined_value());
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for calls of an argument function.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ mov(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(slot->var()->name()));
-
- Result value;
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need to
- // split this into two operations: declaration of the context slot
- // followed by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, FACTORY->the_hole_value());
- exit.Branch(not_equal);
- }
-
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ mov(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are released when they go out
- // of scope.
- }
-
- exit.Bind();
- }
-}
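-
-
-// Sketch of the const fast path above, with hypothetical stand-ins for
-// V8's tagged values: the store is only performed while the slot still
-// holds the hole sentinel, i.e. on the first initialization; a later
-// assignment to the const leaves the stored value untouched.
-static void* const kTheHoleSketch = 0;  // stand-in for the hole value
-
-static void InitConstSlotSketch(void** slot, void* value) {
-  if (*slot == kTheHoleSketch) *slot = value;  // first initialization only
-}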
-
-
-void CodeGenerator::VisitSlot(Slot* slot) {
- Comment cmnt(masm_, "[ Slot");
- if (in_safe_int32_mode()) {
- if ((slot->type() == Slot::LOCAL && !slot->is_arguments())) {
- frame()->UntaggedPushLocalAt(slot->index());
- } else if (slot->type() == Slot::PARAMETER) {
- frame()->UntaggedPushParameterAt(slot->index());
- } else {
- UNREACHABLE();
- }
- } else {
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- }
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- ASSERT(!in_safe_int32_mode());
- Reference ref(this, node);
- ref.GetValue();
- }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- Comment cmnt(masm_, "[ Literal");
- if (frame_->ConstantPoolOverflowed()) {
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- if (in_safe_int32_mode()) {
- temp.set_untagged_int32(true);
- }
- __ Set(temp.reg(), Immediate(node->handle()));
- frame_->Push(&temp);
- } else {
- if (in_safe_int32_mode()) {
- frame_->PushUntaggedElement(node->handle());
- } else {
- frame_->Push(node->handle());
- }
- }
-}
-
-
-void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ push(Immediate(bits ^ jit_cookie_));
- __ xor_(Operand(esp, 0), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ mov(Operand(ebp, offset), Immediate(bits ^ jit_cookie_));
- __ xor_(Operand(ebp, offset), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
- ASSERT(target.is_valid());
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ Set(target, Immediate(bits ^ jit_cookie_));
- __ xor_(target, jit_cookie_);
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- if (!value->IsSmi()) return false;
- int int_value = Smi::cast(*value)->value();
- return !is_intn(int_value, kMaxSmiInlinedBits);
-}
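-
-
-// The jit cookie masking above relies on XOR being its own inverse: the
-// immediate emitted into the instruction stream is bits ^ cookie, and a
-// second XOR with the same cookie restores the original smi, so the raw
-// value never appears verbatim in executable memory. A minimal sketch:
-static int MaskSmiSketch(int bits, int cookie) {
-  return bits ^ cookie;  // MaskSmiSketch(MaskSmiSketch(b, c), c) == b
-}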
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function. Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
- DeferredRegExpLiteral(Register boilerplate,
- Register literals,
- RegExpLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredRegExpLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
- // RegExp pattern (2).
- __ push(Immediate(node_->pattern()));
- // RegExp flags (3).
- __ push(Immediate(node_->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
- DeferredAllocateInNewSpace(int size,
- Register target,
- int registers_to_save = 0)
- : size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
- ASSERT_EQ(0, registers_to_save & target.bit());
- set_comment("[ DeferredAllocateInNewSpace");
- }
- void Generate();
-
- private:
- int size_;
- Register target_;
- int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
- for (int i = 0; i < kNumRegs; i++) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ push(save_register);
- }
- }
- __ push(Immediate(Smi::FromInt(size_)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- if (!target_.is(eax)) {
- __ mov(target_, eax);
- }
- for (int i = kNumRegs - 1; i >= 0; i--) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ pop(save_register);
- }
- }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ RegExp Literal");
-
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the RegExp object. If so,
- // jump to the deferred code passing the literals array.
- DeferredRegExpLiteral* deferred =
- new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), FACTORY->undefined_value());
- deferred->Branch(equal);
- deferred->BindExit();
-
- // Register of boilerplate contains RegExp object.
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
- DeferredAllocateInNewSpace* allocate_fallback =
- new DeferredAllocateInNewSpace(size, literals.reg());
- frame_->Push(&boilerplate);
- frame_->SpillTop();
- __ AllocateInNewSpace(size,
- literals.reg(),
- tmp.reg(),
- no_reg,
- allocate_fallback->entry_label(),
- TAG_OBJECT);
- allocate_fallback->BindExit();
- boilerplate = frame_->Pop();
- // Copy from boilerplate to clone and return clone.
-
- for (int i = 0; i < size; i += kPointerSize) {
- __ mov(tmp.reg(), FieldOperand(boilerplate.reg(), i));
- __ mov(FieldOperand(literals.reg(), i), tmp.reg());
- }
- frame_->Push(&literals);
-}
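-
-
-// The clone loop above is a plain field-by-field shallow copy of the
-// boilerplate object; a sketch in terms of raw memory, assuming both
-// objects span at least 'size' bytes and are pointer-aligned:
-static void ShallowCopySketch(char* dst, const char* src, int size) {
-  for (int i = 0; i < size; i += sizeof(void*)) {  // kPointerSize steps
-    *reinterpret_cast<void**>(dst + i) =
-        *reinterpret_cast<void* const*>(src + i);
-  }
-}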
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Literal array.
- frame_->Push(&literals);
- // Literal index.
- frame_->Push(Smi::FromInt(node->literal_index()));
- // Constant properties.
- frame_->Push(node->constant_properties());
- // Should the object literal have fast elements?
- frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
- Result clone;
- if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->Push(&clone);
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
-
- for (int i = 0; i < node->properties()->length(); i++) {
- ObjectLiteral::Property* property = node->properties()->at(i);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through.
- case ObjectLiteral::Property::COMPUTED: {
- Handle<Object> key(property->key()->handle());
- if (key->IsSymbol()) {
- // Duplicate the object as the IC receiver.
- frame_->Dup();
- Load(property->value());
- if (property->emit_store()) {
- Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false,
- strict_mode_flag());
- // A test eax instruction following the store IC call would
- // indicate the presence of an inlined version of the
- // store. Add a nop to indicate that there is no such
- // inlined version.
- __ nop();
- } else {
- frame_->Drop(2);
- }
- break;
- }
- // Fall through
- }
- case ObjectLiteral::Property::PROTOTYPE: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- Load(property->value());
- if (property->emit_store()) {
- frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
- // Ignore the result.
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(1));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(0));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- default: UNREACHABLE();
- }
- }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- frame_->Push(&literals);
- frame_->Push(Smi::FromInt(node->literal_index()));
- frame_->Push(node->constant_elements());
- int length = node->values()->length();
- Result clone;
- if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
- } else if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- }
- frame_->Push(&clone);
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < length; i++) {
- Expression* value = node->values()->at(i);
-
- if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
- continue;
- }
-
- // The property must be set by generated code.
- Load(value);
-
- // Get the property value off the stack.
- Result prop_value = frame_->Pop();
- prop_value.ToRegister();
-
- // Fetch the array literal while leaving a copy on the stack and
- // use it to get the elements array.
- frame_->Dup();
- Result elements = frame_->Pop();
- elements.ToRegister();
- frame_->Spill(elements.reg());
- // Get the elements array.
- __ mov(elements.reg(),
- FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
-
- // Update the write barrier for the array address.
- frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
- }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- ASSERT(!in_safe_int32_mode());
- ASSERT(!in_spilled_code());
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Perform the assignment.
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT(frame()->height() == original_height + 1);
-}
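-
-
-// Compound assignment handled above, restated as a sketch: for 'x op= y'
-// the slot is loaded, the binary operation is applied, and the result is
-// stored back; the value also stays on the frame so that chained
-// assignments like 'a = x += 1' see it. (Token::ADD case shown.)
-static int CompoundAssignSketch(int* slot, int rhs) {
-  *slot = *slot + rhs;  // load, GenericBinaryOperation, StoreToSlot
-  return *slot;         // result left on the expression stack
-}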
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- // An initialization block consists of assignments of the form
- // expr.x = ..., so this can never be an assignment to a variable and
- // there must be a receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- frame()->Dup();
- }
- Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame()->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else if (var != NULL) {
- // The LoadIC stub expects the object in eax.
- // Freeing eax causes the code generator to load the global into it.
- frame_->Spill(eax);
- LoadGlobal();
- } else {
- frame()->Dup();
- }
- Result value = EmitNamedLoad(name, var != NULL);
- frame()->Push(&value);
- Load(node->value());
-
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- Result value = frame()->Pop();
- frame()->Push(prop->obj());
- frame()->Push(&value);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- Result answer = EmitNamedStore(name, is_contextual);
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- }
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame()->PushElementAt(1);
- frame()->PushElementAt(1);
- Result value = EmitKeyedLoad();
- frame()->Push(&value);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- Result answer = EmitKeyedStore(prop->key()->type());
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
- ASSERT(!in_safe_int32_mode());
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return but the code generator will
- // still generate code and expects a certain frame height.
- frame()->Push(&result);
- }
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Throw");
- Load(node->exception());
- Result result = frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Property");
- Reference property(this, node);
- property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(FACTORY->undefined_value());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Result to hold the result of the function resolution and the
- // final result of the eval call.
- Result result;
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&fun);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(FACTORY->undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump(&result);
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(FACTORY->undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind(&result);
-
- // The runtime call returns a pair of values in eax (function) and
- // edx (receiver). Touch up the stack with the right values.
- Result receiver = allocator_->Allocate(edx);
- frame_->SetElementAt(arg_count + 1, &result);
- frame_->SetElementAt(arg_count, &receiver);
- receiver.Unuse();
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(var->name());
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
- Result function;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &function,
- &slow,
- &done);
-
- slow.Bind();
- // Enter the runtime system to load the function from the context.
- // Sync the frame so we can push the arguments directly into
- // place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(var->name()));
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // The runtime call returns a pair of values in eax and edx. The
- // looked-up function is in eax and the receiver is in edx. These
- // register references are not ref counted here. We spill them
- // eagerly since they are arguments to an inevitable call (and are
- // not sharable by the arguments).
- ASSERT(!allocator()->is_used(eax));
- frame_->EmitPush(eax);
-
- // Load the receiver.
- ASSERT(!allocator()->is_used(edx));
- frame_->EmitPush(edx);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- call.Bind();
- }
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
-
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(name);
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the function to call from the property through a reference.
-
- // Pass receiver to called function.
- if (property->is_synthetic()) {
- Reference ref(this, property);
- ref.GetValue();
- // Use global object as receiver.
- LoadGlobalReceiver();
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the name of the function.
- Load(property->key());
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&key);
- frame_->Push(&receiver);
- key.Unuse();
- receiver.Unuse();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Place the key on top of stack and call the IC initialization code.
- frame_->PushElementAt(arg_count + 1);
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->Drop(); // Drop the key still on the stack.
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
- }
-
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver();
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallConstructor(arg_count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- value.Unuse();
- destination()->Split(zero);
-}
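-
-
-// Sketch of the tag test emitted above: on ia32, V8 smis have the low
-// bit clear (kSmiTag == 0 with a one-bit tag), so 'test reg, 1' sets the
-// zero flag exactly for smis. (intptr_t as in <stdint.h>.)
-static bool IsSmiSketch(intptr_t value) {
-  return (value & 1) == 0;  // low tag bit clear => smi
-}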
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ Set(result_, Immediate(FACTORY->undefined_value()));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ Set(result_, Immediate(FACTORY->nan_value()));
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns undefined in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need two extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Result code = frame_->Pop();
- code.ToRegister();
- ASSERT(code.is_valid());
-
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code.reg(), result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Set(result_, Immediate(Smi::FromInt(0)));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Set(result_, Immediate(FACTORY->empty_string()));
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need three extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch1 = allocator()->Allocate();
- ASSERT(scratch1.is_valid());
- Result scratch2 = allocator()->Allocate();
- ASSERT(scratch2.is_valid());
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object.reg(),
- index.reg(),
- scratch1.reg(),
- scratch2.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
- value.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop, loop_condition,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- Load(args->at(1));
- // Load this to eax (= array)
- Load(args->at(0));
- Result array_result = frame_->Pop();
- array_result.ToRegister(eax);
- frame_->SpillAll();
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = eax;
- Register elements = no_reg; // Will be eax.
-
- Register index = edx;
-
- Register string_length = ecx;
-
- Register string = esi;
-
- Register scratch = ebx;
-
- Register array_length = edi;
- Register result_pos = no_reg; // Will be edi.
-
- // Separator operand is already pushed.
- Operand separator_operand = Operand(esp, 2 * kPointerSize);
- Operand result_operand = Operand(esp, 1 * kPointerSize);
- Operand array_length_operand = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ test(array, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &bailout);
-
- // If the array has length zero, return the empty string.
- __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ sar(array_length, 1);
- __ j(not_zero, &non_trivial_array);
- __ mov(result_operand, FACTORY->empty_string());
- __ jmp(&done);
-
- // Save the array length.
- __ bind(&non_trivial_array);
- __ mov(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
- // Loop condition: while (index < length).
- // Live loop registers: index, array_length, string,
- // scratch, string_length, elements.
- __ jmp(&loop_condition);
- __ bind(&loop);
- __ cmp(index, Operand(array_length));
- __ j(greater_equal, &done);
-
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- __ add(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
- __ j(overflow, &bailout);
- __ add(Operand(index), Immediate(1));
- __ bind(&loop_condition);
- __ cmp(index, Operand(array_length));
- __ j(less, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, 1);
- __ j(not_equal, &not_size_one_array);
- __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
- __ mov(result_operand, scratch);
- __ jmp(&done);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths, as a smi.
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ mov(string, separator_operand);
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
-
- // Add (separator length times array_length) - separator length
- // to string_length.
- __ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
- __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
- __ imul(scratch, array_length_operand);
- __ j(overflow, &bailout);
- __ add(string_length, Operand(scratch));
- __ j(overflow, &bailout);
-
- __ shr(string_length, 1);
- // Live registers and stack values:
- // string_length
- // elements
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
-
-
- __ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case
- __ mov(index, Immediate(0));
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
-
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
- __ bind(&loop_1_condition);
- __ cmp(index, array_length_operand);
- __ j(less, &loop_1); // End while (index < length).
- __ jmp(&done);
-
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ascii character value.
- __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ mov_b(separator_operand, scratch);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the
- // first element is not preceded by a separator.
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator character to the result.
- __ mov_b(scratch, separator_operand);
- __ mov_b(Operand(result_pos, 0), scratch);
- __ inc(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the
- // first element is not preceded by a separator.
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator to the result.
- __ mov(string, separator_operand);
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_3); // End while (index < length).
- __ jmp(&done);
-
-
- __ bind(&bailout);
- __ mov(result_operand, FACTORY->undefined_value());
- __ bind(&done);
- __ mov(eax, result_operand);
- // Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- frame_->Drop(1);
- frame_->Push(&array_result);
-}
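-
-
-// The fast path above is a two-pass join: pass one validates that every
-// element is a flat ASCII string and sums the lengths (bailing out
-// otherwise); pass two copies the strings and separators into one
-// preallocated result. A C++ sketch of the same shape, with std::string
-// standing in for SeqAsciiString (assumes <string> and <vector>):
-std::string JoinSketch(const std::vector<std::string>& parts,
-                       const std::string& sep) {
-  if (parts.empty()) return std::string();
-  size_t total = sep.size() * (parts.size() - 1);
-  for (size_t i = 0; i < parts.size(); i++) total += parts[i].size();
-  std::string result;
-  result.reserve(total);  // mirrors AllocateAsciiString above
-  for (size_t i = 0; i < parts.size(); i++) {
-    if (i > 0) result += sep;  // loop_2 / loop_3 structure above
-    result += parts[i];
-  }
-  return result;
-}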
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, temp.reg());
- value.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
-
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- __ cmp(obj.reg(), FACTORY->null_value());
- destination()->true_target()->Branch(equal);
-
- Result map = allocator()->Allocate();
- ASSERT(map.is_valid());
- __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- // Do a range test for JSObject type. We can't use
- // MacroAssembler::IsInstanceJSObjectType, because we are using a
- // ControlDestination, so we copy its implementation here.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- obj.Unuse();
- map.Unuse();
- destination()->Split(below_equal);
-}
-
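-
- // Illustrative sketch (not from the original file) of the range test used
- // above: after subtracting the lower bound, one unsigned comparison checks
- // FIRST_JS_OBJECT_TYPE <= type <= LAST_JS_OBJECT_TYPE, since any type below
- // the lower bound wraps around to a large unsigned value.
- static inline bool IllustrativeIsInJSObjectRange(uint8_t type, uint8_t first,
- uint8_t last) {
- return static_cast<uint8_t>(type - first) <= static_cast<uint8_t>(last - first);
- }
-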
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
- // typeof(arg) == function).
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
-
- // Check that this is an object.
- frame_->Spill(value.reg());
- __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
- value.Unuse();
- destination()->Split(above_equal);
-}
-
-
- // Deferred code to check whether the String JavaScript object is safe for
- // using the default valueOf. This code is called after the bit caching this
- // information in the map has been checked against the object's map, which is
- // expected in the map_result_ register. On return the map_result_ register
- // contains 1 for true and 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ Assert(equal, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
- __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ cmp(scratch1_, FACTORY->hash_table_map());
- __ j(equal, &false_result);
-
- // Look for the valueOf symbol in the descriptor array, and indicate false
- // if it is found. The descriptor type is not checked, so a transition named
- // valueOf also triggers the false result (a harmless false negative).
- __ mov(map_result_,
- FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch1_: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ lea(scratch1_,
- Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
- // Calculate location of the first key name.
- __ add(Operand(map_result_),
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of them is the
- // valueOf symbol, the result is false.
- Label entry, loop;
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(scratch2_, FieldOperand(map_result_, 0));
- __ cmp(scratch2_, FACTORY->value_of_symbol());
- __ j(equal, &false_result);
- __ add(Operand(map_result_), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(map_result_, Operand(scratch1_));
- __ j(not_equal, &loop);
-
- // Reload map as register map_result_ was used as temporary above.
- __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object, check that its prototype
- // is the unmodified String prototype. If not, the result is false.
- __ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
- __ test(scratch1_, Immediate(kSmiTagMask));
- __ j(zero, &false_result);
- __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(scratch2_,
- FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ cmp(scratch1_,
- ContextOperand(scratch2_,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &false_result);
- // Set the bit in the map to indicate that it has been checked safe for the
- // default valueOf, and set the true result.
- __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ Set(map_result_, Immediate(1));
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ Set(map_result_, Immediate(0));
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
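-
- // Hedged sketch of the deferred scan above, with hypothetical types rather
- // than the real V8 classes: walk the key slots of a descriptor array and
- // report whether any of them is the interned "valueOf" symbol. Symbols are
- // interned, so pointer identity suffices, just like the cmp above.
- static bool IllustrativeContainsValueOf(const void* const* keys, int count,
- const void* value_of_symbol) {
- for (int i = 0; i < count; i++) {
- if (keys[i] == value_of_symbol) return true; // Wrapper is not safe.
- }
- return false;
- }
-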
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop(); // Pop the string wrapper.
- obj.ToRegister();
- ASSERT(obj.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj.reg());
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Result map_result = allocator()->Allocate();
- ASSERT(map_result.is_valid());
- __ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- destination()->true_target()->Branch(not_zero);
-
- // We need an additional two scratch registers for the deferred code.
- Result temp1 = allocator()->Allocate();
- ASSERT(temp1.is_valid());
- Result temp2 = allocator()->Allocate();
- ASSERT(temp2.is_valid());
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
- deferred->Branch(zero);
- deferred->BindExit();
- __ test(map_result.reg(), Operand(map_result.reg()));
- obj.Unuse();
- map_result.Unuse();
- temp1.Unuse();
- temp2.Unuse();
- destination()->Split(not_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
- obj.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- obj.Unuse();
- temp.Unuse();
- destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- fp.Unuse();
- destination()->Split(equal);
-}
-
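-
- // Illustrative sketch of the walk above, using a made-up frame layout (the
- // real offsets live in StandardFrameConstants): load the caller's frame
- // pointer, skip one arguments-adaptor frame if present, then compare the
- // frame marker against the CONSTRUCT marker.
- struct IllustrativeFrame {
- IllustrativeFrame* caller;
- intptr_t context_or_sentinel; // ARGUMENTS_ADAPTOR sentinel lives here.
- intptr_t marker; // CONSTRUCT marker lives here.
- };
- static bool IllustrativeIsConstructCall(const IllustrativeFrame* frame,
- intptr_t adaptor_sentinel,
- intptr_t construct_marker) {
- const IllustrativeFrame* caller = frame->caller;
- if (caller->context_or_sentinel == adaptor_sentinel) caller = caller->caller;
- return caller->marker == construct_marker;
- }
-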
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
-
- Label exit;
-
- // Get the number of formal parameters.
- __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ mov(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) __ AbortIfNotSmi(result.reg());
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
-
- // If the object is a smi, we return null.
- __ test(obj.reg(), Immediate(kSmiTagMask));
- null.Branch(zero);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
-
- // Check if the constructor in the map is a function.
- { Result tmp = allocator()->Allocate();
- __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
- non_function_constructor.Branch(not_equal);
- }
-
- // The map register now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ mov(obj.reg(),
- FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(FACTORY->function_class_symbol());
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(FACTORY->Object_symbol());
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(FACTORY->null_value());
-
- // All done.
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- __ test(object.reg(), Immediate(kSmiTagMask));
- leave.Branch(zero, taken);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal, not_taken);
- __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
-
- // if (object->IsSmi()) return value.
- __ test(object.reg(), Immediate(kSmiTagMask));
- leave.Branch(zero, &value, taken);
-
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value, not_taken);
-
- // Store the value.
- __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ mov(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
-
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Result right = frame_->Pop();
- Result left = frame_->Pop();
- right.ToRegister();
- left.ToRegister();
- __ cmp(right.reg(), Operand(left.reg()));
- right.Unuse();
- left.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- STATIC_ASSERT(kSmiTag == 0); // EBP value is aligned, so it looks like a Smi.
- Result ebp_as_smi = allocator_->Allocate();
- ASSERT(ebp_as_smi.is_valid());
- __ mov(ebp_as_smi.reg(), Operand(ebp));
- frame_->Push(&ebp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- frame_->SpillAll();
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(0, ebx);
- __ CallCFunction(ExternalReference::random_uint32_function(masm()->isolate()),
- 0);
-
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
- // This is implemented on both SSE2 and FPU.
- if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, Operand(ebx));
- __ movd(xmm0, Operand(eax));
- __ cvtss2sd(xmm1, xmm1);
- __ pxor(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
-
- Result result = allocator_->Allocate(eax);
- frame_->Push(&result);
-}
-
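-
- // Worked example of the bit trick above as a hedged C++ sketch (assumes
- // IEEE-754 doubles, a little-endian layout as on ia32, and <cstring> /
- // <cstdint>; not part of the original file): splice 32 random bits into the
- // low mantissa word of 1.0 x 2^20, then subtract 1.0 x 2^20 so that only
- // 0.(32 random bits) remains.
- static double IllustrativeBitsToUnitInterval(uint32_t random_bits) {
- uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random_bits;
- double d;
- memcpy(&d, &bits, sizeof(d)); // d == 1.(20 zeros)(32 random bits) x 2^20.
- return d - 1048576.0; // 1048576.0 == 1.0 x 2^20; result is in [0, 1).
- }
-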
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(4, args->length());
-
- // Load the arguments on the stack and call the stub.
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
-
- RegExpExecStub stub;
- Result result = frame_->CallStub(&stub, 4);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
-
- RegExpConstructResultStub stub;
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst, Register cache, Register key)
- : dst_(dst), cache_(cache), key_(key) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_; // On invocation holds the Smi index of the finger;
- // on exit holds the value being looked up.
- Register cache_; // instance of JSFunctionResultCache.
- Register key_; // key being looked up.
-};
-
-
-void DeferredSearchCache::Generate() {
- Label first_loop, search_further, second_loop, cache_miss;
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize);
- Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex);
-
- // Check the cache from the finger back to the start of the cache.
- __ bind(&first_loop);
- __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
- __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
- __ j(less, &search_further);
-
- __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
- __ j(not_equal, &first_loop);
-
- __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&search_further);
-
- // Check the cache from the end of the cache back down to the finger.
- __ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
-
- __ bind(&second_loop);
- __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
- // Consider prefetching into some reg.
- __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
- __ j(less_equal, &cache_miss);
-
- __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
- __ j(not_equal, &second_loop);
-
- __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&cache_miss);
- __ push(cache_); // store a reference to the cache
- __ push(key_); // store the key
- __ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ push(key_);
- // On ia32 the function must be in edi.
- __ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
- ParameterCount expected(1);
- __ InvokeFunction(edi, expected, CALL_FUNCTION);
-
- // Find a place to put the new cached value.
- Label add_new_entry, update_cache;
- __ mov(ecx, Operand(esp, kPointerSize)); // restore the cache
- // Possible optimization: the cache size is constant for a given cache, so
- // technically we could use a constant here. However, on a cache miss this
- // optimization would hardly matter.
-
- // Check whether we can add a new entry to the cache.
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
- __ j(greater, &add_new_entry);
-
- // Check whether we can evict the entry after the finger.
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
- __ add(Operand(edx), Immediate(kEntrySizeSmi));
- __ cmp(ebx, Operand(edx));
- __ j(greater, &update_cache);
-
- // Need to wrap around to the start of the cache.
- __ mov(edx, Immediate(kEntriesIndexSmi));
- __ jmp(&update_cache);
-
- __ bind(&add_new_entry);
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
- __ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1));
- __ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx);
-
- // Update the cache itself.
- // edx holds the index.
- __ bind(&update_cache);
- __ pop(ebx); // restore the key
- __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
- // Store key.
- __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
- __ RecordWrite(ecx, 0, ebx, edx);
-
- // Store value.
- __ pop(ecx); // restore the cache.
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
- __ add(Operand(edx), Immediate(Smi::FromInt(1)));
- __ mov(ebx, eax);
- __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
- __ RecordWrite(ecx, 0, ebx, edx);
-
- if (!dst_.is(eax)) {
- __ mov(dst_, eax);
- }
-}
-
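-
- // Minimal sketch of the two-phase lookup above, over a hypothetical flat
- // (key, value) array; the real JSFunctionResultCache stores smi-tagged
- // indices and extra header fields. Search from the finger back to the first
- // entry, then from the end of the cache back down to the finger.
- static int IllustrativeSearchCache(const int* entries, int cache_size,
- int finger, int key) {
- for (int i = finger - 2; i >= 0; i -= 2) {
- if (entries[i] == key) return entries[i + 1];
- }
- for (int i = cache_size - 2; i > finger; i -= 2) {
- if (entries[i] == key) return entries[i + 1];
- }
- return -1; // Miss: the deferred code invokes the factory function instead.
- }
-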
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- masm()->isolate()->global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->Push(FACTORY->undefined_value());
- return;
- }
-
- Load(args->at(1));
- Result key = frame_->Pop();
- key.ToRegister();
-
- Result cache = allocator()->Allocate();
- ASSERT(cache.is_valid());
- __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(cache.reg(),
- FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
- __ mov(cache.reg(),
- ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ mov(cache.reg(),
- FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
- cache.reg(),
- key.reg());
-
- // tmp.reg() now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(tmp.reg(), FieldOperand(cache.reg(),
- JSFunctionResultCache::kFingerOffset));
- __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
- deferred->Branch(not_equal);
-
- __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
-
- deferred->BindExit();
- frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- Load(args->at(0));
- NumberToStringStub stub;
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- // Note: this code assumes that the indices passed are within the
- // elements' bounds and refer to valid (non-hole) values.
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- Result index2 = frame_->Pop();
- index2.ToRegister();
-
- Result index1 = frame_->Pop();
- index1.ToRegister();
-
- Result object = frame_->Pop();
- object.ToRegister();
-
- Result tmp1 = allocator()->Allocate();
- tmp1.ToRegister();
- Result tmp2 = allocator()->Allocate();
- tmp2.ToRegister();
-
- frame_->Spill(object.reg());
- frame_->Spill(index1.reg());
- frame_->Spill(index2.reg());
-
- DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
- index1.reg(),
- index2.reg());
-
- // Fetch the map and check whether the array is in the fast case.
- // Check that the object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(below);
- __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
- KeyedLoadIC::kSlowCaseBitFieldMask);
- deferred->Branch(not_zero);
-
- // Check that the object's elements are in the fast case and writable.
- __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- // Check that both indices are smis.
- __ mov(tmp2.reg(), index1.reg());
- __ or_(tmp2.reg(), Operand(index2.reg()));
- __ test(tmp2.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
-
- // Check that both indices are valid.
- __ mov(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
- __ cmp(tmp2.reg(), Operand(index1.reg()));
- deferred->Branch(below_equal);
- __ cmp(tmp2.reg(), Operand(index2.reg()));
- deferred->Branch(below_equal);
-
- // Bring addresses into index1 and index2.
- __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
- __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
-
- // Swap elements.
- __ mov(object.reg(), Operand(index1.reg(), 0));
- __ mov(tmp2.reg(), Operand(index2.reg(), 0));
- __ mov(Operand(index2.reg(), 0), object.reg());
- __ mov(Operand(index1.reg(), 0), tmp2.reg());
-
- Label done;
- __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
- // Possible optimization: check that both values are smis
- // (OR them together and test against the smi mask).
-
- __ mov(tmp2.reg(), tmp1.reg());
- __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
- __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
- __ bind(&done);
-
- deferred->BindExit();
- frame_->Push(FACTORY->undefined_value());
-}
-
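-
- // Aside, a hedged sketch of the combined index check above: OR-ing two
- // values and testing the tag bit verifies that both are smis with a single
- // branch (kSmiTag == 0 and kSmiTagMask == 1 on ia32).
- static inline bool IllustrativeBothSmis(int32_t a, int32_t b) {
- return ((a | b) & 1) == 0; // A set tag bit on either value fails the test.
- }
-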
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- Result result = frame_->CallJSFunction(n_args);
- frame_->Push(&result);
-}
-
-
- // Generates the Math.pow method. It only handles special cases and
- // branches to the runtime system for everything else. Note that this
- // function assumes that the call site has executed ToNumber on both
- // arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
- if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
- frame_->Push(&res);
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
-
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
-
- // Save 1 in xmm3 - we need this several times later on.
- __ mov(answer.reg(), Immediate(1));
- __ cvtsi2sd(xmm3, Operand(answer.reg()));
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ test(exponent.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &exponent_nonsmi);
- __ test(base.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &base_nonsmi);
-
- // Optimized version when the exponent is an integer.
- Label powi;
- __ SmiUntag(base.reg());
- __ cvtsi2sd(xmm0, Operand(base.reg()));
- __ jmp(&powi);
- // The exponent is a smi and the base is a heap number.
- __ bind(&base_nonsmi);
- __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- call_runtime.Branch(not_equal);
-
- __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // Optimized version of pow when the exponent is an integer.
- __ bind(&powi);
- __ SmiUntag(exponent.reg());
-
- // Save the exponent in the base register; we need to check later whether it
- // is negative. We know that base and exponent are in different registers.
- __ mov(base.reg(), exponent.reg());
-
- // Get absolute value of exponent.
- Label no_neg;
- __ cmp(exponent.reg(), 0);
- __ j(greater_equal, &no_neg);
- __ neg(exponent.reg());
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
- __ bind(&while_true);
- __ shr(exponent.reg(), 1);
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ test(exponent.reg(), Operand(exponent.reg()));
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // base now holds the original exponent - if it is negative, return 1/result.
- __ test(base.reg(), Operand(base.reg()));
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ mov(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, Operand(answer.reg()));
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // The exponent (or both operands) is a heap number - in any case we now
- // work on doubles.
- __ bind(&exponent_nonsmi);
- __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- call_runtime.Branch(not_equal);
- __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test whether the exponent is NaN.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
-
- Label base_not_smi;
- Label handle_special_cases;
- __ test(base.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &base_not_smi);
- __ SmiUntag(base.reg());
- __ cvtsi2sd(xmm0, Operand(base.reg()));
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- call_runtime.Branch(not_equal);
- __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ and_(answer.reg(), HeapNumber::kExponentMask);
- __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
- // The base is NaN or +/-Infinity.
- call_runtime.Branch(greater_equal);
- __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ mov(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, Operand(answer.reg()));
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(),
- base.reg(), &failure);
- __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
- done.Bind(&answer);
- frame()->Push(&answer);
- }
-}
-
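-
- // Hedged C++ sketch of the "powi" loop above (illustrative only; the real
- // code additionally bails out to the runtime when the result reaches
- // infinity): square-and-multiply over the bits of the exponent, then take
- // the reciprocal for a negative exponent.
- static double IllustrativePowi(double base, int exponent) {
- bool negative = exponent < 0;
- unsigned e = negative ? 0u - static_cast<unsigned>(exponent)
- : static_cast<unsigned>(exponent);
- double result = 1.0;
- for (; e != 0; e >>= 1) {
- if (e & 1) result *= base; // Multiply in the shifted-out bit.
- base *= base; // Square once per exponent bit.
- }
- return negative ? 1.0 / result : result;
- }
-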
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
- // Generates the Math.sqrt method. Note that this function assumes that
- // the call site has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
-
- if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
- Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
- frame()->Push(&result);
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
-
- __ test(result.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
- __ SmiUntag(result.reg());
- __ cvtsi2sd(xmm0, Operand(result.reg()));
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- __ j(not_equal, &runtime);
- __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
-
- __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
- end.Bind(&result);
- frame()->Push(&result);
- }
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
- Load(args->at(0));
- Load(args->at(1));
- Result right_res = frame_->Pop();
- Result left_res = frame_->Pop();
- right_res.ToRegister();
- left_res.ToRegister();
- Result tmp_res = allocator()->Allocate();
- ASSERT(tmp_res.is_valid());
- Register right = right_res.reg();
- Register left = left_res.reg();
- Register tmp = tmp_res.reg();
- right_res.Unuse();
- left_res.Unuse();
- tmp_res.Unuse();
- __ cmp(left, Operand(right));
- destination()->true_target()->Branch(equal);
- // Fail if either is a non-HeapObject.
- __ mov(tmp, left);
- __ and_(Operand(tmp), right);
- __ test(Operand(tmp), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- __ CmpObjectType(left, JS_REGEXP_TYPE, tmp);
- destination()->false_target()->Branch(not_equal);
- __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
- destination()->false_target()->Branch(not_equal);
- __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- destination()->Split(equal);
-}
-
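-
- // Sketch (not in the original) of the combined check above: heap pointers
- // carry a set low tag bit, so AND-ing the two values and testing that bit
- // rejects the case where either operand is a smi, in a single branch.
- static inline bool IllustrativeBothHeapObjects(intptr_t a, intptr_t b) {
- return ((a & b) & 1) != 0; // kHeapObjectTag == 1; a smi has a 0 tag bit.
- }
-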
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfNotString(value.reg());
- }
-
- __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
-
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result string = frame_->Pop();
- string.ToRegister();
- if (FLAG_debug_code) {
- __ AbortIfNotString(string.reg());
- }
-
- Result number = allocator()->Allocate();
- ASSERT(number.is_valid());
- __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
- __ IndexFromHash(number.reg(), number.reg());
- string.Unuse();
- frame_->Push(&number);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- ASSERT(!in_safe_int32_mode());
- if (CheckForInlineRuntimeCall(node)) {
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- const Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Push the builtins object found in the current global object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), GlobalObjectOperand());
- __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
- frame_->Push(&temp);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- if (function == NULL) {
- // Call the JS runtime function.
- frame_->Push(node->name());
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting_);
- frame_->RestoreContextRegister();
- frame_->Push(&answer);
- } else {
- // Call the C runtime function.
- Result answer = frame_->CallRuntime(function, arg_count);
- frame_->Push(&answer);
- }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- // Swap the true and false targets but keep the same actual label
- // as the fall through.
- destination()->Invert();
- LoadCondition(node->expression(), destination(), true);
- // Swap the labels back.
- destination()->Invert();
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
- frame_->Push(&answer);
- return;
- }
-
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (variable != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode,
- // but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->Push(variable->name());
- frame_->Push(Smi::FromInt(kNonStrictMode));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 3);
- frame_->Push(&answer);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to delete from the context holding the named
- // variable. Sync the virtual frame eagerly so we can push the
- // arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(variable->name()));
- Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->Push(&answer);
- } else {
- // Default: the result of deleting a non-global variable that was not
- // dynamically introduced is false.
- frame_->Push(FACTORY->false_value());
- }
- } else {
- // Default: the result of deleting an expression is true.
- Load(node->expression()); // may have side effects
- frame_->SetElementAt(0, FACTORY->true_value());
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(&answer);
-
- } else if (op == Token::VOID) {
- Expression* expression = node->expression();
- if (expression && expression->AsLiteral() && (
- expression->AsLiteral()->IsTrue() ||
- expression->AsLiteral()->IsFalse() ||
- expression->AsLiteral()->handle()->IsNumber() ||
- expression->AsLiteral()->handle()->IsString() ||
- expression->AsLiteral()->handle()->IsJSRegExp() ||
- expression->AsLiteral()->IsNull())) {
- // Omit evaluating the value of the primitive literal.
- // It will be discarded anyway, and can have no side effect.
- frame_->Push(FACTORY->undefined_value());
- } else {
- Load(node->expression());
- frame_->SetElementAt(0, FACTORY->undefined_value());
- }
-
- } else {
- if (in_safe_int32_mode()) {
- Visit(node->expression());
- Result value = frame_->Pop();
- ASSERT(value.is_untagged_int32());
- // Registers containing an int32 value are not multiply used.
- ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
- value.ToRegister();
- switch (op) {
- case Token::SUB: {
- __ neg(value.reg());
- frame_->Push(&value);
- if (node->no_negative_zero()) {
- // -MIN_INT is MIN_INT with the overflow flag set.
- unsafe_bailout_->Branch(overflow);
- } else {
- // MIN_INT and 0 both have bad negations, and both have their low 31 bits clear.
- __ test(value.reg(), Immediate(0x7FFFFFFF));
- unsafe_bailout_->Branch(zero);
- }
- break;
- }
- case Token::BIT_NOT: {
- __ not_(value.reg());
- frame_->Push(&value);
- break;
- }
- case Token::ADD: {
- // Unary plus has no effect on int32 values.
- frame_->Push(&value);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- Load(node->expression());
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- bool no_negative_zero = node->expression()->no_negative_zero();
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- Result operand = frame_->Pop();
- Result answer = frame_->CallStub(&stub, &operand);
- answer.set_type_info(TypeInfo::Number());
- frame_->Push(&answer);
- break;
- }
- case Token::BIT_NOT: {
- // Smi check.
- JumpTarget smi_label;
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- if (operand_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg());
- frame_->Spill(operand.reg());
- // Set smi tag bit. It will be reset by the not operation.
- __ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
- __ not_(operand.reg());
- Result answer = operand;
- answer.set_type_info(TypeInfo::Smi());
- frame_->Push(&answer);
- } else {
- __ test(operand.reg(), Immediate(kSmiTagMask));
- smi_label.Branch(zero, &operand, taken);
-
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- Result answer = frame_->CallStub(&stub, &operand);
- continue_label.Jump(&answer);
-
- smi_label.Bind(&answer);
- answer.ToRegister();
- frame_->Spill(answer.reg());
- // Set smi tag bit. It will be reset by the not operation.
- __ lea(answer.reg(), Operand(answer.reg(), kSmiTagMask));
- __ not_(answer.reg());
-
- continue_label.Bind(&answer);
- answer.set_type_info(TypeInfo::Integer32());
- frame_->Push(&answer);
- }
- break;
- }
- case Token::ADD: {
- // Smi check.
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- __ test(operand.reg(), Immediate(kSmiTagMask));
- continue_label.Branch(zero, &operand, taken);
-
- frame_->Push(&operand);
- Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
- CALL_FUNCTION, 1);
-
- continue_label.Bind(&answer);
- if (operand_info.IsSmi()) {
- answer.set_type_info(TypeInfo::Smi());
- } else if (operand_info.IsInteger32()) {
- answer.set_type_info(TypeInfo::Integer32());
- } else {
- answer.set_type_info(TypeInfo::Number());
- }
- frame_->Push(&answer);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- }
-}
-
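-
- // Illustrative sketch of the negation bailout above: 0 and kMinInt are the
- // only int32 inputs whose negation is unsafe (-0 is not an int32 value, and
- // -kMinInt overflows), and they are exactly the values whose low 31 bits
- // are all zero.
- static inline bool IllustrativeNegationUnsafe(int32_t value) {
- return (value & 0x7FFFFFFF) == 0; // True only for 0 and -2^31.
- }
-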
-
-// The value in dst was optimistically incremented or decremented. The
-// result overflowed or was not smi tagged. Undo the operation, call
-// into the runtime to convert the argument to a number, and call the
-// specialized add or subtract stub. The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
- DeferredPrefixCountOperation(Register dst,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
- // Undo the optimistic smi operation.
- if (is_increment_) {
- __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
- }
- Register left;
- if (input_type_.IsNumber()) {
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- left = eax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The value in dst was optimistically incremented or decremented. The
-// result overflowed or was not smi tagged. Undo the operation and call
-// into the runtime to convert the argument to a number. Update the
-// original value in old. Call the specialized add or subtract stub.
-// The result is left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
- DeferredPostfixCountOperation(Register dst,
- Register old,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst),
- old_(old),
- is_increment_(is_increment),
- input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Register old_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
- // Undo the optimistic smi operation.
- if (is_increment_) {
- __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
- }
- Register left;
- if (input_type_.IsNumber()) {
- __ push(dst_); // Save the input to use as the old value.
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(eax); // Save the result of ToNumber to use as the old value.
- left = eax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
- __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CountOperation");
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
- // Postfix operations need a stack slot under the reference to hold
- // the old value while the new value is being stored. This way, if
- // storing the new value requires a call, the old value is already in
- // the frame where it can be spilled.
- if (is_postfix) frame_->Push(Smi::FromInt(0));
-
- // A constant reference is never stored to, so it is not a compound
- // assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!is_postfix) frame_->Push(Smi::FromInt(0));
- return;
- }
- target.TakeValue();
-
- Result new_value = frame_->Pop();
- new_value.ToRegister();
-
- Result old_value; // Only allocated in the postfix case.
- if (is_postfix) {
- // Allocate a temporary to preserve the old value.
- old_value = allocator_->Allocate();
- ASSERT(old_value.is_valid());
- __ mov(old_value.reg(), new_value.reg());
-
- // The return value for postfix operations is ToNumber(input).
- // Keep more precise type info if the input is some kind of
- // number already. If the input is not a number we have to wait
- // for the deferred code to convert it.
- if (new_value.type_info().IsNumber()) {
- old_value.set_type_info(new_value.type_info());
- }
- }
-
- // Ensure the new value is writable.
- frame_->Spill(new_value.reg());
-
- Result tmp;
- if (new_value.is_smi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(new_value.reg());
- } else {
- // We don't know statically if the input is a smi.
- // In order to combine the overflow and the smi tag check, we need
- // to be able to allocate a byte register. We attempt to do so
- // without spilling. If we fail, we will generate separate overflow
- // and smi tag checks.
- // We allocate and clear the temporary byte register before performing
- // the count operation, because clearing it with xor would otherwise
- // destroy the overflow flag.
- tmp = allocator_->AllocateByteRegisterWithoutSpilling();
- if (tmp.is_valid()) {
- __ Set(tmp.reg(), Immediate(0));
- }
- }
-
- if (is_increment) {
- __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
- } else {
- __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
- }
-
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment,
- new_value.type_info());
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment,
- new_value.type_info());
- }
-
- if (new_value.is_smi()) {
- // If the input is a smi, we only need to check for overflow.
- deferred->Branch(overflow);
- } else {
- // If the count operation didn't overflow and the result is a valid
- // smi, we're done. Otherwise, we jump to the deferred slow-case
- // code.
- // We combine the overflow and the smi tag check if we could
- // successfully allocate a temporary byte register.
- if (tmp.is_valid()) {
- __ setcc(overflow, tmp.reg());
- __ or_(Operand(tmp.reg()), new_value.reg());
- __ test(tmp.reg(), Immediate(kSmiTagMask));
- tmp.Unuse();
- deferred->Branch(not_zero);
- } else {
- // Otherwise we test separately for overflow and smi tag.
- deferred->Branch(overflow);
- __ test(new_value.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- }
- }
- deferred->BindExit();
-
- // Postfix count operations return their input converted to
- // number. The case when the input is already a number is covered
- // above in the allocation code for old_value.
- if (is_postfix && !new_value.type_info().IsNumber()) {
- old_value.set_type_info(TypeInfo::Number());
- }
-
- // The result of ++ or -- is an Integer32 if the
- // input is a smi. Otherwise it is a number.
- if (new_value.is_smi()) {
- new_value.set_type_info(TypeInfo::Integer32());
- } else {
- new_value.set_type_info(TypeInfo::Number());
- }
-
- // Postfix: store the old value in the allocated slot under the
- // reference.
- if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
- frame_->Push(&new_value);
- // Non-constant: update the reference.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
- }
-
- // Postfix: drop the new value and use the old.
- if (is_postfix) frame_->Drop();
-}
-
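-
- // Hedged sketch of the combined check above: setcc materializes the
- // overflow flag as a 0/1 byte, which is OR-ed into the result before the
- // smi-tag test, so a single branch covers both "overflowed" and "result is
- // not a smi".
- static inline bool IllustrativeCountNeedsSlowPath(bool overflowed,
- int32_t result) {
- return (((overflowed ? 1 : 0) | result) & 1) != 0; // kSmiTagMask == 1.
- }
-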
-
-void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
- Token::Value op = node->op();
- Comment cmnt(masm_, "[ Int32BinaryOperation");
- ASSERT(in_safe_int32_mode());
- ASSERT(safe_int32_mode_enabled());
- ASSERT(FLAG_safe_int32_compiler);
-
- if (op == Token::COMMA) {
- // Discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- ASSERT(right.is_untagged_int32());
- ASSERT(left.is_untagged_int32());
- // Registers containing an int32 value are not multiply used.
- ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
- ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
-
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- UNREACHABLE();
- break;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- if (left.is_constant() || right.is_constant()) {
- int32_t value; // Put constant in value, non-constant in left.
- // Constants are known to be int32 values from static analysis,
- // or else will be converted to int32 by the implicit ECMA [[ToInt32]].
- if (left.is_constant()) {
- ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
- value = NumberToInt32(*left.handle());
- left = right;
- } else {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- value = NumberToInt32(*right.handle());
- }
-
- left.ToRegister();
- if (op == Token::BIT_OR) {
- __ or_(Operand(left.reg()), Immediate(value));
- } else if (op == Token::BIT_XOR) {
- __ xor_(Operand(left.reg()), Immediate(value));
- } else {
- ASSERT(op == Token::BIT_AND);
- __ and_(Operand(left.reg()), Immediate(value));
- }
- } else {
- ASSERT(left.is_register());
- ASSERT(right.is_register());
- if (op == Token::BIT_OR) {
- __ or_(left.reg(), Operand(right.reg()));
- } else if (op == Token::BIT_XOR) {
- __ xor_(left.reg(), Operand(right.reg()));
- } else {
- ASSERT(op == Token::BIT_AND);
- __ and_(left.reg(), Operand(right.reg()));
- }
- }
- frame_->Push(&left);
- right.Unuse();
- break;
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- bool test_shr_overflow = false;
- left.ToRegister();
- if (right.is_constant()) {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
- if (op == Token::SAR) {
- __ sar(left.reg(), shift_amount);
- } else if (op == Token::SHL) {
- __ shl(left.reg(), shift_amount);
- } else {
- ASSERT(op == Token::SHR);
- __ shr(left.reg(), shift_amount);
- if (shift_amount == 0) test_shr_overflow = true;
- }
- } else {
- // Move right to ecx.
- if (left.is_register() && left.reg().is(ecx)) {
- right.ToRegister();
- __ xchg(left.reg(), right.reg());
- left = right; // Left is unused here, copy of right unused by Push.
- } else {
- right.ToRegister(ecx);
- left.ToRegister();
- }
- if (op == Token::SAR) {
- __ sar_cl(left.reg());
- } else if (op == Token::SHL) {
- __ shl_cl(left.reg());
- } else {
- ASSERT(op == Token::SHR);
- __ shr_cl(left.reg());
- test_shr_overflow = true;
- }
- }
- {
- Register left_reg = left.reg();
- frame_->Push(&left);
- right.Unuse();
- if (test_shr_overflow && !node->to_int32()) {
- // Uint32 results with the top bit set are not Int32 values.
- // If they will be forced to Int32 anyway, skip the test.
- // An explicit test is needed because shr with a shift amount of 0
- // does not set the flags.
- __ test(left_reg, Operand(left_reg));
- unsafe_bailout_->Branch(sign);
- }
- }
- break;
- }
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- if ((left.is_constant() && op != Token::SUB) || right.is_constant()) {
- int32_t value; // Put constant in value, non-constant in left.
- if (right.is_constant()) {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- value = NumberToInt32(*right.handle());
- } else {
- ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
- value = NumberToInt32(*left.handle());
- left = right;
- }
-
- left.ToRegister();
- if (op == Token::ADD) {
- __ add(Operand(left.reg()), Immediate(value));
- } else if (op == Token::SUB) {
- __ sub(Operand(left.reg()), Immediate(value));
- } else {
- ASSERT(op == Token::MUL);
- __ imul(left.reg(), left.reg(), value);
- }
- } else {
- left.ToRegister();
- ASSERT(left.is_register());
- ASSERT(right.is_register());
- if (op == Token::ADD) {
- __ add(left.reg(), Operand(right.reg()));
- } else if (op == Token::SUB) {
- __ sub(left.reg(), Operand(right.reg()));
- } else {
- ASSERT(op == Token::MUL);
- // We have statically verified that a negative zero can be ignored.
- __ imul(left.reg(), Operand(right.reg()));
- }
- }
- right.Unuse();
- frame_->Push(&left);
- if (!node->to_int32() || op == Token::MUL) {
- // If ToInt32 is called on the result of ADD or SUB, we don't
- // care about overflow.
- // The result of MUL, however, may not be representable precisely
- // as a double, so we always have to check it for overflow.
- unsafe_bailout_->Branch(overflow);
- }
- break;
- case Token::DIV:
- case Token::MOD: {
- if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
- if (left.is_register() && left.reg().is(edi)) {
- right.ToRegister(ebx);
- } else {
- right.ToRegister(edi);
- }
- }
- left.ToRegister(eax);
- Result edx_reg = allocator_->Allocate(edx);
- right.ToRegister();
- // The results are unused here because BreakTarget::Branch cannot handle
- // live results.
- Register right_reg = right.reg();
- left.Unuse();
- right.Unuse();
- edx_reg.Unuse();
- __ cmp(right_reg, 0);
- // Ensure divisor is positive: no chance of non-int32 or -0 result.
- unsafe_bailout_->Branch(less_equal);
- __ cdq(); // Sign-extend eax into edx:eax
- __ idiv(right_reg);
- if (op == Token::MOD) {
- // Negative zero can arise as a negative dividend with a zero result.
- if (!node->no_negative_zero()) {
- Label not_negative_zero;
- __ test(edx, Operand(edx));
- __ j(not_zero, &not_negative_zero);
- __ test(eax, Operand(eax));
- unsafe_bailout_->Branch(negative);
- __ bind(&not_negative_zero);
- }
- Result edx_result(edx, TypeInfo::Integer32());
- edx_result.set_untagged_int32(true);
- frame_->Push(&edx_result);
- } else {
- ASSERT(op == Token::DIV);
- __ test(edx, Operand(edx));
- unsafe_bailout_->Branch(not_equal);
- Result eax_result(eax, TypeInfo::Integer32());
- eax_result.set_untagged_int32(true);
- frame_->Push(&eax_result);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
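// Editor's note: a minimal C++ sketch (not part of the original diff) of the
// two bailout conditions checked above, assuming "untagged int32" means a
// plain signed 32-bit register value. JavaScript's >>> yields a uint32, so a
// result with the top bit set does not fit; and a zero remainder from a
// negative dividend would be JavaScript's -0, which has no int32 encoding.
#include <stdint.h>

static bool ShrResultFitsInt32(uint32_t value, int shift) {
  uint32_t result = value >> (shift & 0x1F);
  // Mirrors the generated "test reg, reg; j(sign, bailout)" sequence.
  return static_cast<int32_t>(result) >= 0;
}

static bool ModProducesNegativeZero(int32_t dividend, int32_t divisor) {
  // The generated code has already bailed out unless divisor > 0.
  return dividend % divisor == 0 && dividend < 0;  // e.g. -4 % 2 is -0 in JS.
}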
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not
- // control flow), we force the right hand side to do the same. This
- // is necessary because we assume that if we get control flow on the
- // last path out of an expression we got it on all paths.
- if (node->op() == Token::AND) {
- ASSERT(!in_safe_int32_mode());
- JumpTarget is_true;
- ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The current false target was used as the fall-through. If
- // there are no dangling jumps to is_true then the left
- // subexpression was unconditionally false. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_true.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current false target was a forward jump then we have a
- // valid frame, we have just bound the false target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->false_target()->Unuse();
- destination()->false_target()->Jump();
- }
- is_true.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have actually just jumped to or bound the current false
- // target but the current control destination is not marked as
- // used.
- destination()->Use(false);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_true
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_true
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&pop_and_continue, &exit, true);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- ASSERT(!in_safe_int32_mode());
- JumpTarget is_false;
- ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.true_was_fall_through()) {
- // The current true target was used as the fall-through. If
- // there are no dangling jumps to is_false then the left
- // subexpression was unconditionally true. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_false.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current true target was a forward jump then we have a
- // valid frame, we have just bound the true target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->true_target()->Unuse();
- destination()->true_target()->Jump();
- }
- is_false.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have just jumped to or bound the current true target but
- // the current control destination is not marked as used.
- destination()->Use(true);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_false
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_false
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&exit, &pop_and_continue, false);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
- }
-}
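// Editor's note: an illustrative sketch of the ECMA-262 section 11.11
// semantics implemented above, using double as a stand-in value type. Real
// && and || also short-circuit evaluation of the right operand; this sketch
// only shows which operand value is produced, and why it need not be a
// boolean.

// ToBoolean for numbers (ECMA-262 section 9.2): false for +0, -0 and NaN.
// The v == v comparison is the standard NaN test.
static bool ToBooleanNumber(double v) { return v == v && v != 0.0; }

static double LogicalAnd(double left, double right) {
  return ToBooleanNumber(left) ? right : left;  // (0 && 5) is 0; (3 && 5) is 5.
}

static double LogicalOr(double left, double right) {
  return ToBooleanNumber(left) ? left : right;  // (0 || 7) is 7; (3 || 7) is 3.
}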
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else if (in_safe_int32_mode()) {
- Visit(node->left());
- Visit(node->right());
- Int32BinaryOperation(node);
- } else {
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
-
- if (node->left()->IsTrivial()) {
- Load(node->right());
- Result right = frame_->Pop();
- frame_->Push(node->left());
- frame_->Push(&right);
- } else {
- Load(node->left());
- Load(node->right());
- }
- GenericBinaryOperation(node, overwrite_mode);
- }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- ASSERT(!in_safe_int32_mode());
- frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CompareOperation");
-
- bool left_already_loaded = false;
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
- // Load the operand and move it to a register.
- LoadTypeofExpression(operation->expression());
- Result answer = frame_->Pop();
- answer.ToRegister();
-
- if (check->Equals(HEAP->number_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->true_target()->Branch(zero);
- frame_->Spill(answer.reg());
- __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ cmp(answer.reg(), FACTORY->heap_number_map());
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(HEAP->string_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
-
- // It can be an undetectable string object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
- temp.Unuse();
- answer.Unuse();
- destination()->Split(below);
-
- } else if (check->Equals(HEAP->boolean_symbol())) {
- __ cmp(answer.reg(), FACTORY->true_value());
- destination()->true_target()->Branch(equal);
- __ cmp(answer.reg(), FACTORY->false_value());
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(HEAP->undefined_symbol())) {
- __ cmp(answer.reg(), FACTORY->undefined_value());
- destination()->true_target()->Branch(equal);
-
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
-
- // It can be an undetectable object.
- frame_->Spill(answer.reg());
- __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- answer.Unuse();
- destination()->Split(not_zero);
-
- } else if (check->Equals(HEAP->function_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- frame_->Spill(answer.reg());
- __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
- destination()->true_target()->Branch(equal);
- // Regular expressions are callable, so typeof == 'function'.
- __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
- answer.Unuse();
- destination()->Split(equal);
- } else if (check->Equals(HEAP->object_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- __ cmp(answer.reg(), FACTORY->null_value());
- destination()->true_target()->Branch(equal);
-
- Result map = allocator()->Allocate();
- ASSERT(map.is_valid());
- // Regular expressions are typeof == 'function', not 'object'.
- __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- // Do a range test for JSObject type. We can't use
- // MacroAssembler::IsInstanceJSObjectType, because we are using a
- // ControlDestination, so we copy its implementation here.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- answer.Unuse();
- map.Unuse();
- destination()->Split(below_equal);
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- answer.Unuse();
- destination()->Goto(false);
- }
- return;
- } else if (op == Token::LT &&
- right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsHeapNumber()) {
- Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
- if (check->value() == 2147483648.0) { // 0x80000000.
- Load(left);
- left_already_loaded = true;
- Result lhs = frame_->Pop();
- lhs.ToRegister();
- __ test(lhs.reg(), Immediate(kSmiTagMask));
- destination()->true_target()->Branch(zero); // All Smis are less.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
- __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
- __ cmp(scratch.reg(), FACTORY->heap_number_map());
- JumpTarget not_a_number;
- not_a_number.Branch(not_equal, &lhs);
- __ mov(scratch.reg(),
- FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
- __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
- not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf.
- const uint32_t borderline_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
- scratch.Unuse();
- lhs.Unuse();
- destination()->true_target()->Branch(less);
- destination()->false_target()->Jump();
-
- not_a_number.Bind(&lhs);
- frame_->Push(&lhs);
- }
- }
-
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN: {
- if (!left_already_loaded) Load(left);
- Load(right);
- Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
- frame_->Push(&answer); // push the result
- return;
- }
- case Token::INSTANCEOF: {
- if (!left_already_loaded) Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- Result answer = frame_->CallStub(&stub, 2);
- answer.ToRegister();
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- destination()->Split(zero);
- return;
- }
- default:
- UNREACHABLE();
- }
-
- if (left->IsTrivial()) {
- if (!left_already_loaded) {
- Load(right);
- Result right_result = frame_->Pop();
- frame_->Push(left);
- frame_->Push(&right_result);
- } else {
- Load(right);
- }
- } else {
- if (!left_already_loaded) Load(left);
- Load(right);
- }
- Comparison(node, cc, strict, destination());
-}
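// Editor's note: a sketch of the exponent test emitted above for
// "lhs < 2147483648.0". For a positive, finite double, value < 2^31 exactly
// when its biased exponent field is below kExponentBias + 31. The constants
// below are plain IEEE-754 facts (bias 1023, exponent starting at bit 20 of
// the high word), the same values the HeapNumber constants encode.
#include <stdint.h>
#include <string.h>

static uint32_t DoubleHighWord(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);  // Sign, exponent, mantissa top.
}

static bool PositiveDoubleLessThanTwoTo31(double d) {
  const uint32_t kExponentBias = 1023;
  const int kExponentShift = 20;
  const uint32_t kBorderline = (kExponentBias + 31) << kExponentShift;
  return DoubleHighWord(d) < kBorderline;  // True for 2147483647.0, say.
}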
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Result operand = frame_->Pop();
- operand.ToRegister();
- __ cmp(operand.reg(), FACTORY->null_value());
- if (node->is_strict()) {
- operand.Unuse();
- destination()->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- destination()->true_target()->Branch(equal);
- __ cmp(operand.reg(), FACTORY->undefined_value());
- destination()->true_target()->Branch(equal);
- __ test(operand.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- temp.Unuse();
- operand.Unuse();
- destination()->Split(not_zero);
- }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
- && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
- && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
- && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
- && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- DeferredReferenceGetNamedValue(Register dst,
- Register receiver,
- Handle<String> name,
- bool is_contextual)
- : dst_(dst),
- receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual),
- is_dont_delete_(false) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- void set_is_dont_delete(bool value) {
- ASSERT(is_contextual_);
- is_dont_delete_ = value;
- }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Handle<String> name_;
- bool is_contextual_;
- bool is_dont_delete_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
- if (!receiver_.is(eax)) {
- __ mov(eax, receiver_);
- }
- __ Set(ecx, Immediate(name_));
- Handle<Code> ic(masm()->isolate()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- RelocInfo::Mode mode = is_contextual_
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- __ call(ic, mode);
- // The call must be followed by:
- // - a test eax instruction to indicate that the inobject property
- // case was inlined.
- // - a mov ecx or mov edx instruction to indicate that the
- // contextual property load was inlined.
- //
- // Store the delta to the map check instruction here in the test
- // instruction. Use masm_-> instead of the __ macro since the
- // latter can't return a value.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- Counters* counters = masm()->isolate()->counters();
- if (is_contextual_) {
- masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
- __ IncrementCounter(counters->named_load_global_inline_miss(), 1);
- if (is_dont_delete_) {
- __ IncrementCounter(counters->dont_delete_hint_miss(), 1);
- }
- } else {
- masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(counters->named_load_inline_miss(), 1);
- }
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
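// Editor's note: illustrative only. The "test eax, imm32" emitted after the
// IC call above is never executed for its flags; its immediate carries
// -delta_to_patch_site so the IC miss handler can walk back from the call's
// return address to the inlined map-check cmp and patch it. A sketch of the
// decoding side, assuming the one-byte 0xA9 (test eax, imm32) encoding:
#include <stdint.h>
#include <string.h>

static bool DecodePatchSiteDelta(const uint8_t* return_address,
                                 int32_t* delta_out) {
  if (return_address[0] != 0xA9) return false;  // Not "test eax, imm32".
  int32_t negated_delta;
  memcpy(&negated_delta, return_address + 1, sizeof(negated_delta));
  *delta_out = -negated_delta;  // Distance back to the inlined map check.
  return true;
}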
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
- if (!receiver_.is(eax)) {
- // Register eax is available for key.
- if (!key_.is(eax)) {
- __ mov(eax, key_);
- }
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- } else if (!key_.is(edx)) {
- // Register edx is available for receiver.
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- if (!key_.is(eax)) {
- __ mov(eax, key_);
- }
- } else {
- __ xchg(edx, eax);
- }
- // Calculate the delta from the IC call instruction to the map check
- // cmp instruction in the inlined version. This delta is stored in
- // a test(eax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the cmp instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic(masm()->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->test(eax, Immediate(-delta_to_patch_site));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
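// Editor's note: a C++ model (illustrative only) of the register-placement
// logic above: put 'key' in eax and 'receiver' in edx without destroying
// either value before it is read. reg[0] models eax, reg[1] models edx, and
// assignment models a mov from wherever the value currently lives.
static void PlaceKeyAndReceiver(int reg[2], int key, int receiver) {
  if (reg[0] != receiver) {
    // eax may be overwritten freely, so load the key first.
    if (reg[0] != key) reg[0] = key;
    if (reg[1] != receiver) reg[1] = receiver;
  } else if (reg[1] != key) {
    // eax holds the receiver; edx may be overwritten, so fill it first.
    reg[1] = receiver;
    if (reg[0] != key) reg[0] = key;
  } else {
    // eax holds the receiver and edx holds the key: one xchg fixes both.
    int tmp = reg[0]; reg[0] = reg[1]; reg[1] = tmp;
  }
}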
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- Register scratch,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- scratch_(scratch),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- Register scratch_;
- Label patch_site_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
- // Move value_ to eax, key_ to ecx, and receiver_ to edx.
- Register old_value = value_;
-
- // First, move value to eax.
- if (!value_.is(eax)) {
- if (key_.is(eax)) {
- // Move key_ out of eax, preferably to ecx.
- if (!value_.is(ecx) && !receiver_.is(ecx)) {
- __ mov(ecx, key_);
- key_ = ecx;
- } else {
- __ mov(scratch_, key_);
- key_ = scratch_;
- }
- }
- if (receiver_.is(eax)) {
- // Move receiver_ out of eax, preferably to edx.
- if (!value_.is(edx) && !key_.is(edx)) {
- __ mov(edx, receiver_);
- receiver_ = edx;
- } else {
- // Both moves to scratch are from eax; also, no valid path hits both.
- __ mov(scratch_, receiver_);
- receiver_ = scratch_;
- }
- }
- __ mov(eax, value_);
- value_ = eax;
- }
-
- // Now value_ is in eax. Move the other two to the right positions.
- // We do not update the variables key_ and receiver_ to ecx and edx.
- if (key_.is(ecx)) {
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- } else if (key_.is(edx)) {
- if (receiver_.is(ecx)) {
- __ xchg(edx, ecx);
- } else {
- __ mov(ecx, key_);
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- }
- } else { // Key is not in edx or ecx.
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- __ mov(ecx, key_);
- }
-
- // Call the IC stub.
- Handle<Code> ic(masm()->isolate()->builtins()->builtin(
- (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->test(eax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC) register.
- if (!old_value.is(eax)) __ mov(old_value, eax);
-}
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
-
- Isolate* isolate = masm()->isolate();
- Factory* factory = isolate->factory();
- Counters* counters = isolate->counters();
-
- bool contextual_load_in_builtin =
- is_contextual &&
- (isolate->bootstrapper()->IsActive() ||
- (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
- Result result;
- // Do not inline in the global code or when not in loop.
- if (scope()->is_global_scope() ||
- loop_nesting() == 0 ||
- contextual_load_in_builtin) {
- Comment cmnt(masm(), "[ Load from named Property");
- frame()->Push(name);
-
- RelocInfo::Mode mode = is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- result = frame()->CallLoadIC(mode);
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- } else {
- // Inline the property load.
- Comment cmnt(masm(), is_contextual
- ? "[ Inlined contextual property load"
- : "[ Inlined named property load");
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(),
- receiver.reg(),
- name,
- is_contextual);
-
- if (!is_contextual) {
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
-
- __ bind(deferred->patch_site());
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(factory->null_value()));
- // This branch is always a forwards branch, so it has a fixed size,
- // which allows the assert below to succeed and patching to work.
- deferred->Branch(not_equal);
-
- // The delta from the patch label to the actual load must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
- LoadIC::kOffsetToLoadInstruction);
-
- if (is_contextual) {
- // Load the (initially invalid) cell and get its value.
- masm()->mov(result.reg(), factory->null_value());
- if (FLAG_debug_code) {
- __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- factory->global_property_cell_map());
- __ Assert(equal, "Uninitialized inlined contextual load");
- }
- __ mov(result.reg(),
- FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
- __ cmp(result.reg(), factory->the_hole_value());
- deferred->Branch(equal);
- bool is_dont_delete = false;
- if (!info_->closure().is_null()) {
- // When doing lazy compilation we can check if the global cell
- // already exists and use its "don't delete" status as a hint.
- AssertNoAllocation no_gc;
- v8::internal::GlobalObject* global_object =
- info_->closure()->context()->global();
- LookupResult lookup;
- global_object->LocalLookupRealNamedProperty(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == NORMAL) {
- ASSERT(lookup.holder() == global_object);
- ASSERT(global_object->property_dictionary()->ValueAt(
- lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
- is_dont_delete = lookup.IsDontDelete();
- }
- }
- deferred->set_is_dont_delete(is_dont_delete);
- if (!is_dont_delete) {
- __ cmp(result.reg(), factory->the_hole_value());
- deferred->Branch(equal);
- } else if (FLAG_debug_code) {
- __ cmp(result.reg(), factory->the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
- __ IncrementCounter(counters->named_load_global_inline(), 1);
- if (is_dont_delete) {
- __ IncrementCounter(counters->dont_delete_hint_hit(), 1);
- }
- } else {
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
- __ IncrementCounter(counters->named_load_inline(), 1);
- }
-
- deferred->BindExit();
- }
- ASSERT(frame()->height() == original_height - 1);
- return result;
-}
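// Editor's note: a sketch of why the inline load above starts with
// offset = kMaxInt. These are ia32 encoding facts, not V8 API: a
// "mov reg, [base + disp]" uses an 8-bit displacement when it fits in a
// signed byte and a 32-bit one otherwise. The patchable mov must use the
// 32-bit form so an arbitrary field offset can be written in later, so the
// initial dummy offset must not fit in 8 bits.
static bool ForcesDisp32(int offset) {
  return offset < -128 || offset > 127;  // Outside the signed imm8 range.
}
// ForcesDisp32(kMaxInt) is true, so the patchable mov has a fixed length.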
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
- Label patch_site;
-
- // Get the value and receiver from the stack.
- Result value = frame()->Pop();
- value.ToRegister();
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- // Allocate result register.
- result = allocator()->Allocate();
- ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- slow.Branch(zero, &value, &receiver);
-
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- __ bind(&patch_site);
- masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->null_value()));
- // This branch is always a forwards branch, so it has a fixed size,
- // which allows the assert below to succeed and patching to work.
- slow.Branch(not_equal, &value, &receiver);
-
- // The delta from the patch label to the store offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
- StoreIC::kOffsetToStoreInstruction);
-
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- __ mov(FieldOperand(receiver.reg(), offset), value.reg());
- __ mov(result.reg(), Operand(value.reg()));
-
- // Allocate scratch register for write barrier.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- // The write barrier clobbers all input registers, so spill the
- // receiver and the value.
- frame_->Spill(receiver.reg());
- frame_->Spill(value.reg());
-
- // If the receiver and the value share a register allocate a new
- // register for the receiver.
- if (receiver.reg().is(value.reg())) {
- receiver = allocator()->Allocate();
- ASSERT(receiver.is_valid());
- __ mov(receiver.reg(), Operand(value.reg()));
- }
-
- // Update the write barrier. To save instructions in the inlined
- // version we do not filter smis.
- Label skip_write_barrier;
- __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
- int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ lea(scratch.reg(), Operand(receiver.reg(), offset));
- __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
- if (FLAG_debug_code) {
- __ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- }
- __ bind(&skip_write_barrier);
- value.Unuse();
- scratch.Unuse();
- receiver.Unuse();
- done.Jump(&result);
-
- slow.Bind(&value, &receiver);
- frame()->Push(&receiver);
- frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // Encode the offset to the map check instruction and the offset
- // to the write barrier store address computation in a test eax
- // instruction.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ test(eax,
- Immediate((delta_to_record_write << 16) | delta_to_patch_site));
- done.Bind(&result);
- }
-
- ASSERT_EQ(expected_height, frame()->height());
- return result;
-}
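// Editor's note: a sketch of the two-delta encoding used in the slow path
// above (helper names are hypothetical). Both distances fit in 16 bits, so
// a single test-eax immediate can carry the offset to the map check in the
// low half and the offset to the write-barrier address computation in the
// high half.
#include <stdint.h>
#include <assert.h>

static uint32_t PackDeltas(int delta_to_record_write, int delta_to_patch_site) {
  assert(0 <= delta_to_record_write && delta_to_record_write < (1 << 16));
  assert(0 <= delta_to_patch_site && delta_to_patch_site < (1 << 16));
  return (static_cast<uint32_t>(delta_to_record_write) << 16) |
         static_cast<uint32_t>(delta_to_patch_site);
}

static void UnpackDeltas(uint32_t immediate,
                         int* delta_to_record_write,
                         int* delta_to_patch_site) {
  *delta_to_record_write = static_cast<int>(immediate >> 16);
  *delta_to_patch_site = static_cast<int>(immediate & 0xFFFF);
}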
-
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Inline the array load code if inside a loop. We do not know the
- // receiver map yet, so we initially generate the code with a check
- // against an invalid map. In the inline cache code, we patch the map
- // check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // If key and receiver are shared registers on the frame, their values will
- // be automatically saved and restored when going to deferred code.
- // The result is in elements, which is guaranteed non-shared.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(elements.reg(),
- receiver.reg(),
- key.reg());
-
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
- // Check that the receiver has the expected map.
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->null_value()));
- deferred->Branch(not_equal);
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
- }
-
- // Get the elements array from the receiver.
- __ mov(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ AssertFastElements(elements.reg());
-
- // Check that the key is within bounds.
- __ cmp(key.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole.
- // Key holds a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(elements.reg(),
- FieldOperand(elements.reg(),
- key.reg(),
- times_2,
- FixedArray::kHeaderSize));
- result = elements;
- __ cmp(Operand(result.reg()), Immediate(FACTORY->the_hole_value()));
- deferred->Branch(equal);
- __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(), 1);
-
- deferred->BindExit();
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 2);
- return result;
-}
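// Editor's note: the shape of the inlined fast path above, as a C++ sketch
// with hypothetical untagged types (real V8 works on tagged words and a
// patched map check): bounds check against the elements length, load, then
// reject the hole sentinel so deleted elements fall back to the IC.
#include <stddef.h>

static const int kTheHoleSentinel = -1;  // Stand-in for the hole value.

static bool FastKeyedLoad(const int* elements, size_t length, size_t key,
                          int* value_out) {
  if (key >= length) return false;              // deferred->Branch(above_equal)
  int value = elements[key];
  if (value == kTheHoleSentinel) return false;  // deferred->Branch(equal)
  *value_out = value;
  return true;
}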
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
- // Get the receiver, key and value into registers.
- result = frame()->Pop();
- Result key = frame()->Pop();
- Result receiver = frame()->Pop();
-
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid());
- Result tmp2 = allocator_->Allocate();
- ASSERT(tmp2.is_valid());
-
- // Determine whether the value is a constant before putting it in a
- // register.
- bool value_is_constant = result.is_constant();
-
- // Make sure that value, key and receiver are in registers.
- result.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
-
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(result.reg(),
- key.reg(),
- receiver.reg(),
- tmp.reg(),
- strict_mode_flag());
-
- // Check that the receiver is not a smi.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
- }
-
- // Check that the receiver is a JSArray.
- __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
- deferred->Branch(not_equal);
-
- // Get the elements array from the receiver and check that it is not a
- // dictionary.
- __ mov(tmp.reg(),
- FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
- // Check whether it is possible to omit the write barrier. If the elements
- // array is in new space or the value written is a smi, we can safely update
- // the elements array without a write barrier.
- Label in_new_space;
- __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
- if (!value_is_constant) {
- __ test(result.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- }
-
- __ bind(&in_new_space);
- // Bind the deferred code patch site to be able to locate the fixed
- // array map comparison. When debugging, we patch this comparison to
- // always fail so that we will hit the IC call in the deferred code
- // which will allow the debugger to break for fast case stores.
- __ bind(deferred->patch_site());
- __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis (because the fixed array check above ensures the
- // elements are in fast case). Use unsigned comparison to handle negative
- // keys.
- __ cmp(key.reg(),
- FieldOperand(receiver.reg(), JSArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Store the value.
- __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
- __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(), 1);
-
- deferred->BindExit();
- } else {
- result = frame()->CallKeyedStoreIC(strict_mode_flag());
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 3);
- return result;
-}
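// Editor's note: the write-barrier elision rule used above, as a one-line
// sketch (the two predicates are assumptions standing in for V8's heap
// tests). A store can only create an old-to-new pointer that the collector
// must track if the stored value is a heap object and the elements array
// itself lives outside new space.
static bool NeedsWriteBarrier(bool elements_in_new_space, bool value_is_smi) {
  return !elements_in_new_space && !value_is_smi;
}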
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>::cast(raw_name->handle());
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
-
- // Record the source position for the property load.
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- if (!persist_after_get_) set_unloaded();
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- if (persist_after_get_) cgen_->frame()->Dup();
- Result result = cgen_->EmitNamedLoad(GetName(), is_global);
- if (!persist_after_get_) set_unloaded();
- cgen_->frame()->Push(&result);
- break;
- }
-
- case KEYED: {
- if (persist_after_get_) {
- cgen_->frame()->PushElementAt(1);
- cgen_->frame()->PushElementAt(1);
- }
- Result value = cgen_->EmitKeyedLoad();
- cgen_->frame()->Push(&value);
- if (!persist_after_get_) set_unloaded();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::TakeValue() {
- // For non-constant frame-allocated slots, we invalidate the value in the
- // slot. For all others, we fall back on GetValue.
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(!is_illegal());
- if (type_ != SLOT) {
- GetValue();
- return;
- }
-
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP ||
- slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST ||
- slot->is_arguments()) {
- GetValue();
- return;
- }
-
- // Only non-constant, frame-allocated parameters and locals can
- // reach here. Be careful not to use the optimizations for arguments
- // object access since it may not have been initialized yet.
- ASSERT(!slot->is_arguments());
- if (slot->type() == Slot::PARAMETER) {
- cgen_->frame()->TakeParameterAt(slot->index());
- } else {
- ASSERT(slot->type() == Slot::LOCAL);
- cgen_->frame()->TakeLocalAt(slot->index());
- }
-
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- Result answer = cgen_->EmitNamedStore(GetName(), false);
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
-
- Result answer = cgen_->EmitKeyedStore(property->key()->type());
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case UNLOADED:
- case ILLEGAL:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
#define __ masm.
-
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
-MemCopyFunction CreateMemCopyFunction() {
- HandleScope scope;
- MacroAssembler masm(NULL, 1 * KB);
+OS::MemCopyFunction CreateMemCopyFunction() {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+ if (buffer == NULL) return &MemCopyWrapper;
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable, buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10198,13 +84,13 @@
if (FLAG_debug_code) {
__ cmp(Operand(esp, kSizeOffset + stack_offset),
- Immediate(kMinComplexMemCopy));
+ Immediate(OS::kMinComplexMemCopy));
Label ok;
__ j(greater_equal, &ok);
__ int3();
__ bind(&ok);
}
- if (masm.isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope enable(SSE2);
__ push(edi);
__ push(esi);
@@ -10232,7 +118,6 @@
__ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
- __ IncrementCounter(masm.isolate()->counters()->memcopy_aligned(), 1);
// Copy loop for aligned source and destination.
__ mov(edx, count);
Register loop_count = ecx;
@@ -10280,7 +165,6 @@
// Copy loop for unaligned source and aligned destination.
// If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
- __ IncrementCounter(masm.isolate()->counters()->memcopy_unaligned(), 1);
__ mov(edx, ecx);
Register loop_count = ecx;
Register count = edx;
@@ -10324,7 +208,6 @@
}
} else {
- __ IncrementCounter(masm.isolate()->counters()->memcopy_noxmm(), 1);
// SSE2 not supported. Unlikely to happen in practice.
__ push(edi);
__ push(esi);
@@ -10371,13 +254,8 @@
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
- // Copy the generated code into an executable chunk and return a pointer
- // to the first instruction in it as a C++ function pointer.
- LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
- if (chunk == NULL) return &MemCopyWrapper;
- memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
- return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
+ CPU::FlushICache(buffer, actual_size);
+ return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}
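// Editor's note: a minimal, self-contained sketch of the pattern the new
// code above uses -- assemble machine code into an executable buffer and
// call it through a function pointer. POSIX mmap stands in for
// OS::Allocate, and the stub is a trivial ia32 "mov eax, 42; ret":
#include <sys/mman.h>
#include <string.h>

typedef int (*StubFunction)();

static StubFunction CreateReturn42Stub() {
  void* buffer = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buffer == MAP_FAILED) return NULL;
  static const unsigned char code[] = {
    0xB8, 0x2A, 0x00, 0x00, 0x00,  // mov eax, 42
    0xC3                           // ret
  };
  memcpy(buffer, code, sizeof(code));
  return reinterpret_cast<StubFunction>(buffer);
}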
#undef __
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index acd651b..8f090b1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,275 +30,18 @@
#include "ast.h"
#include "ic-inl.h"
-#include "jump-target-heavy.h"
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class FrameRegisterState;
-class RegisterAllocator;
-class RegisterFile;
-class RuntimeCallHelper;
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue and SetValue.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types are important; see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Like GetValue except that the slot is expected to be written to before
- // being read from again. The value of the reference may be invalidated,
- // causing subsequent attempts to read it to fail.
- void TakeValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
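// Editor's note: a hypothetical usage shape for the Reference protocol the
// comments above describe -- the reference is created, consumed by a
// get/set, and must end its scope in the UNLOADED state:
//
//   { Reference ref(cgen, expr, true /* persist_after_get */);
//     ref.GetValue();                // Push the value; keep the reference.
//     /* ...compute the new value on the frame... */
//     ref.SetValue(NOT_CONST_INIT);  // Consume the reference -> UNLOADED.
//   }  // ~Reference() requires that the reference was consumed.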
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through. The
-// preferred fall-through must be unbound, the other may be already
-// bound (i.e., a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally. Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
- ControlDestination(JumpTarget* true_target,
- JumpTarget* false_target,
- bool true_is_fall_through)
- : true_target_(true_target),
- false_target_(false_target),
- true_is_fall_through_(true_is_fall_through),
- is_used_(false) {
- ASSERT(true_is_fall_through ? !true_target->is_bound()
- : !false_target->is_bound());
- }
-
- // Accessors for the jump targets. Directly jumping or branching to
- // or binding the targets will not update the destination's state.
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
-
- // True if the destination has been jumped to unconditionally or
- // control has been split to both targets. This predicate does not
- // test whether the targets have been extracted and manipulated as
- // raw jump targets.
- bool is_used() const { return is_used_; }
-
- // True if the destination is used and the true target (respectively
- // false target) was the fall through. If the target is backward,
- // "fall through" included jumping unconditionally to it.
- bool true_was_fall_through() const {
- return is_used_ && true_is_fall_through_;
- }
-
- bool false_was_fall_through() const {
- return is_used_ && !true_is_fall_through_;
- }
-
- // Emit a branch to one of the true or false targets, and bind the
- // other target. Because this binds the fall-through target, it
- // should be emitted in tail position (as the last thing when
- // compiling an expression).
- void Split(Condition cc) {
- ASSERT(!is_used_);
- if (true_is_fall_through_) {
- false_target_->Branch(NegateCondition(cc));
- true_target_->Bind();
- } else {
- true_target_->Branch(cc);
- false_target_->Bind();
- }
- is_used_ = true;
- }
-
- // Emit an unconditional jump in tail position, to the true target
- // (if the argument is true) or the false target. The "jump" will
- // actually bind the jump target if it is forward, jump to it if it
- // is backward.
- void Goto(bool where) {
- ASSERT(!is_used_);
- JumpTarget* target = where ? true_target_ : false_target_;
- if (target->is_bound()) {
- target->Jump();
- } else {
- target->Bind();
- }
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Mark this jump target as used as if Goto had been called, but
- // without generating a jump or binding a label (the control effect
- // should have already happened). This is used when the left
- // subexpression of the short-circuit boolean operators is
- // compiled.
- void Use(bool where) {
- ASSERT(!is_used_);
- ASSERT((where ? true_target_ : false_target_)->is_bound());
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Swap the true and false targets but keep the same actual label as
- // the fall through. This is used when compiling negated
- // expressions, where we want to swap the targets but preserve the
- // state.
- void Invert() {
- JumpTarget* temp_target = true_target_;
- true_target_ = false_target_;
- false_target_ = temp_target;
-
- true_is_fall_through_ = !true_is_fall_through_;
- }
-
- private:
- // True and false jump targets.
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-
- // Before using the destination: true if the true target is the
- // preferred fall through, false if the false target is. After
- // using the destination: true if the true target was actually used
- // as the fall through, false if the false target was.
- bool true_is_fall_through_;
-
- // True if the Split or Goto functions have been called.
- bool is_used_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair). It is threaded through
-// the call stack. Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own control destination.
- CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- // Accessors for the state.
- ControlDestination* destination() const { return destination_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
- // A control destination in case the expression has a control-flow
- // effect.
- ControlDestination* destination_;
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode.
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
// -------------------------------------------------------------------------
// CodeGenerator
-class CodeGenerator: public AstVisitor {
+class CodeGenerator {
public:
- static bool MakeCode(CompilationInfo* info);
-
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
@@ -318,33 +61,7 @@
int pos,
bool right_here = false);
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- bool in_spilled_code() const { return in_spilled_code_; }
- void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
- // Return the operand for the element at |index_as_smi| + |additional_offset|
- // in the FixedArray whose pointer is held in |array|. |index_as_smi| is a Smi.
static Operand FixedArrayElementOperand(Register array,
Register index_as_smi,
int additional_offset = 0) {
@@ -353,445 +70,6 @@
}
private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline bool is_strict_mode();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- // State
- ControlDestination* destination() const { return state_->destination(); }
-
- // Control of side-effect-free int32 expression compilation.
- bool in_safe_int32_mode() { return in_safe_int32_mode_; }
- void set_in_safe_int32_mode(bool value) { in_safe_int32_mode_ = value; }
- bool safe_int32_mode_enabled() {
- return FLAG_safe_int32_compiler && safe_int32_mode_enabled_;
- }
- void set_safe_int32_mode_enabled(bool value) {
- safe_int32_mode_enabled_ = value;
- }
- void set_unsafe_bailout(BreakTarget* unsafe_bailout) {
- unsafe_bailout_ = unsafe_bailout;
- }
-
- // Take the Result that is an untagged int32, and convert it to a tagged
- // Smi or HeapNumber. Remove the untagged_int32 flag from the result.
- void ConvertInt32ResultToNumber(Result* value);
- void ConvertInt32ResultToSmi(Result* value);
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (i.e., it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which cannot be done more than once).
- void GenerateReturnSequence(Result* return_value);
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- Result StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
-
- Operand SlotOperand(Slot* slot, Register tmp);
-
- Operand ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* expr,
- ControlDestination* destination,
- bool force_control);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver();
-
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression);
-
- // Evaluate an expression and place its value on top of the frame,
- // using, or not using, the side-effect-free expression compiler.
- void LoadInSafeInt32Mode(Expression* expr, BreakTarget* unsafe_bailout);
- void LoadWithSafeInt32ModeDisabled(Expression* expr);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
- Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Store the value on top of the expression stack into a slot, leaving the
- // value in place.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Receiver is passed on the frame and consumed.
- Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // If the store is contextual, value is passed on the frame and consumed.
- // Otherwise, receiver and value are passed on the frame and consumed.
- Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Receiver and key are passed on the frame and consumed.
- Result EmitKeyedLoad();
-
- // Receiver, key, and value are passed on the frame and consumed.
- Result EmitKeyedStore(StaticType* key_type);
-
- // Special code for typeof expressions: unfortunately, we must
- // be careful when loading the operand of a 'typeof'
- // expression. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- // Translate the value on top of the frame into control flow to the
- // control destination.
- void ToBoolean(ControlDestination* destination);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode);
-
- // Emits code sequence that jumps to a JumpTarget if the inputs
- // are both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- // Allocates a temporary register, possibly spilling from the frame,
- // if it needs to check both left and right.
- void JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi);
-
- // Emits code sequence that jumps to deferred code if the inputs
- // are not both smis. Cannot be in MacroAssembler because it takes
- // a deferred code object.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred);
-
- // Emits code sequence that jumps to the label if the inputs
- // are not both smis.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- Label* on_non_smi);
-
- // If possible, combine two constant smi values using op to produce
- // a smi result, and push it on the virtual frame, all at compile time.
- // Returns true if it succeeds. Otherwise it has no effect.
- bool FoldConstantSmis(Token::Value op, int left, int right);
-
- // Emit code to perform a binary operation on a constant
- // smi and a likely smi. Consumes the Result operand.
- Result ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> constant_operand,
- bool reversed,
- OverwriteMode overwrite_mode);
-
- // Emit code to perform a binary operation on two likely smis.
- // The code to handle smi arguments is produced inline.
- // Consumes the Results left and right.
- Result LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
-
-
- // Emit code to perform a binary operation on two untagged int32 values.
- // The values are on top of the frame, and the result is pushed on the frame.
- void Int32BinaryOperation(BinaryOperation* node);
-
-
- // Generate a stub call from the virtual frame.
- Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right);
-
- void Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* destination);
-
- // If at least one of the sides is a constant smi, generate optimized code.
- void ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* destination,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition);
-
- void GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest);
-
- // To prevent long attacker-controlled byte sequences, integer constants
- // from the JavaScript source are loaded in two parts if they are larger
- // than 17 bits.
- static const int kMaxSmiInlinedBits = 17;
- bool IsUnsafeSmi(Handle<Object> value);
- // Load an integer constant x into a register target or into the stack using
- // at most 16 bits of user-controlled data per assembly operation.
- void MoveUnsafeSmi(Register target, Handle<Object> value);
- void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
- void PushUnsafeSmi(Handle<Object> value);
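// One way to materialize such a constant without placing more than 16
// attacker-chosen bits in any single instruction (illustrative sketch only;
// the shipped implementation may also mix in jit_cookie_):
//
//   void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
//     ASSERT(value->IsSmi());
//     int bits = reinterpret_cast<int>(*value);
//     __ Set(target, Immediate(bits & 0x0000FFFF));  // low 16 bits
//     __ xor_(target, bits & 0xFFFF0000);            // merge high 16 bits
//   }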
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- Result InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for types.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- // Construct a RegExp exec result with two in-object properties.
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements. Takes three expressions, the object and two
- // indices. This should only be used if the indices are known to be
- // non-negative and within bounds of the elements array at the call site.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- // Check whether two RegExps are equivalent.
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code the generated code
- // corresponds to. Source positions are collected by the assembler and
- // emitted with the relocation information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* stmt);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
- void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should
- // be no frame-external references to (non-reserved) registers.
- bool HasValidEntryRegisters();
-#endif
-
- ZoneList<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- CodeGenState* state_;
- int loop_nesting_;
- bool in_safe_int32_mode_;
- bool safe_int32_mode_enabled_;
-
- // Jump targets.
- // The target of the return from the function.
- BreakTarget function_return_;
- // The target of the bailout from a side-effect-free int32 subexpression.
- BreakTarget* unsafe_bailout_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- // True when we are in code that expects the virtual frame to be fully
- // spilled. Some virtual frame functions are disabled in DEBUG builds when
- // called from spilled code, because they do not leave the virtual frame
- // in a spilled state.
- bool in_spilled_code_;
-
- // A cookie used for JIT imm32 encoding. Initialized to a random number
- // when the command-line flag FLAG_mask_constants_with_cookie is true,
- // zero otherwise.
- int jit_cookie_;
-
- friend class VirtualFrame;
- friend class Isolate;
- friend class JumpTarget;
- friend class Reference;
- friend class Result;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
- friend class LCodeGen;
-
- friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
- friend class InlineRuntimeFunctionsTable;
-
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 286ed7b..615dbfe 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -42,12 +42,12 @@
namespace internal {
void CPU::Setup() {
- CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
- cpu_features->Clear();
- cpu_features->Probe(true);
- if (!cpu_features->IsSupported(SSE2) || Serializer::enabled()) {
- V8::DisableCrankshaft();
- }
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return CpuFeatures::IsSupported(SSE2);
}
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 33c5251..2389948 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index c6342d7..72fdac8 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -641,14 +641,16 @@
__ neg(edx);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(5, eax);
+ __ PrepareCallCFunction(6, eax);
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
__ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+ __ mov(Operand(esp, 5 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
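// The sixth argument slot added above carries the isolate, so the C entry
// point now has the shape (sketch; see Deoptimizer::New for the real
// declaration):
//
//   Deoptimizer* Deoptimizer::New(JSFunction* function,
//                                 BailoutType type,
//                                 unsigned bailout_id,
//                                 Address from,
//                                 int fp_to_sp_delta,
//                                 Isolate* isolate);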
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 8084694..0f95abd 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -108,7 +108,7 @@
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 16c39c5..69d5e77 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
@@ -231,7 +231,7 @@
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
+ PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
NearLabel ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
@@ -773,7 +773,7 @@
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
+ clause->body_target()->Unuse();
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
@@ -801,7 +801,7 @@
__ cmp(edx, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ __ jmp(clause->body_target());
__ bind(&slow_case);
}
@@ -812,7 +812,7 @@
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ __ jmp(clause->body_target());
}
// Discard the test value and jump to the default if present, otherwise to
@@ -822,14 +822,14 @@
if (default_clause == NULL) {
__ jmp(nested_statement.break_target());
} else {
- __ jmp(default_clause->body_target()->entry_label());
+ __ jmp(default_clause->body_target());
}
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
+ __ bind(clause->body_target());
PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
VisitStatements(clause->statements());
}
@@ -1563,27 +1563,26 @@
}
}
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
if (expr->is_compound()) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
}
}
- // For property compound assignments we need another deoptimization
- // point after the property load.
- if (property != NULL) {
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- }
-
Token::Value op = expr->binary_op();
__ push(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
@@ -2268,15 +2267,6 @@
}
}
} else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the full code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(isolate()->heap()->empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_full_codegen(true);
- }
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(fun);
}
@@ -2458,10 +2448,73 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // TODO(3110205): Implement this.
- // Currently unimplemented. Emit false, a safe choice.
+ if (FLAG_debug_code) __ AbortIfSmi(eax);
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
+ 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ j(not_zero, if_true);
+
+ // Check for fast case object. Return false for slow case objects.
+ __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ cmp(ecx, FACTORY->hash_table_map());
+ __ j(equal, if_false);
+
+ // Look for the valueOf symbol in the descriptor array, and jump to the
+ // false case if it is found. The descriptor type is not checked, so a
+ // transition named valueOf produces a false negative.
+ __ mov(ebx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
+ __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ // ebx: descriptor array
+ // ecx: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+ // Calculate location of the first key name.
+ __ add(Operand(ebx),
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of them is
+ // the symbol valueOf, the result is false.
+ Label entry, loop;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, FieldOperand(ebx, 0));
+ __ cmp(edx, FACTORY->value_of_symbol());
+ __ j(equal, if_false);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(ebx, Operand(ecx));
+ __ j(not_equal, &loop);
+
+ // Reload map as register ebx was used as temporary above.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edx,
+ FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ __ cmp(ecx,
+ ContextOperand(edx,
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, if_false);
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf, and jump to the true result.
+ __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ jmp(if_true);
+
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ jmp(if_false);
context()->Plug(if_true, if_false);
}
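// The fast path emitted above corresponds to roughly this logic (editor's
// paraphrase in pseudo-C++, not code from the tree):
//
//   if (map->bit_field2() & (1 << Map::kStringWrapperSafeForDefaultValueOf))
//     return true;                                   // already verified
//   if (object->properties()->map() == hash_table_map)
//     return false;                                  // slow-case object
//   for (each key in map->instance_descriptors())
//     if (key == value_of_symbol) return false;      // may be shadowed
//   if (map->prototype()->map() != string_function_prototype_map)
//     return false;                                  // modified prototype
//   map->set_string_wrapper_safe_for_default_value_of();  // cache result
//   return true;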
@@ -2717,15 +2770,16 @@
__ bind(&heapnumber_allocated);
- __ PrepareCallCFunction(0, ebx);
+ __ PrepareCallCFunction(1, ebx);
+ __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()),
- 0);
+ 1);
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
// This is implemented on both SSE2 and FPU.
- if (isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, Operand(ebx));
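// The subtraction trick in numbers (editor's note, assuming IEEE-754
// doubles): with the 52-bit mantissa laid out as 20 zero bits followed by
// the 32 random bits r,
//
//   1.(20 0s)(r) x 2^20 - 1.0 x 2^20 = r x 2^(20-52) = r / 2^32,
//
// which is a uniform double in [0, 1) with no int-to-double conversion.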
@@ -2800,7 +2854,7 @@
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
MathPowStub stub;
__ CallStub(&stub);
} else {
@@ -3033,15 +3087,14 @@
void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; ++i) {
+ VisitForStackValue(args->at(i));
}
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
+ VisitForAccumulatorValue(args->last()); // Function.
- // InvokeFunction requires function in edi. Move it in there.
- if (!result_register().is(edi)) __ mov(edi, result_register());
+ // InvokeFunction requires the function in edi. Move it in there.
+ __ mov(edi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(edi, count, CALL_FUNCTION);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -3778,7 +3831,11 @@
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
- PrepareForBailout(expr->increment(), TOS_REG);
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ }
// Call ToNumber only if operand is not a smi.
NearLabel no_conversion;
@@ -4196,30 +4253,7 @@
default:
break;
}
-
__ call(ic, mode);
-
- // Crankshaft doesn't need patching of inlined loads and stores.
- // When compiling the snapshot we need to produce code that works
- // with and without Crankshaft.
- if (V8::UseCrankshaft() && !Serializer::enabled()) {
- return;
- }
-
- // If we're calling a (keyed) load or store stub, we have to mark
- // the call as containing no inlined code so we will not attempt to
- // patch it.
- switch (ic->kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- __ nop(); // Signals no inlined code.
- break;
- default:
- // Do nothing.
- break;
- }
}
@@ -4240,7 +4274,6 @@
default:
break;
}
-
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 48ffc73..4106f01 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -371,12 +371,6 @@
}
-// The offset from the inlined patch site to the start of the
-// inlined load instruction. It is 7 bytes (test eax, imm) plus
-// 6 bytes (jne slow_label).
-const int LoadIC::kOffsetToLoadInstruction = 13;
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -1273,172 +1267,6 @@
}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a test eax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 4 bytes of the 7-byte
- // operand-immediate compare instruction, so we add 3 to get the
- // offset to the last 4 bytes.
- Address map_address = test_instruction_address + delta + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The offset is in the last 4 bytes of a six byte
- // memory-to-register move instruction, so we add 2 to get the
- // offset to the last 4 bytes.
- Address offset_address =
- test_instruction_address + delta + kOffsetToLoadInstruction + 2;
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
- return true;
-}
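// Layout of the (now removed) inlined load site, reconstructed from the
// offsets used above (byte counts come from the code; labels are editorial):
//
//   call <load IC>
//   test eax, <delta>          ; 5 bytes, marker; delta points at map check
//   ...
//   cmp <operand>, <map>       ; 7 bytes, map in the last 4 bytes (+3)
//   jne <slow>                 ; 6 bytes
//   mov reg, [obj + <offset>]  ; 6 bytes, offset in the last 4 (+13, +2)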
-
-
-// One byte opcode for mov ecx,0xXXXXXXXX.
-// Marks inlined contextual loads using all kinds of cells. Generated
-// code has the hole check:
-// mov reg, <cell>
-// mov reg, (<cell>, value offset)
-// cmp reg, <the hole>
-// je slow
-// ;; use reg
-static const byte kMovEcxByte = 0xB9;
-
-// One byte opcode for mov edx,0xXXXXXXXX.
-// Marks inlined contextual loads using only "don't delete"
-// cells. Generated code doesn't have the hole check:
-// mov reg, <cell>
-// mov reg, (<cell>, value offset)
-// ;; use reg
-static const byte kMovEdxByte = 0xBA;
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address mov_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a mov ecx/edx,
- // nothing was inlined.
- byte b = *mov_instruction_address;
- if (b != kMovEcxByte && b != kMovEdxByte) return false;
- // If we don't have the hole check generated, we can only support
- // "don't delete" cells.
- if (b == kMovEdxByte && !is_dont_delete) return false;
-
- Address delta_address = mov_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 4 bytes of the 7-byte
- // operand-immediate compare instruction, so we add 3 to get the
- // offset to the last 4 bytes.
- Address map_address = mov_instruction_address + delta + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The cell is in the last 4 bytes of a five byte mov reg, imm32
- // instruction, so we add 1 to get the offset to the last 4 bytes.
- Address offset_address =
- mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
- *reinterpret_cast<Object**>(offset_address) = cell;
- return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test eax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Extract the encoded deltas from the test eax instruction.
- Address encoded_offsets_address = test_instruction_address + 1;
- int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
- int delta_to_map_check = -(encoded_offsets & 0xFFFF);
- int delta_to_record_write = encoded_offsets >> 16;
-
- // Patch the map to check. The map address is the last 4 bytes of
- // the 7-byte operand-immediate compare instruction.
- Address map_check_address = test_instruction_address + delta_to_map_check;
- Address map_address = map_check_address + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // Patch the offset in the store instruction. The offset is in the
- // last 4 bytes of a six byte register-to-memory move instruction.
- Address offset_address =
- map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
- // The offset should have initial value (kMaxInt - 1), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == HEAP->null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- // Patch the offset in the write-barrier code. The offset is the
- // last 4 bytes of a six byte lea instruction.
- offset_address = map_check_address + delta_to_record_write + 2;
- // The offset should have initial value (kMaxInt), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == HEAP->null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- return true;
-}
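// The marker instruction for inlined stores packs two deltas into its
// 32-bit immediate, as decoded above:
//
//   bits  0..15 : distance back from the marker to the map-check instruction
//   bits 16..31 : distance from the map check forward to the record-write lea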
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // The keyed load has a fast inlined case if the IC call instruction
- // is immediately followed by a test instruction.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Fetch the offset from the test instruction to the map cmp
- // instruction. This offset is stored in the last 4 bytes of the 5
- // byte test instruction.
- Address delta_address = test_instruction_address + 1;
- int delta = *reinterpret_cast<int*>(delta_address);
- // Compute the map address. The map address is in the last 4 bytes
- // of the 7-byte operand-immediate compare instruction, so we add 3
- // to the offset to get the map address.
- Address map_address = test_instruction_address + delta + 3;
- // Patch the map check.
- *(reinterpret_cast<Object**>(map_address)) = map;
- return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -1519,12 +1347,6 @@
}
-// The offset from the inlined patch site to the start of the inlined
-// store instruction. It is 7 bytes (test reg, imm) plus 6 bytes (jne
-// slow_label).
-const int StoreIC::kOffsetToStoreInstruction = 13;
-
-
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
deleted file mode 100644
index 76c0d02..0000000
--- a/src/ia32/jump-target-ia32.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
- // Backward jump. There is an expected frame to merge to.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else if (entry_frame_ != NULL) {
- // Forward jump with a preconfigured entry frame. Assert the
- // current frame matches the expected one and jump to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else {
- // Forward jump. Remember the current frame and emit a jump to
- // its merge code.
- AddReachingFrame(cgen()->frame());
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- __ jmp(&merge_labels_.last());
- }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint hint) {
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
-
- if (is_bound()) {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Backward branch. We have an expected frame to merge to on the
- // backward edge.
-
- // Swap the current frame for a copy (we do the swapping to get
- // the off-frame registers off the fall through) to use for the
- // branch.
- VirtualFrame* fall_through_frame = cgen()->frame();
- VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers;
- cgen()->SetFrame(branch_frame, &non_frame_registers);
-
- // Check if we can avoid merge code.
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- if (cgen()->frame()->Equals(entry_frame_)) {
- // Branch right in to the block.
- cgen()->DeleteFrame();
- __ j(cc, &entry_label_, hint);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
-
- // Check if we can reuse existing merge code.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL &&
- cgen()->frame()->Equals(reaching_frames_[i])) {
- // Branch to the merge code.
- cgen()->DeleteFrame();
- __ j(cc, &merge_labels_[i], hint);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
- }
-
- // To emit the merge code here, we negate the condition and branch
- // around the merge code on the fall through path.
- Label original_fall_through;
- __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- __ bind(&original_fall_through);
-
- } else if (entry_frame_ != NULL) {
- // Forward branch with a preconfigured entry frame. Assert the
- // current frame matches the expected one and branch to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- // Explicitly use the macro assembler instead of __ as forward
- // branches are expected to be a fixed size (no inserted
- // coverage-checking instructions please). This is used in
- // Reference::GetValue.
- cgen()->masm()->j(cc, &entry_label_, hint);
-
- } else {
- // Forward branch. A copy of the current frame is remembered and
- // a branch to the merge code is emitted. Explicitly use the
- // macro assembler instead of __ as forward branches are expected
- // to be a fixed size (no inserted coverage-checking instructions
- // please). This is used in Reference::GetValue.
- AddReachingFrame(new VirtualFrame(cgen()->frame()));
- cgen()->masm()->j(cc, &merge_labels_.last(), hint);
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- cgen()->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
- target_frame->Adjust(1);
- // We do not expect a call with a preconfigured entry frame.
- ASSERT(entry_frame_ == NULL);
- AddReachingFrame(target_frame);
- __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(cgen() != NULL);
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- // Fast case: the jump target was manually configured with an entry
- // frame to use.
- if (entry_frame_ != NULL) {
- // Assert no reaching frames to deal with.
- ASSERT(reaching_frames_.is_empty());
- ASSERT(!cgen()->has_valid_frame());
-
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- // Copy the entry frame so the original can be used for a
- // possible backward jump.
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- } else {
- // Take ownership of the entry frame.
- cgen()->SetFrame(entry_frame_, &empty);
- entry_frame_ = NULL;
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (!is_linked()) {
- ASSERT(cgen()->has_valid_frame());
- if (direction_ == FORWARD_ONLY) {
- // Fast case: no forward jumps and no possible backward jumps.
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- VirtualFrame* frame = cgen()->frame();
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
- } else {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Fast case: no forward jumps, possible backward ones. Remove
- // constants and copies above the watermark on the fall-through
- // frame and use it as the entry frame.
- cgen()->frame()->MakeMergable();
- entry_frame_ = new VirtualFrame(cgen()->frame());
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (direction_ == FORWARD_ONLY &&
- !cgen()->has_valid_frame() &&
- reaching_frames_.length() == 1) {
- // Fast case: no fall-through, a single forward jump, and no
- // possible backward jumps. Pick up the only reaching frame, take
- // ownership of it, and use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- __ bind(&entry_label_);
- return;
- }
-
- // If there is a current frame, record it as the fall-through. It
- // is owned by the reaching frames for now.
- bool had_fall_through = false;
- if (cgen()->has_valid_frame()) {
- had_fall_through = true;
- AddReachingFrame(cgen()->frame()); // Return value ignored.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
-
- // Compute the frame to use for entry to the block.
- ComputeEntryFrame();
-
- // Some moves required to merge to an expected frame require purely
- // frame state changes, and do not require any code generation.
- // Perform those first to increase the possibility of finding equal
- // frames below.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL) {
- reaching_frames_[i]->PrepareMergeTo(entry_frame_);
- }
- }
-
- if (is_linked()) {
- // There were forward jumps. Handle merging the reaching frames
- // to the entry frame.
-
- // Loop over the (non-null) reaching frames and process any that
- // need merge code. Iterate backwards through the list to handle
- // the fall-through frame first. Set frames that will be
- // processed after 'i' to NULL if we want to avoid processing
- // them.
- for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
- VirtualFrame* frame = reaching_frames_[i];
-
- if (frame != NULL) {
- // Does the frame (probably) need merge code?
- if (!frame->Equals(entry_frame_)) {
- // We could have a valid frame as the fall through to the
- // binding site or as the fall through from a previous merge
- // code block. Jump around the code we are about to
- // generate.
- if (cgen()->has_valid_frame()) {
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- }
- // Pick up the frame for this block. Assume ownership if
- // there cannot be backward jumps.
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &empty);
- } else {
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- __ bind(&merge_labels_[i]);
-
- // Loop over the remaining (non-null) reaching frames,
- // looking for any that can share merge code with this one.
- for (int j = 0; j < i; j++) {
- VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen()->frame())) {
- // Set the reaching frame element to null to avoid
- // processing it later, and then bind its entry label.
- reaching_frames_[j] = NULL;
- __ bind(&merge_labels_[j]);
- }
- }
-
- // Emit the merge code.
- cgen()->frame()->MergeTo(entry_frame_);
- } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
- // If this is the fall through frame, and it didn't need
- // merge code, we need to pick up the frame so we can jump
- // around subsequent merge blocks if necessary.
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- }
- }
-
- // The code generator may not have a current frame if there was no
- // fall through and none of the reaching frames needed merging.
- // In that case, clone the entry frame as the current frame.
- if (!cgen()->has_valid_frame()) {
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // There may be unprocessed reaching frames that did not need
- // merge code. They will have unbound merge labels. Bind their
- // merge labels to be the same as the entry label and deallocate
- // them.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (!merge_labels_[i].is_bound()) {
- reaching_frames_[i] = NULL;
- __ bind(&merge_labels_[i]);
- }
- }
-
- // There are non-NULL reaching frames with bound labels for each
- // merge block, but only on backward targets.
- } else {
- // There were no forward jumps. There must be a current frame and
- // this must be a bidirectional target.
- ASSERT(reaching_frames_.length() == 1);
- ASSERT(reaching_frames_[0] != NULL);
- ASSERT(direction_ == BIDIRECTIONAL);
-
- // Use a copy of the reaching frame so the original can be saved
- // for possible reuse as a backward merge block.
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
- __ bind(&merge_labels_[0]);
- cgen()->frame()->MergeTo(entry_frame_);
- }
-
- __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- }
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_ + 1);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 1691098..46c71e8 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -77,7 +77,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
+ code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -132,7 +132,7 @@
__ push(edi); // Callee's JS function.
// Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
+ int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
__ mov(Operand(eax), Immediate(slots));
@@ -254,7 +254,7 @@
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
- safepoints_.Emit(masm(), StackSlotCount());
+ safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -386,7 +386,7 @@
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsArgument()) {
ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
+ int src_index = GetStackSlotCount() + op->index();
translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -408,20 +408,21 @@
}
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- bool adjusted) {
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ ContextMode context_mode,
+ SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- if (!adjusted) {
+ if (context_mode == RESTORE_CONTEXT) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ call(code, mode);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -432,25 +433,44 @@
}
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ ContextMode context_mode) {
+ CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
+}
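// The two ContextMode values used above (sketch of the enum, assumed to be
// declared in lithium-codegen-ia32.h):
//
//   enum ContextMode {
//     RESTORE_CONTEXT,   // reload esi from the caller's frame first
//     CONTEXT_ADJUSTED   // esi already holds the correct context
//   };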
+
+
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
LInstruction* instr,
- bool adjusted) {
+ ContextMode context_mode) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- if (!adjusted) {
+ if (context_mode == RESTORE_CONTEXT) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ CallRuntime(fun, argc);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode) {
// Create the environment to bailout to. If the call has side effects
// execution has to continue after the call otherwise execution can continue
// from a previous bailout point repeating the call.
@@ -462,8 +482,16 @@
}
RegisterEnvironmentForDeoptimization(deoptimization_environment);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(),
+ deoptimization_environment->deoptimization_index());
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(),
+ 0,
+ deoptimization_environment->deoptimization_index());
+ }
}
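
The SafepointMode argument threads through to this point so that a single
lazy-deopt registration path serves both ordinary calls and deferred code
that runs with the safepoint registers pushed. A sketch of the intended
pairing, using only names introduced by this patch:

    // Ordinary call site: no registers saved, record a simple safepoint.
    CallCode(code, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
    //   -> RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT)

    // Deferred code inside a PushSafepointRegistersScope: the safepoint
    // must also describe the pushed register slots.
    CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
                    RESTORE_CONTEXT,
                    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);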
@@ -612,6 +640,7 @@
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
+ ASSERT(kind == expected_safepoint_kind_);
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
@@ -697,38 +726,38 @@
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
default:
@@ -1062,7 +1091,7 @@
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- if (isolate()->cpu_features()->IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
@@ -1143,7 +1172,7 @@
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToOperand(instr->InputAt(0)));
- CallRuntime(Runtime::kThrow, 1, instr, false);
+ CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);
if (FLAG_debug_code) {
Comment("Unreachable code.");
@@ -1218,7 +1247,7 @@
ASSERT(ToRegister(instr->result()).is(eax));
TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
@@ -1330,12 +1359,8 @@
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- __ pushad();
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ popad();
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
void LCodeGen::DoGoto(LGoto* instr) {
@@ -1837,7 +1862,7 @@
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
NearLabel true_value, done;
__ test(eax, Operand(eax));
@@ -1856,7 +1881,7 @@
int false_block = chunk_->LookupDestination(instr->false_block_id());
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
__ test(eax, Operand(eax));
EmitBranch(true_block, false_block, zero);
}
@@ -1928,7 +1953,7 @@
void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
@@ -1939,20 +1964,24 @@
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(flags);
- // Get the temp register reserved by the instruction. This needs to be edi as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
+ // Get the temp register reserved by the instruction. This needs to be
+ // the register pushed last by PushSafepointRegisters, since the top of
+ // the stack is used to pass the offset of the map check location to
+ // the stub.
Register temp = ToRegister(instr->TempAt(0));
- ASSERT(temp.is(edi));
+ ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
__ mov(InstanceofStub::right(), Immediate(instr->function()));
static const int kAdditionalDelta = 16;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RESTORE_CONTEXT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
// Put the result value into the eax slot and restore all registers.
__ StoreToSafepointRegisterSlot(eax, eax);
- __ PopSafepointRegisters();
}
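
The relaxed assertion above works because PushSafepointRegisters leaves the
register with SafepointRegisterStackIndex == 0 on top of the stack, so
StoreToSafepointRegisterSlot(temp, temp) places the delta at [esp + 0],
where the stub reads the offset back to the map-check site. A layout
sketch, assuming 4-byte slots:

    // Stack inside the deferred code, after PushSafepointRegistersScope:
    //   [esp + 0]  slot of the register with SafepointRegisterStackIndex == 0
    //              <- `delta` is stored here and read by InstanceofStub
    //   [esp + 4]  remaining safepoint register slots ...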
@@ -1980,7 +2009,7 @@
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
@@ -2003,7 +2032,7 @@
int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
// The compare stub expects compare condition and the input operands
// reversed for GT and LTE.
@@ -2028,11 +2057,11 @@
}
__ mov(esp, ebp);
__ pop(ebp);
- __ Ret((ParameterCount() + 1) * kPointerSize, ecx);
+ __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
if (instr->hydrogen()->check_hole_value()) {
@@ -2042,7 +2071,20 @@
}
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->global_object()).is(eax));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ mov(ecx, instr->name());
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+ RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
@@ -2060,6 +2102,19 @@
}
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->global_object()).is(edx));
+ ASSERT(ToRegister(instr->value()).is(eax));
+
+ __ mov(ecx, instr->name());
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2122,7 +2177,7 @@
ASSERT(instr->hydrogen()->need_generic());
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
} else {
NearLabel done;
for (int i = 0; i < map_count - 1; ++i) {
@@ -2144,7 +2199,7 @@
__ bind(&generic);
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
} else {
DeoptimizeIf(not_equal, instr->environment());
EmitLoadField(result, object, map, name);
@@ -2161,7 +2216,7 @@
__ mov(ecx, instr->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2304,11 +2359,11 @@
break;
case kExternalUnsignedIntArray:
__ mov(result, Operand(external_pointer, key, times_4, 0));
- __ test(Operand(result), Immediate(0x80000000));
+ __ test(result, Operand(result));
// TODO(danno): we could be more clever here, perhaps having a special
// version of the stub that detects if the overflow case actually
// happens, and generating code that returns a double rather than an int.
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(negative, instr->environment());
break;
case kExternalFloatArray:
UNREACHABLE();
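
The replacement is behavior-preserving: `test result, result` sets SF from
bit 31, so branching on `negative` checks exactly the bit that
`test result, 0x80000000` checked with `not_zero`, while avoiding the
four-byte immediate. A standalone check of the equivalence (assuming the
usual two's-complement int32_t):

    #include <assert.h>
    #include <stdint.h>

    int main() {
      const uint32_t samples[] = {0, 1, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
      for (int i = 0; i < 5; i++) {
        uint32_t v = samples[i];
        bool old_check = (v & 0x80000000u) != 0;       // test result, imm; j not_zero
        bool new_check = static_cast<int32_t>(v) < 0;  // test result, result; j negative
        assert(old_check == new_check);
      }
      return 0;
    }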
@@ -2324,7 +2379,7 @@
ASSERT(ToRegister(instr->key()).is(eax));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2438,7 +2493,7 @@
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
- v8::internal::ParameterCount actual(eax);
+ ParameterCount actual(eax);
__ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}
@@ -2512,7 +2567,7 @@
}
// Setup deoptimization.
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}
@@ -2534,7 +2589,7 @@
Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
Label negative;
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2555,10 +2610,8 @@
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -2574,7 +2627,6 @@
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
- __ PopSafepointRegisters();
}
@@ -2655,25 +2707,16 @@
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Label below_half, done;
// xmm_scratch = 0.5
ExternalReference one_half = ExternalReference::address_of_one_half();
__ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+ __ ucomisd(xmm_scratch, input_reg);
+ __ j(above, &below_half);
// input = input + 0.5
__ addsd(input_reg, xmm_scratch);
- // We need to return -0 for the input range [-0.5, 0[, otherwise
- // compute Math.floor(value + 0.5).
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below_equal, instr->environment());
- } else {
- // If we don't need to bailout on -0, we check only bailout
- // on negative inputs.
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
- }
// Compute Math.floor(value + 0.5).
// Use truncating instruction (OK because input is positive).
@@ -2682,6 +2725,27 @@
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
+ __ jmp(&done);
+
+ __ bind(&below_half);
+
+ // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+ // we can ignore the difference between a result of -0 and +0.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // If the sign is positive, we return +0.
+ __ movmskpd(output_reg, input_reg);
+ __ test(output_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ // If the input is >= -0.5, we return +0.
+ __ mov(output_reg, Immediate(0xBF000000));
+ __ movd(xmm_scratch, Operand(output_reg));
+ __ cvtss2sd(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ DeoptimizeIf(below, instr->environment());
+ }
+ __ Set(output_reg, Immediate(0));
+ __ bind(&done);
}
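
A note on the magic number above: 0xBF000000 is the IEEE-754
single-precision encoding of -0.5 (sign 1, biased exponent 126, mantissa
0), which cvtss2sd widens to double before the ucomisd against the input.
A standalone check:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main() {
      const uint32_t bits = 0xBF000000u;  // sign=1, exponent=126 (2^-1), mantissa=0
      float f;
      memcpy(&f, &bits, sizeof(f));       // type-pun safely via memcpy
      assert(f == -0.5f);                 // the threshold compared against input_reg
      return 0;
    }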
@@ -2763,10 +2827,32 @@
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ NearLabel positive, done, zero, negative;
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(input_reg, xmm0);
+ __ j(above, &positive);
+ __ j(equal, &zero);
+ ExternalReference nan = ExternalReference::address_of_nan();
+ __ movdbl(input_reg, Operand::StaticVariable(nan));
+ __ jmp(&done);
+ __ bind(&zero);
+ __ push(Immediate(0xFFF00000));
+ __ push(Immediate(0));
+ __ movdbl(input_reg, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ __ jmp(&done);
+ __ bind(&positive);
+ __ fldln2();
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ movdbl(Operand(esp, 0), input_reg);
+ __ fld_d(Operand(esp, 0));
+ __ fyl2x();
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(input_reg, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ __ bind(&done);
}
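
Two facts make the new inline sequence correct. First, fldln2 pushes ln 2
and fyl2x computes ST(1) * log2(ST(0)) (popping once), so the x87 code
evaluates ln 2 * log2(x) = ln x for positive inputs. Second, pushing
0xFFF00000 and then 0 builds the little-endian double with bit pattern
0xFFF0000000000000, which is -Infinity, the required value of Math.log(0).
A standalone check of that bit pattern:

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    int main() {
      const uint64_t bits = 0xFFF0000000000000ull;  // sign=1, exponent all ones
      double d;
      memcpy(&d, &bits, sizeof(d));
      assert(isinf(d) && d < 0);  // -Infinity, as loaded for log(0)
      return 0;
    }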
@@ -2774,7 +2860,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
@@ -2782,7 +2868,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
@@ -2819,6 +2905,21 @@
}
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(edi, count, CALL_FUNCTION, &generator);
+}
+
+
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->key()).is(ecx));
@@ -2827,7 +2928,7 @@
int arity = instr->arity();
Handle<Code> ic = isolate()->stub_cache()->
ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2839,7 +2940,7 @@
Handle<Code> ic = isolate()->stub_cache()->
ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2849,7 +2950,7 @@
int arity = instr->arity();
CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
__ Drop(1);
}
@@ -2862,7 +2963,7 @@
Handle<Code> ic = isolate()->stub_cache()->
ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
}
@@ -2880,12 +2981,12 @@
Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
__ Set(eax, Immediate(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr, false);
+ CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT);
}
@@ -2925,10 +3026,10 @@
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = info_->is_strict()
+ Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -3025,10 +3126,10 @@
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
- Handle<Code> ic = info_->is_strict()
+ Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -3146,7 +3247,7 @@
// contained in the register pointer map.
__ Set(result, Immediate(0));
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
@@ -3159,16 +3260,12 @@
__ SmiTag(index);
__ push(index);
}
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
if (FLAG_debug_code) {
__ AbortIfNotSmi(eax);
}
__ SmiUntag(eax);
__ StoreToSafepointRegisterSlot(result, eax);
- __ PopSafepointRegisters();
}
@@ -3211,14 +3308,11 @@
// contained in the register pointer map.
__ Set(result, Immediate(0));
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
__ StoreToSafepointRegisterSlot(result, eax);
- __ PopSafepointRegisters();
}
@@ -3229,6 +3323,22 @@
}
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ if (instr->left()->IsConstantOperand()) {
+ __ push(ToImmediate(instr->left()));
+ } else {
+ __ push(ToOperand(instr->left()));
+ }
+ if (instr->right()->IsConstantOperand()) {
+ __ push(ToImmediate(instr->right()));
+ } else {
+ __ push(ToOperand(instr->right()));
+ }
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+}
+
+
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
@@ -3265,7 +3375,7 @@
Register tmp = reg.is(eax) ? ecx : eax;
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -3287,10 +3397,7 @@
// integer value.
__ StoreToSafepointRegisterSlot(reg, Immediate(0));
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
if (!reg.is(eax)) __ mov(reg, eax);
// Done. Put the value in xmm0 into the value of the allocated heap
@@ -3298,7 +3405,6 @@
__ bind(&done);
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
__ StoreToSafepointRegisterSlot(reg, reg);
- __ PopSafepointRegisters();
}
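
Background for the deferred number-tagging paths: ia32 smis carry a 31-bit
payload tagged by a left shift of one, so tagging overflows exactly when
bits 30 and 31 of the untagged value disagree (the "bits 30 and 31 ...
disagree" comment above). A minimal check of that rule:

    #include <assert.h>
    #include <stdint.h>

    // True if v survives smi tagging (v << 1) without losing information,
    // i.e. bits 30 and 31 agree.
    static bool FitsInSmi(int32_t v) {
      return (v >> 30) == (v >> 31);
    }

    int main() {
      assert(FitsInSmi(0) && FitsInSmi(-1) && FitsInSmi(0x3FFFFFFF));
      assert(!FitsInSmi(0x40000000) && !FitsInSmi(-0x40000001));
      return 0;
    }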
@@ -3334,13 +3440,9 @@
Register reg = ToRegister(instr->result());
__ Set(reg, Immediate(0));
- __ PushSafepointRegisters();
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ StoreToSafepointRegisterSlot(reg, eax);
- __ PopSafepointRegisters();
}
@@ -3427,7 +3529,7 @@
__ jmp(&done);
__ bind(&heap_number);
- if (isolate()->cpu_features()->IsSupported(SSE3)) {
+ if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
NearLabel convert;
// Use more powerful conversion when sse3 is available.
@@ -3537,7 +3639,7 @@
// the JS bitwise operations.
__ cvttsd2si(result_reg, Operand(input_reg));
__ cmp(result_reg, 0x80000000u);
- if (isolate()->cpu_features()->IsSupported(SSE3)) {
+ if (CpuFeatures::IsSupported(SSE3)) {
// This will deoptimize if the exponent of the input is out of range.
CpuFeatures::Scope scope(SSE3);
NearLabel convert, done;
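
The cmp against 0x80000000u works because cvttsd2si reports failure
in-band: it produces the x86 "integer indefinite" value 0x80000000
(INT32_MIN) whenever the double is NaN or out of int32 range, so a single
compare catches every unconvertible input. A sketch using the SSE2
intrinsic for the same instruction:

    #include <assert.h>
    #include <stdint.h>
    #include <emmintrin.h>  // SSE2: _mm_cvttsd_si32 is cvttsd2si

    int main() {
      int32_t ok  = _mm_cvttsd_si32(_mm_set_sd(12.9));   // truncates toward zero
      int32_t bad = _mm_cvttsd_si32(_mm_set_sd(4.0e9));  // does not fit in int32
      assert(ok == 12);
      assert(bad == INT32_MIN);  // 0x80000000: take the slow path
      return 0;
    }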
@@ -3755,16 +3857,16 @@
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
} else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, false);
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, false);
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
} else {
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
}
@@ -3786,9 +3888,12 @@
// Pick the right runtime function to call.
if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
} else {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ CallRuntime(Runtime::kCreateObjectLiteralShallow,
+ 4,
+ instr,
+ CONTEXT_ADJUSTED);
}
}
@@ -3796,7 +3901,7 @@
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(eax));
__ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
+ CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
}
@@ -3821,7 +3926,7 @@
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, false);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -3833,7 +3938,7 @@
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, false);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
__ pop(ebx);
__ bind(&allocated);
@@ -3861,14 +3966,14 @@
FastNewClosureStub stub(
shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ push(Immediate(shared_info));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
} else {
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
? factory()->true_value()
: factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr, false);
+ CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
}
}
@@ -3880,7 +3985,7 @@
} else {
__ push(ToOperand(input));
}
- CallRuntime(Runtime::kTypeof, 1, instr, false);
+ CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
}
@@ -4083,7 +4188,7 @@
__ j(above_equal, &done);
StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
__ bind(&done);
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 4414e6a..f8bbea3 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -61,7 +61,8 @@
deferred_(8),
osr_pc_offset_(-1),
deoptimization_reloc_size(),
- resolver_(this) {
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -129,7 +130,7 @@
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
- return info()->is_strict() ? kStrictMode : kNonStrictMode;
+ return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -146,8 +147,8 @@
Register temporary,
Register temporary2);
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
@@ -164,16 +165,44 @@
bool GenerateRelocPadding();
bool GenerateSafepointTable();
- void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
- bool adjusted = true);
- void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
- bool adjusted = true);
- void CallRuntime(Runtime::FunctionId id, int argc, LInstruction* instr,
- bool adjusted = true) {
+ enum ContextMode {
+ RESTORE_CONTEXT,
+ CONTEXT_ADJUSTED
+ };
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ ContextMode context_mode);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ ContextMode context_mode,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* fun,
+ int argc,
+ LInstruction* instr,
+ ContextMode context_mode);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ ContextMode context_mode) {
const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr, adjusted);
+ CallRuntime(function, argc, instr, context_mode);
}
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr);
+
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
@@ -182,7 +211,9 @@
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr);
+ void RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
@@ -281,6 +312,27 @@
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->masm_->PushSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ }
+
+ ~PushSafepointRegistersScope() {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ codegen_->masm_->PopSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
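
The scope class above both pairs Push/PopSafepointRegisters automatically
and, through expected_safepoint_kind_, lets RecordSafepoint assert that
deferred code records register safepoints. A hypothetical deferred routine
showing the intended shape (names modeled on the deferred paths in this
patch, not taken from it):

    void LCodeGen::DoDeferredExample(LInstruction* instr) {
      // Pushes all registers; expected safepoint kind becomes kWithRegisters.
      PushSafepointRegistersScope scope(this);
      CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
      // Write the result into its safepoint slot so it survives the pop.
      __ StoreToSafepointRegisterSlot(eax, eax);
    }  // Destructor pops the registers and resets the expected kind.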
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index eabfecc..3d1da40 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
@@ -460,3 +462,5 @@
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 199a80a..aa91a83 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -71,22 +71,21 @@
#ifdef DEBUG
void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Input operands must use a fixed register, a use-at-start policy, or
+ // a non-register policy.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ LUnallocated* operand = LUnallocated::cast(it.Next());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
}
for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ LUnallocated* operand = LUnallocated::cast(it.Next());
+ ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
#endif
@@ -303,6 +302,15 @@
}
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" ");
+ InputAt(1)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[ecx] #%d / ", arity());
}
@@ -1120,9 +1128,9 @@
return new LIsConstructCallAndBranch(TempRegister());
} else {
if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
+ if (HConstant::cast(v)->ToBoolean()) {
return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
+ } else {
return new LGoto(instr->SecondSuccessor()->block_id());
}
}
@@ -1187,7 +1195,7 @@
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
- LOperand* argument = UseOrConstant(instr->argument());
+ LOperand* argument = UseAny(instr->argument());
return new LPushArgument(argument);
}
@@ -1222,9 +1230,24 @@
}
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseFixed(instr->function(), edi);
+ argument_count_ -= instr->argument_count();
+ LInvokeFunction* result = new LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ if (op == kMathLog) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ return DefineSameAsFirst(result);
+ } else if (op == kMathSin || op == kMathCos) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LUnaryMathOperation* result = new LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
@@ -1633,9 +1656,8 @@
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
- CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && cpu_features->IsSupported(SSE3))
+ (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@@ -1656,7 +1678,7 @@
} else {
ASSERT(to.IsInteger32());
bool needs_temp = instr->CanTruncateToInt32() &&
- !Isolate::Current()->cpu_features()->IsSupported(SSE3);
+ !CpuFeatures::IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -1746,20 +1768,39 @@
}
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* global_object = UseFixed(instr->global_object(), eax);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LStoreGlobalCell* result =
+ new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* global_object = UseFixed(instr->global_object(), edx);
+ LOperand* value = UseFixed(instr->value(), eax);
+ LStoreGlobalGeneric* result =
+ new LStoreGlobalGeneric(context, global_object, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
@@ -1978,6 +2019,13 @@
}
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* left = UseOrConstantAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return MarkAsCall(DefineFixed(new LStringAdd(left, right), eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
@@ -2022,7 +2070,8 @@
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LDeleteProperty* result =
- new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+ new LDeleteProperty(UseAtStart(instr->object()),
+ UseOrConstantAtStart(instr->key()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2110,7 +2159,6 @@
env->Push(value);
}
}
- ASSERT(env->length() == instr->environment_length());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index a9d769b..76c90be 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -39,6 +39,7 @@
// Forward declarations.
class LCodeGen;
+
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
@@ -106,6 +107,7 @@
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(Integer32ToDouble) \
+ V(InvokeFunction) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
@@ -121,7 +123,8 @@
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
- V(LoadGlobal) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
@@ -146,12 +149,14 @@
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
- V(StoreGlobal) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringLength) \
@@ -1292,21 +1297,59 @@
};
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStoreGlobal(LOperand* value) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
+ public:
+ explicit LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* global_object() { return InputAt(1); }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LOperand* value() { return InputAt(2); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1410,6 +1453,25 @@
};
+class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
public:
LCallKeyed(LOperand* context, LOperand* key) {
@@ -1655,6 +1717,7 @@
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
Handle<Object> name() const { return hydrogen()->name(); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1716,6 +1779,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
@@ -1723,6 +1787,22 @@
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringAdd(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
};
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index ba30c49..ad567bc 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "runtime.h"
#include "serialize.h"
@@ -41,11 +41,14 @@
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(isolate()->heap()->undefined_value()) {
+ allow_stub_calls_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
}
@@ -231,7 +234,7 @@
void MacroAssembler::FCmp() {
- if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
+ if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
ffree(0);
fincstp();
@@ -1027,19 +1030,6 @@
}
-void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target) {
- JumpTarget ok;
- test(result, Operand(result));
- ok.Branch(not_zero, taken);
- test(op, Operand(op));
- then_target->Branch(sign, not_taken);
- ok.Bind();
-}
-
-
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
@@ -1988,17 +1978,14 @@
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- // Reserve space for Isolate address which is always passed as last parameter
- num_arguments += 1;
-
- int frameAlignment = OS::ActivationFrameAlignment();
- if (frameAlignment != 0) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ if (frame_alignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frameAlignment));
- and_(esp, -frameAlignment);
+ ASSERT(IsPowerOf2(frame_alignment));
+ and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
sub(Operand(esp), Immediate(num_arguments * kPointerSize));
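
A worked example of the aligned branch, assuming kPointerSize == 4 and
OS::ActivationFrameAlignment() == 16: for num_arguments == 3 the code
saves esp in scratch, reserves (3 + 1) * 4 = 16 bytes, masks esp down to a
16-byte boundary, and stores the saved esp just above the argument slots
so CallCFunction can restore it afterwards:

    // Stack after PrepareCallCFunction(3, scratch) on an aligned-ABI target:
    //   [esp + 0]   C argument 0  (lowest address)
    //   [esp + 4]   C argument 1
    //   [esp + 8]   C argument 2
    //   [esp + 12]  saved original esp  (reloaded by CallCFunction)
    // esp itself is 16-byte aligned when the C function is entered.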
@@ -2016,11 +2003,6 @@
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
- // Pass current isolate address as additional parameter.
- mov(Operand(esp, num_arguments * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- num_arguments += 1;
-
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
@@ -2030,13 +2012,15 @@
if (OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
- add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
+ add(Operand(esp), Immediate(num_arguments * kPointerSize));
}
}
CodePatcher::CodePatcher(byte* address, int size)
- : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+ : address_(address),
+ size_(size),
+ masm_(Isolate::Current(), address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index bafb175..6909272 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -50,13 +50,16 @@
typedef Operand MemOperand;
// Forward declaration.
-class JumpTarget;
class PostCallGenerator;
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller never to invoke such a function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
// ---------------------------------------------------------------------------
// GC Support
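
A sketch of the two construction modes this enables (buffer and size
values are placeholders, following the usual Assembler buffer convention):

    // Normal use: isolate-dependent members such as CodeObject() work.
    MacroAssembler masm(Isolate::Current(), NULL, 4 * KB);

    // Isolate-free use, e.g. code generated before an isolate exists; calling
    // CodeObject() here would trip its ASSERT(!code_object_.is_null()).
    MacroAssembler bare_masm(NULL, existing_buffer, buffer_size);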
@@ -420,12 +423,6 @@
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
- // Check if result is zero and op is negative in code using jump targets.
- void NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target);
-
// Check if result is zero and any of op1 and op2 are negative.
// Register scratch is destroyed, and it must be different from op2.
void NegativeZeroTest(Register result, Register op1, Register op2,
@@ -580,7 +577,10 @@
void Move(Register target, Handle<Object> value);
- Handle<Object> CodeObject() { return code_object_; }
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
// ---------------------------------------------------------------------------
@@ -635,6 +635,10 @@
Register scratch2,
Label* on_not_flat_ascii_strings);
+ static int SafepointRegisterStackIndex(Register reg) {
+ return SafepointRegisterStackIndex(reg.code());
+ }
+
private:
bool generating_stub_;
bool allow_stub_calls_;
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index f1c773b..067f8c8 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -99,7 +99,7 @@
RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
Mode mode,
int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -372,14 +372,18 @@
__ push(backtrack_stackpointer());
__ push(ebx);
- static const int argument_count = 3;
+ static const int argument_count = 4;
__ PrepareCallCFunction(argument_count, ecx);
// Put arguments into allocated stack area, last argument highest on stack.
// Parameters are
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
+ // Isolate* isolate
+ // Set isolate.
+ __ mov(Operand(esp, 3 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
// Set byte_length.
__ mov(Operand(esp, 2 * kPointerSize), ebx);
// Set byte_offset2.
@@ -838,8 +842,10 @@
__ push(edi);
// Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 2;
+ static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, ebx);
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
__ lea(eax, Operand(ebp, kStackHighEnd));
__ mov(Operand(esp, 1 * kPointerSize), eax);
__ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
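
Since CallCFunction no longer appends the isolate implicitly (see the
macro-assembler change above), callers pass it explicitly as the last C
argument. For this GrowStack call, num_arguments == 3 and the slots are
filled as:

    //   [esp + 0]  backtrack_stackpointer()              (argument 0)
    //   [esp + 4]  ebp + kStackHighEnd, via lea          (argument 1: stack base)
    //   [esp + 8]  ExternalReference::isolate_address()  (argument 2: isolate, new)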
diff --git a/src/ia32/register-allocator-ia32-inl.h b/src/ia32/register-allocator-ia32-inl.h
deleted file mode 100644
index 99ae6eb..0000000
--- a/src/ia32/register-allocator-ia32-inl.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- // The code for this test relies on the order of register codes.
- return reg.code() >= esp.code() && reg.code() <= esi.code();
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers. The mapping is:
-
-// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // eax
- 2, // ecx
- 3, // edx
- 1, // ebx
- -1, // esp
- -1, // ebp
- -1, // esi
- 4 // edi
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The non-reserved edi register is live on JS function entry.
- Use(edi); // JS function.
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
deleted file mode 100644
index 6db13d4..0000000
--- a/src/ia32/register-allocator-ia32.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- ASSERT(is_valid());
- if (is_constant()) {
- CodeGenerator* code_generator =
- CodeGeneratorScope::Current(Isolate::Current());
- Result fresh = code_generator->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- if (is_untagged_int32()) {
- fresh.set_untagged_int32(true);
- if (handle()->IsSmi()) {
- code_generator->masm()->Set(
- fresh.reg(),
- Immediate(Smi::cast(*handle())->value()));
- } else if (handle()->IsHeapNumber()) {
- double double_value = HeapNumber::cast(*handle())->value();
- int32_t value = DoubleToInt32(double_value);
- if (double_value == 0 && signbit(double_value)) {
- // Negative zero must not be converted to an int32 unless
- // the context allows it.
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- } else if (double_value == value) {
- code_generator->masm()->Set(fresh.reg(), Immediate(value));
- } else {
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- }
- } else {
- // Constant is not a number. This was not predicted by AST analysis.
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- }
- } else if (code_generator->IsUnsafeSmi(handle())) {
- code_generator->MoveUnsafeSmi(fresh.reg(), handle());
- } else {
- code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
- }
- // This result becomes a copy of the fresh one.
- fresh.set_type_info(type_info());
- *this = fresh;
- }
- ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
- CodeGenerator* code_generator =
- CodeGeneratorScope::Current(Isolate::Current());
- ASSERT(is_valid());
- if (!is_register() || !reg().is(target)) {
- Result fresh = code_generator->allocator()->Allocate(target);
- ASSERT(fresh.is_valid());
- if (is_register()) {
- code_generator->masm()->mov(fresh.reg(), reg());
- } else {
- ASSERT(is_constant());
- if (is_untagged_int32()) {
- if (handle()->IsSmi()) {
- code_generator->masm()->Set(
- fresh.reg(),
- Immediate(Smi::cast(*handle())->value()));
- } else {
- ASSERT(handle()->IsHeapNumber());
- double double_value = HeapNumber::cast(*handle())->value();
- int32_t value = DoubleToInt32(double_value);
- if (double_value == 0 && signbit(double_value)) {
- // Negative zero must not be converted to an int32 unless
- // the context allows it.
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- } else if (double_value == value) {
- code_generator->masm()->Set(fresh.reg(), Immediate(value));
- } else {
- code_generator->unsafe_bailout_->Branch(equal);
- code_generator->unsafe_bailout_->Branch(not_equal);
- }
- }
- } else {
- if (code_generator->IsUnsafeSmi(handle())) {
- code_generator->MoveUnsafeSmi(fresh.reg(), handle());
- } else {
- code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
- }
- }
- }
- fresh.set_type_info(type_info());
- fresh.set_untagged_int32(is_untagged_int32());
- *this = fresh;
- } else if (is_register() && reg().is(target)) {
- ASSERT(code_generator->has_valid_frame());
- code_generator->frame()->Spill(target);
- ASSERT(code_generator->allocator()->count(target) == 1);
- }
- ASSERT(is_register());
- ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- Result result = AllocateWithoutSpilling();
- // Check that the register is a byte register. If not, unuse the
- // register if valid and return an invalid result.
- if (result.is_valid() && !result.reg().is_byte_register()) {
- result.Unuse();
- return Result();
- }
- return result;
-}
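-
-// Only eax, ebx, ecx and edx expose low-byte forms (al, bl, cl, dl) on
-// ia32, so an allocation that happens to land in edi or esi cannot be
-// used for byte operations such as mov_b and is handed back. A hedged
-// caller sketch (hypothetical fallback):
-//
-//   Result byte_reg = allocator()->AllocateByteRegisterWithoutSpilling();
-//   if (!byte_reg.is_valid()) {
-//     // No byte register free: spill one, or route the value through
-//     // memory instead.
-//   }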
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/register-allocator-ia32.h b/src/ia32/register-allocator-ia32.h
deleted file mode 100644
index e7ce91f..0000000
--- a/src/ia32/register-allocator-ia32.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
- static const int kNumRegisters = 5;
- static const int kInvalidRegister = -1;
-};
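-
-// kNumRegisters = 5 is consistent with the allocatable ia32 registers
-// in this codebase: eax, ebx, ecx, edx and edi. esp and ebp serve as
-// stack and frame pointers, and esi is reserved for the context (see
-// SaveContextRegister in virtual-frame-ia32.cc), so none of those
-// three are available to the allocator.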
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 7730ee3..27d2886 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
@@ -1921,7 +1921,7 @@
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- if (!isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (!CpuFeatures::IsSupported(SSE2)) {
return isolate()->heap()->undefined_value();
}
@@ -3292,7 +3292,7 @@
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
__ mov(ebx, edi);
__ cmp(eax, arg_number);
- if (isolate()->cpu_features()->IsSupported(CMOV)) {
+ if (CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope use_cmov(CMOV);
__ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
} else {
@@ -3611,10 +3611,10 @@
// processors that don't support SSE2. The code in IntegerConvert
// (code-stubs-ia32.cc) is roughly what is needed here though the
// conversion failure case does not need to be handled.
- if (isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (CpuFeatures::IsSupported(SSE2)) {
if (array_type != kExternalIntArray &&
array_type != kExternalUnsignedIntArray) {
- ASSERT(isolate()->cpu_features()->IsSupported(SSE2));
+ ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
// ecx: untagged integer value
@@ -3629,6 +3629,7 @@
__ bind(&done);
}
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ break;
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
@@ -3642,7 +3643,7 @@
break;
}
} else {
- if (isolate()->cpu_features()->IsSupported(SSE3)) {
+ if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
// fisttp stores values as signed integers. To represent the
// entire range of int and unsigned int arrays, store as a
@@ -3655,7 +3656,7 @@
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
} else {
- ASSERT(isolate()->cpu_features()->IsSupported(SSE2));
+ ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
// We can easily implement the correct rounding behavior for the
// range [0, 2^31-1]. For the time being, to keep this code simple,
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
deleted file mode 100644
index 2613caf..0000000
--- a/src/ia32/virtual-frame-ia32.cc
+++ /dev/null
@@ -1,1366 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- // Emit code to write elements below the stack pointer to their
- // (already allocated) stack address.
- ASSERT(index <= stack_pointer_);
- FrameElement element = elements_[index];
- ASSERT(!element.is_synced());
- switch (element.type()) {
- case FrameElement::INVALID:
- break;
-
- case FrameElement::MEMORY:
- // This function should not be called with synced elements.
- // (memory elements are always synced).
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ mov(Operand(ebp, fp_relative(index)), element.reg());
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle());
- } else {
- __ Set(Operand(ebp, fp_relative(index)),
- Immediate(element.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else {
- ASSERT(backing_element.is_register());
- __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
- // Sync an element of the frame that is just above the stack pointer
- // by pushing it.
- ASSERT(index == stack_pointer_ + 1);
- stack_pointer_++;
- FrameElement element = elements_[index];
-
- switch (element.type()) {
- case FrameElement::INVALID:
- __ push(Immediate(Smi::FromInt(0)));
- break;
-
- case FrameElement::MEMORY:
- // No memory elements exist above the stack pointer.
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ push(element.reg());
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->PushUnsafeSmi(element.handle());
- } else {
- __ push(Immediate(element.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- __ push(Operand(ebp, fp_relative(backing_index)));
- } else {
- __ push(backing.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
- ASSERT(begin >= 0);
- ASSERT(end < element_count());
- // Sync elements below the range if they have not been materialized
- // on the stack.
- int start = Min(begin, stack_pointer_ + 1);
-
- // Emit normal push instructions for elements above the stack pointer
- // and use mov instructions for elements below it.
- for (int i = start; i <= end; i++) {
- if (!elements_[i].is_synced()) {
- if (i <= stack_pointer_) {
- SyncElementBelowStackPointer(i);
- } else {
- SyncElementByPushing(i);
- }
- }
- }
-}
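-
-// A worked example with hypothetical values: if stack_pointer_ == 3 and
-// SyncRange(2, 5) is called, then start = min(2, 4) = 2; elements 2..3
-// lie at or below the stack pointer and are synced in place with mov,
-// while elements 4..5 lie above it and are materialized with push,
-// which also advances stack_pointer_ to 5.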
-
-
-void VirtualFrame::MakeMergable() {
- for (int i = 0; i < element_count(); i++) {
- FrameElement element = elements_[i];
-
- // All number type information is reset to unknown for a mergable frame
- // because of incoming back edges.
- if (element.is_constant() || element.is_copy()) {
- if (element.is_synced()) {
- // Just spill.
- elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
- } else {
- // Allocate to a register.
- FrameElement backing_element; // Invalid if not a copy.
- if (element.is_copy()) {
- backing_element = elements_[element.index()];
- }
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
- elements_[i] =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- TypeInfo::Unknown());
- Use(fresh.reg(), i);
-
- // Emit a move.
- if (element.is_constant()) {
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->MoveUnsafeSmi(fresh.reg(), element.handle());
- } else {
- __ Set(fresh.reg(), Immediate(element.handle()));
- }
- } else {
- ASSERT(element.is_copy());
- // Copies are only backed by register or memory locations.
- if (backing_element.is_register()) {
- // The backing store may have been spilled by allocating,
- // but that's OK. If it was, the value is right where we
- // want it.
- if (!fresh.reg().is(backing_element.reg())) {
- __ mov(fresh.reg(), backing_element.reg());
- }
- } else {
- ASSERT(backing_element.is_memory());
- __ mov(fresh.reg(), Operand(ebp, fp_relative(element.index())));
- }
- }
- }
- // No need to set the copied flag --- there are no copies.
- } else {
- // Clear the copy flag of non-constant, non-copy elements.
- // They cannot be copied because copies are not allowed.
- // The copy flag is not relied on before the end of this loop,
- // including when registers are spilled.
- elements_[i].clear_copied();
- elements_[i].set_type_info(TypeInfo::Unknown());
- }
- }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm(), "[ Merge frame");
- // We should always be merging the code generator's current frame to an
- // expected frame.
- ASSERT(cgen()->frame() == this);
-
- // Adjust the stack pointer upward (toward the top of the virtual
- // frame) if necessary.
- if (stack_pointer_ < expected->stack_pointer_) {
- int difference = expected->stack_pointer_ - stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ sub(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- MergeMoveRegistersToMemory(expected);
- MergeMoveRegistersToRegisters(expected);
- MergeMoveMemoryToRegisters(expected);
-
- // Adjust the stack pointer downward if necessary.
- if (stack_pointer_ > expected->stack_pointer_) {
- int difference = stack_pointer_ - expected->stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- // At this point, the frames should be identical.
- ASSERT(Equals(expected));
-}
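-
-// The three move phases above run in a fixed order for a reason:
-// registers are flushed to memory first (freeing them), then
-// register-to-register conflicts are resolved (possibly with xchg),
-// and only then are memory slots and constants loaded into registers
-// that are guaranteed to be free. A hypothetical two-slot example,
-// writing fp(i) for fp_relative(i):
-//
-//   this:     [0] = eax,    [1] = memory
-//   expected: [0] = memory, [1] = eax
-//
-//   phase 1: mov [ebp + fp(0)], eax   ; eax is freed
-//   phase 3: mov eax, [ebp + fp(1)]   ; load into the freed register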
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- // Move registers, constants, and copies to memory. Perform moves
- // from the top downward in the frame in order to leave the backing
- // stores of copies in registers.
- //
- // Moving memory-backed copies to memory requires a spare register
- // for the memory-to-memory moves. Since we are performing a merge,
- // we use esi (which is already saved in the frame). We keep track
- // of the index of the frame element esi is caching or kIllegalIndex
- // if esi has not been disturbed.
- int esi_caches = kIllegalIndex;
- for (int i = element_count() - 1; i >= 0; i--) {
- FrameElement target = expected->elements_[i];
- if (target.is_register()) continue; // Handle registers later.
- if (target.is_memory()) {
- FrameElement source = elements_[i];
- switch (source.type()) {
- case FrameElement::INVALID:
- // Not a legal merge move.
- UNREACHABLE();
- break;
-
- case FrameElement::MEMORY:
- // Already in place.
- break;
-
- case FrameElement::REGISTER:
- Unuse(source.reg());
- if (!source.is_synced()) {
- __ mov(Operand(ebp, fp_relative(i)), source.reg());
- }
- break;
-
- case FrameElement::CONSTANT:
- if (!source.is_synced()) {
- if (cgen()->IsUnsafeSmi(source.handle())) {
- esi_caches = i;
- cgen()->MoveUnsafeSmi(esi, source.handle());
- __ mov(Operand(ebp, fp_relative(i)), esi);
- } else {
- __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
- }
- }
- break;
-
- case FrameElement::COPY:
- if (!source.is_synced()) {
- int backing_index = source.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- // If we have to spill a register, we spill esi.
- if (esi_caches != backing_index) {
- esi_caches = backing_index;
- __ mov(esi, Operand(ebp, fp_relative(backing_index)));
- }
- __ mov(Operand(ebp, fp_relative(i)), esi);
- } else {
- ASSERT(backing_element.is_register());
- __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
- }
- }
- break;
- }
- }
- elements_[i] = target;
- }
-
- if (esi_caches != kIllegalIndex) {
- __ mov(esi, Operand(ebp, fp_relative(context_index())));
- }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
- // We have already done X-to-memory moves.
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- // Move the right value into register i if it is currently in a register.
- int index = expected->register_location(i);
- int use_index = register_location(i);
- // Skip if register i is unused in the target frame, or if the source
- // element is not in a register (then this is not a
- // register-to-register move).
- if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
- Register target = RegisterAllocator::ToRegister(i);
- Register source = elements_[index].reg();
- if (index != use_index) {
- if (use_index == kIllegalIndex) { // Target is currently unused.
- // Copy the contents of the source register to the target register.
- // Set frame element register to target.
- Use(target, index);
- Unuse(source);
- __ mov(target, source);
- } else {
- // Exchange contents of registers source and target.
- // Nothing except the register backing use_index has changed.
- elements_[use_index].set_reg(source);
- set_register_location(target, index);
- set_register_location(source, use_index);
- __ xchg(source, target);
- }
- }
-
- if (!elements_[index].is_synced() &&
- expected->elements_[index].is_synced()) {
- __ mov(Operand(ebp, fp_relative(index)), target);
- }
- elements_[index] = expected->elements_[index];
- }
-}
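-
-// A worked example with hypothetical indices: suppose the current
-// frame holds element 7 in eax and element 9 in edx, while the
-// expected frame wants element 7 in edx and element 9 in eax. The eax
-// iteration sees index = 9 and use_index = 7, both in use, and emits a
-// single xchg that fixes both elements at once; the later edx
-// iteration then finds everything already in place.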
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
- // Move memory, constants, and copies to registers. This is the
- // final step and since it is not done from the bottom up, but in
- // register code order, we have special code to ensure that the backing
- // elements of copies are in their correct locations when we
- // encounter the copies.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int index = expected->register_location(i);
- if (index != kIllegalIndex) {
- FrameElement source = elements_[index];
- FrameElement target = expected->elements_[index];
- Register target_reg = RegisterAllocator::ToRegister(i);
- ASSERT(target.reg().is(target_reg));
- switch (source.type()) {
- case FrameElement::INVALID: // Fall through.
- UNREACHABLE();
- break;
- case FrameElement::REGISTER:
- ASSERT(source.Equals(target));
- // Go to next iteration. Skips Use(target_reg) and syncing
- // below. It is safe to skip syncing because a target
- // register frame element would only be synced if all source
- // elements were.
- continue;
- break;
- case FrameElement::MEMORY:
- ASSERT(index <= stack_pointer_);
- __ mov(target_reg, Operand(ebp, fp_relative(index)));
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(source.handle())) {
- cgen()->MoveUnsafeSmi(target_reg, source.handle());
- } else {
- __ Set(target_reg, Immediate(source.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = source.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- ASSERT(backing_index <= stack_pointer_);
- // Optimization: if the backing store should also move to a
- // register, move the backing store to its register first.
- if (expected->elements_[backing_index].is_register()) {
- FrameElement new_backing = expected->elements_[backing_index];
- Register new_backing_reg = new_backing.reg();
- ASSERT(!is_used(new_backing_reg));
- elements_[backing_index] = new_backing;
- Use(new_backing_reg, backing_index);
- __ mov(new_backing_reg,
- Operand(ebp, fp_relative(backing_index)));
- __ mov(target_reg, new_backing_reg);
- } else {
- __ mov(target_reg, Operand(ebp, fp_relative(backing_index)));
- }
- } else {
- __ mov(target_reg, backing.reg());
- }
- }
- }
- // Ensure the proper sync state.
- if (target.is_synced() && !source.is_synced()) {
- __ mov(Operand(ebp, fp_relative(index)), target_reg);
- }
- Use(target_reg, index);
- elements_[index] = target;
- }
- }
-}
-
-
-void VirtualFrame::Enter() {
- // Registers live on entry: esp, ebp, esi, edi.
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- if (FLAG_debug_code) {
- // Verify that edi contains a JS function. The following code
- // relies on eax being available for use.
- __ test(edi, Immediate(kSmiTagMask));
- __ Check(not_zero,
- "VirtualFrame::Enter - edi is not a function (smi check).");
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ Check(equal,
- "VirtualFrame::Enter - edi is not a function (map check).");
- }
-#endif
-
- EmitPush(ebp);
-
- __ mov(ebp, Operand(esp));
-
- // Store the context in the frame. The context is kept in esi and a
- // copy is stored in the frame. The external reference to esi
- // remains.
- EmitPush(esi);
-
- // Store the function in the frame. The frame owns the register
- // reference now (ie, it can keep it in edi or spill it later).
- Push(edi);
- SyncElementAt(element_count() - 1);
- cgen()->allocator()->Unuse(edi);
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Avoid using the leave instruction here, because it is too
- // short. We need the return sequence to be at least the size of a
- // call instruction to support patching the exit code in the
- // debugger. See VisitReturnStatement for the full return sequence.
- __ mov(esp, Operand(ebp));
- stack_pointer_ = frame_pointer();
- for (int i = element_count() - 1; i > stack_pointer_; i--) {
- FrameElement last = elements_.RemoveLast();
- if (last.is_register()) {
- Unuse(last.reg());
- }
- }
-
- EmitPop(ebp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- // The locals are initialized to a constant (the undefined value), but
- // we sync them with the actual frame to allocate space for spilling
- // them later. First sync everything above the stack pointer so we can
- // use pushes to allocate and initialize the locals.
- SyncRange(stack_pointer_ + 1, element_count() - 1);
- Handle<Object> undefined = FACTORY->undefined_value();
- FrameElement initial_value =
- FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- if (count == 1) {
- __ push(Immediate(undefined));
- } else if (count < kLocalVarBound) {
- // For fewer locals the unrolled loop is more compact.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ Set(temp.reg(), Immediate(undefined));
- for (int i = 0; i < count; i++) {
- __ push(temp.reg());
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- Result cnt = cgen()->allocator()->Allocate();
- Result tmp = cgen()->allocator()->Allocate();
- ASSERT(cnt.is_valid());
- ASSERT(tmp.is_valid());
- __ mov(cnt.reg(), Immediate(count));
- __ mov(tmp.reg(), Immediate(undefined));
- __ bind(&alloc_locals_loop);
- __ push(tmp.reg());
- __ dec(cnt.reg());
- __ j(not_zero, &alloc_locals_loop);
- }
- for (int i = 0; i < count; i++) {
- elements_.Add(initial_value);
- stack_pointer_++;
- }
- }
-}
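-
-// A rough size argument for kLocalVarBound (a sketch, not measured):
-// the unrolled form costs a five-byte mov plus one one-byte push per
-// local, so it grows linearly, while the loop form is a small fixed
-// sequence (two movs plus a four-byte push/dec/jnz body). Around ten
-// locals the loop starts to win, which matches the bound declared in
-// virtual-frame-ia32.h.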
-
-
-void VirtualFrame::SaveContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ mov(Operand(ebp, fp_relative(context_index())), esi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ mov(esi, Operand(ebp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ lea(temp.reg(), ParameterAt(-1));
- Push(&temp);
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- FrameElement original = elements_[index];
-
- // Is this element the backing store of any copies?
- int new_backing_index = kIllegalIndex;
- if (original.is_copied()) {
- // Verify it is copied, and find first copy.
- for (int i = index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- new_backing_index = i;
- break;
- }
- }
- }
-
- if (new_backing_index == kIllegalIndex) {
- // No copies found, return kIllegalIndex.
- if (original.is_register()) {
- Unuse(original.reg());
- }
- elements_[index] = FrameElement::InvalidElement();
- return kIllegalIndex;
- }
-
- // This is the backing store of copies.
- Register backing_reg;
- if (original.is_memory()) {
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Use(fresh.reg(), new_backing_index);
- backing_reg = fresh.reg();
- __ mov(backing_reg, Operand(ebp, fp_relative(index)));
- } else {
- // The original was in a register.
- backing_reg = original.reg();
- set_register_location(backing_reg, new_backing_index);
- }
- // Invalidate the element at index.
- elements_[index] = FrameElement::InvalidElement();
- // Set the new backing element.
- if (elements_[new_backing_index].is_synced()) {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::SYNCED,
- original.type_info());
- } else {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- }
- // Update the other copies.
- for (int i = new_backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- elements_[i].set_index(new_backing_index);
- elements_[new_backing_index].set_copied();
- }
- }
- return new_backing_index;
-}
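-
-// A worked example with hypothetical indices: say element 2 is a
-// memory element and element 5 is a copy of it. Invalidating slot 2
-// allocates a fresh register, loads it from [ebp + fp_relative(2)],
-// marks element 2 invalid, and rewrites element 5 as a register
-// element (its sync state preserved) that now backs any later copies
-// of the old slot.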
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- int new_backing_store_index = InvalidateFrameSlotAt(index);
- if (new_backing_store_index != kIllegalIndex) {
- elements_.Add(CopyElementAt(new_backing_store_index));
- return;
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY: {
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- FrameElement new_element =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- original.type_info());
- Use(fresh.reg(), element_count());
- elements_.Add(new_element);
- __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
- break;
- }
- case FrameElement::REGISTER:
- Use(original.reg(), element_count());
- // Fall through.
- case FrameElement::CONSTANT:
- case FrameElement::COPY:
- original.clear_sync();
- elements_.Add(original);
- break;
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- // Store the value on top of the frame to the virtual frame slot at
- // a given index. The value on top of the frame is left in place.
- // This is a duplicating operation, so it can create copies.
- ASSERT(index >= 0);
- ASSERT(index < element_count());
-
- int top_index = element_count() - 1;
- FrameElement top = elements_[top_index];
- FrameElement original = elements_[index];
- if (top.is_copy() && top.index() == index) return;
- ASSERT(top.is_valid());
-
- InvalidateFrameSlotAt(index);
-
- // InvalidateFrameSlotAt can potentially change any frame element, due
- // to spilling registers to allocate temporaries in order to preserve
- // the copy-on-write semantics of aliased elements. Reload top from
- // the frame.
- top = elements_[top_index];
-
- if (top.is_copy()) {
- // There are two cases based on the relative positions of the
- // stored-to slot and the backing slot of the top element.
- int backing_index = top.index();
- ASSERT(backing_index != index);
- if (backing_index < index) {
- // 1. The top element is a copy of a slot below the stored-to
- // slot. The stored-to slot becomes an unsynced copy of that
- // same backing slot.
- elements_[index] = CopyElementAt(backing_index);
- } else {
- // 2. The top element is a copy of a slot above the stored-to
- // slot. The stored-to slot becomes the new (unsynced) backing
- // slot and both the top element and the element at the former
- // backing slot become copies of it. The sync state of the top
- // and former backing elements is preserved.
- FrameElement backing_element = elements_[backing_index];
- ASSERT(backing_element.is_memory() || backing_element.is_register());
- if (backing_element.is_memory()) {
- // Because sets of copies are canonicalized to be backed by
- // their lowest frame element, and because memory frame
- // elements are backed by the corresponding stack address, we
- // have to move the actual value down in the stack.
- //
- // TODO(209): consider allocating the stored-to slot to the
- // temp register. Alternatively, allow copies to appear in
- // any order in the frame and lazily move the value down to
- // the slot.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else {
- set_register_location(backing_element.reg(), index);
- if (backing_element.is_synced()) {
- // If the element is a register, we will not actually move
- // anything on the stack but only update the virtual frame
- // element.
- backing_element.clear_sync();
- }
- }
- elements_[index] = backing_element;
-
- // The old backing element becomes a copy of the new backing
- // element.
- FrameElement new_element = CopyElementAt(index);
- elements_[backing_index] = new_element;
- if (backing_element.is_synced()) {
- elements_[backing_index].set_sync();
- }
-
- // All the copies of the old backing element (including the top
- // element) become copies of the new backing element.
- for (int i = backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
- elements_[i].set_index(index);
- }
- }
- }
- return;
- }
-
- // Move the top element to the stored-to slot and replace it (the
- // top element) with a copy.
- elements_[index] = top;
- if (top.is_memory()) {
- // TODO(209): consider allocating the stored-to slot to the temp
- // register. Alternatively, allow copies to appear in any order
- // in the frame and lazily move the value down to the slot.
- FrameElement new_top = CopyElementAt(index);
- new_top.set_sync();
- elements_[top_index] = new_top;
-
- // The sync state of the former top element is correct (synced).
- // Emit code to move the value down in the frame.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(esp, 0));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else if (top.is_register()) {
- set_register_location(top.reg(), index);
- // The stored-to slot has the (unsynced) register reference and
- // the top element becomes a copy. The sync state of the top is
- // preserved.
- FrameElement new_top = CopyElementAt(index);
- if (top.is_synced()) {
- new_top.set_sync();
- elements_[index].clear_sync();
- }
- elements_[top_index] = new_top;
- } else {
- // The stored-to slot holds the same value as the top but
- // unsynced. (We do not have copies of constants yet.)
- ASSERT(top.is_constant());
- elements_[index].clear_sync();
- }
-}
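-
-// A worked example of case 2 above, with hypothetical indices: storing
-// to slot 3 when the top of the frame is a copy of slot 5 and slot 5
-// is a register element in ebx. The store re-points ebx's location to
-// slot 3, slot 5 becomes a copy of slot 3, and every other copy of
-// slot 5 (including the top) is re-indexed to 3; no value is moved,
-// only the frame's bookkeeping changes.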
-
-
-void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- if (original.is_copy()) {
- original = elements_[original.index()];
- index = original.index();
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY:
- case FrameElement::REGISTER: {
- Label done;
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Register fresh_reg = fresh.reg();
- FrameElement new_element =
- FrameElement::RegisterElement(fresh_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- new_element.set_untagged_int32(true);
- Use(fresh_reg, element_count());
- fresh.Unuse(); // BreakTarget does not handle a live Result well.
- elements_.Add(new_element);
- if (original.is_register()) {
- __ mov(fresh_reg, original.reg());
- } else {
- ASSERT(original.is_memory());
- __ mov(fresh_reg, Operand(ebp, fp_relative(index)));
- }
- // Now convert the value to int32, or bail out.
- if (original.type_info().IsSmi()) {
- __ SmiUntag(fresh_reg);
- // Pushing the element is completely done.
- } else {
- __ test(fresh_reg, Immediate(kSmiTagMask));
- Label not_smi;
- __ j(not_zero, &not_smi);
- __ SmiUntag(fresh_reg);
- __ jmp(&done);
-
- __ bind(&not_smi);
- if (!original.type_info().IsNumber()) {
- __ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
- FACTORY->heap_number_map());
- cgen()->unsafe_bailout_->Branch(not_equal);
- }
-
- if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
- UNREACHABLE();
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(fresh_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(fresh_reg, Operand(xmm0));
- __ cvtsi2sd(xmm1, Operand(fresh_reg));
- __ ucomisd(xmm0, xmm1);
- cgen()->unsafe_bailout_->Branch(not_equal);
- cgen()->unsafe_bailout_->Branch(parity_even); // NaN.
- // Test for negative zero.
- __ test(fresh_reg, Operand(fresh_reg));
- __ j(not_zero, &done);
- __ movmskpd(fresh_reg, xmm0);
- __ and_(fresh_reg, 0x1);
- cgen()->unsafe_bailout_->Branch(not_equal);
- }
- __ bind(&done);
- }
- break;
- }
- case FrameElement::CONSTANT:
- elements_.Add(CopyElementAt(index));
- elements_[element_count() - 1].set_untagged_int32(true);
- break;
- case FrameElement::COPY:
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
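-
-// A scalar sketch of the SSE2 convert-and-check sequence above, in
-// plain C++ rather than cvttsd2si/ucomisd/movmskpd (assuming <cmath>;
-// a portable version would also range-check d before the cast):
-//
-//   static bool ToInt32Exact(double d, int32_t* out) {
-//     if (std::isnan(d)) return false;                // ucomisd parity
-//     int32_t i = static_cast<int32_t>(d);            // cvttsd2si
-//     if (static_cast<double>(i) != d) return false;  // round trip
-//     if (i == 0 && std::signbit(d)) return false;    // movmskpd: -0.0
-//     *out = i;
-//     return true;
-//   }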
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- ASSERT(cgen()->HasValidEntryRegisters());
- // Grow the expression stack by handler size less one (the return
- // address is already pushed by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->ToRegister(eax);
- arg->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
-
- if (arg0->is_register() && arg0->reg().is(eax)) {
- if (arg1->is_register() && arg1->reg().is(edx)) {
- // Wrong registers.
- __ xchg(eax, edx);
- } else {
- // Register edx is free for arg0, which frees eax for arg1.
- arg0->ToRegister(edx);
- arg1->ToRegister(eax);
- }
- } else {
- // Register eax is free for arg1, which guarantees edx is free for
- // arg0.
- arg1->ToRegister(eax);
- arg0->ToRegister(edx);
- }
-
- arg0->Unuse();
- arg1->Unuse();
- return RawCallStub(stub);
-}
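-
-// The case analysis above needs only three branches because, after
-// PrepareForCall, the two argument Results are presumably the only
-// live register references: if arg0 already sits in eax, then either
-// arg1 is in edx (a pure swap, one xchg) or edx is free; otherwise eax
-// is free for arg1, which in turn guarantees edx is free for arg0.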
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
- Result function = Pop();
-
- // InvokeFunction requires function in edi. Move it in there.
- function.ToRegister(edi);
- function.Unuse();
-
- // +1 for receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION);
- RestoreContextRegister();
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- PrepareForCall(0, 0);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ InvokeBuiltin(id, flag);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ call(code, rmode);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b. Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg) {
- if (a->is_register() && a->reg().is(a_reg)) {
- b->ToRegister(b_reg);
- } else if (!cgen()->allocator()->is_used(a_reg)) {
- a->ToRegister(a_reg);
- b->ToRegister(b_reg);
- } else if (cgen()->allocator()->is_used(b_reg)) {
- // a must be in b_reg, b in a_reg.
- __ xchg(a_reg, b_reg);
- // Results a and b will be invalidated, so it is ok if they are switched.
- } else {
- b->ToRegister(b_reg);
- a->ToRegister(a_reg);
- }
- a->Unuse();
- b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
- // Name and receiver are on the top of the frame. The IC expects
- // name in ecx and receiver in eax.
- Result name = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0); // No stack arguments.
- MoveResultsToRegisters(&name, &receiver, ecx, eax);
-
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. Put them in eax and edx.
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&key, &receiver, eax, edx);
-
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- // Value and (if not contextual) receiver are on top of the frame.
- // The IC expects name in ecx, value in eax, and receiver in edx.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
- : Builtins::kStoreIC_Initialize));
-
- Result value = Pop();
- RelocInfo::Mode mode;
- if (is_contextual) {
- PrepareForCall(0, 0);
- value.ToRegister(eax);
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- value.Unuse();
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&value, &receiver, eax, edx);
- mode = RelocInfo::CODE_TARGET;
- }
- __ mov(ecx, name);
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- // Value, key, and receiver are on the top of the frame. The IC
- // expects value in eax, key in ecx, and receiver in edx.
- Result value = Pop();
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- if (!cgen()->allocator()->is_used(eax) ||
- (value.is_register() && value.reg().is(eax))) {
- if (!cgen()->allocator()->is_used(eax)) {
- value.ToRegister(eax);
- }
- MoveResultsToRegisters(&key, &receiver, ecx, edx);
- value.Unuse();
- } else if (!cgen()->allocator()->is_used(ecx) ||
- (key.is_register() && key.reg().is(ecx))) {
- if (!cgen()->allocator()->is_used(ecx)) {
- key.ToRegister(ecx);
- }
- MoveResultsToRegisters(&value, &receiver, eax, edx);
- key.Unuse();
- } else if (!cgen()->allocator()->is_used(edx) ||
- (receiver.is_register() && receiver.reg().is(edx))) {
- if (!cgen()->allocator()->is_used(edx)) {
- receiver.ToRegister(edx);
- }
- MoveResultsToRegisters(&key, &value, ecx, eax);
- receiver.Unuse();
- } else {
- // All three registers are used, and no value is in the correct place.
- // We have one of the two circular permutations of eax, ecx, edx.
- ASSERT(value.is_register());
- if (value.reg().is(ecx)) {
- __ xchg(eax, edx);
- __ xchg(eax, ecx);
- } else {
- __ xchg(eax, ecx);
- __ xchg(eax, edx);
- }
- value.Unuse();
- key.Unuse();
- receiver.Unuse();
- }
-
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
- : Builtins::kKeyedStoreIC_Initialize));
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
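-
-// A worked trace of the circular-permutation branch above. The IC
-// wants value in eax, key in ecx, and receiver in edx. If value is in
-// ecx then, this being a circular permutation with nothing in place,
-// key is in edx and receiver is in eax:
-//
-//   xchg eax, edx   ; receiver -> edx (done), key -> eax
-//   xchg eax, ecx   ; value -> eax (done), key -> ecx (done)
-//
-// The mirror-image permutation takes the two exchanges in the other
-// order.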
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are on top of the frame.
- // The IC expects the name in ecx and the rest on the stack and
- // drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = Isolate::Current()->stub_cache()->ComputeCallInitialize(
- arg_count, in_loop);
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- Result name = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
- name.ToRegister(ecx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are on top of the frame.
- // The IC expects the name in ecx and the rest on the stack and
- // drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- Isolate::Current()->stub_cache()->ComputeKeyedCallInitialize(arg_count,
- in_loop);
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- Result name = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
- name.ToRegister(ecx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
- // Arguments, receiver, and function are on top of the frame. The
- // IC expects arg count in eax, function in edi, and the arguments
- // and receiver on the stack.
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructCall));
- // Duplicate the function before preparing the frame.
- PushElementAt(arg_count);
- Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
- function.ToRegister(edi);
-
- // Constructors are called with the number of arguments in register
- // eax for now. Another option would be to have separate construct
- // call trampolines for the different argument counts encountered.
- Result num_args = cgen()->allocator()->Allocate(eax);
- ASSERT(num_args.is_valid());
- __ Set(num_args.reg(), Immediate(arg_count));
-
- function.Unuse();
- num_args.Unuse();
- return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
-
-
-Result VirtualFrame::Pop() {
- FrameElement element = elements_.RemoveLast();
- int index = element_count();
- ASSERT(element.is_valid());
- ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode());
-
- // Get number type information of the result.
- TypeInfo info;
- if (!element.is_copy()) {
- info = element.type_info();
- } else {
- info = elements_[element.index()].type_info();
- }
-
- bool pop_needed = (stack_pointer_ == index);
- if (pop_needed) {
- stack_pointer_--;
- if (element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ pop(temp.reg());
- temp.set_type_info(info);
- temp.set_untagged_int32(element.is_untagged_int32());
- return temp;
- }
-
- __ add(Operand(esp), Immediate(kPointerSize));
- }
- ASSERT(!element.is_memory());
-
- // The top element is a register, constant, or a copy. Unuse
- // registers and follow copies to their backing store.
- if (element.is_register()) {
- Unuse(element.reg());
- } else if (element.is_copy()) {
- ASSERT(!element.is_untagged_int32());
- ASSERT(element.index() < index);
- index = element.index();
- element = elements_[index];
- }
- ASSERT(!element.is_copy());
-
- // The element is memory, a register, or a constant.
- if (element.is_memory()) {
- // Memory elements could only be the backing store of a copy.
- // Allocate the original to a register.
- ASSERT(index <= stack_pointer_);
- ASSERT(!element.is_untagged_int32());
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- Use(temp.reg(), index);
- FrameElement new_element =
- FrameElement::RegisterElement(temp.reg(),
- FrameElement::SYNCED,
- element.type_info());
- // Preserve the copy flag on the element.
- if (element.is_copied()) new_element.set_copied();
- elements_[index] = new_element;
- __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
- return Result(temp.reg(), info);
- } else if (element.is_register()) {
- Result return_value(element.reg(), info);
- return_value.set_untagged_int32(element.is_untagged_int32());
- return return_value;
- } else {
- ASSERT(element.is_constant());
- Result return_value(element.handle());
- return_value.set_untagged_int32(element.is_untagged_int32());
- return return_value;
- }
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(Operand operand) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(immediate);
-}
-
-
-void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
- ASSERT(!ConstantPoolOverflowed());
- elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
- elements_[element_count() - 1].set_untagged_int32(true);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
- ASSERT(expr->IsTrivial());
-
- Literal* lit = expr->AsLiteral();
- if (lit != NULL) {
- Push(lit->handle());
- return;
- }
-
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL) {
- Slot* slot = proxy->var()->AsSlot();
- if (slot->type() == Slot::LOCAL) {
- PushLocalAt(slot->index());
- return;
- }
- if (slot->type() == Slot::PARAMETER) {
- PushParameterAt(slot->index());
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
- if (ConstantPoolOverflowed()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ Set(temp.reg(), Immediate(value));
- Push(&temp);
- } else {
- FrameElement element =
- FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
- elements_.Add(element);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
deleted file mode 100644
index 504a8fc..0000000
--- a/src/ia32/virtual-frame-ia32.h
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
-#define V8_IA32_VIRTUAL_FRAME_IA32_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
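-
-// A hedged usage sketch (hypothetical client code) of the push/pop
-// interface declared below:
-//
-//   VirtualFrame* frame = cgen()->frame();
-//   frame->PushLocalAt(0);       // copy of local 0, often no code yet
-//   frame->Push(some_handle);    // constant element, no code emitted
-//   Result top = frame->Pop();   // materializes the constant
-//   frame->Drop(1);              // discard the copy of the local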
-
-class VirtualFrame: public ZoneObject {
- public:
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
- class SpilledScope BASE_EMBEDDED {
- public:
- SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
- ASSERT(cgen()->has_valid_frame());
- cgen()->frame()->SpillAll();
- cgen()->set_in_spilled_code(true);
- }
-
- ~SpilledScope() {
- cgen()->set_in_spilled_code(previous_state_);
- }
-
- private:
- bool previous_state_;
-
- CodeGenerator* cgen() {
- return CodeGeneratorScope::Current(Isolate::Current());
- }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- CodeGenerator* cgen() {
- return CodeGeneratorScope::Current(Isolate::Current());
- }
-
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- TypeInfo info = TypeInfo::Uninitialized());
-
- // The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
-
- // The height of the virtual expression stack.
- int height() { return element_count() - expression_base_index(); }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- inline int register_location(Register reg);
-
- inline void set_register_location(Register reg, int index);
-
- bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- inline bool is_used(Register reg);
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget count elements from the top of the frame, all of them in
- // memory (including synced ones), and adjust the stack pointer
- // downward to match an external frame effect (examples include a
- // call removing its arguments, and exiting a try/catch removing an
- // exception handler). No code will be emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- ForgetElements(count);
- }
-
- // Forget count elements from the top of the frame without adjusting
- // the stack pointer downward. This is used, for example, before
- // merging frames at break, continue, and return targets.
- void ForgetElements(int count);
-
- // Spill all values from the frame to memory.
- inline void SpillAll();
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
- }
-
- // Make the two registers distinct and spill them. Returns the second
- // register. If the registers were not distinct then it returns the new
- // second register.
- Result MakeDistinctAndSpilled(Result* left, Result* right) {
- Spill(left->reg());
- Spill(right->reg());
- if (left->reg().is(right->reg())) {
- RegisterAllocator* allocator = cgen()->allocator();
- Result fresh = allocator->Allocate();
- ASSERT(fresh.is_valid());
- masm()->mov(fresh.reg(), right->reg());
- return fresh;
- }
- return *right;
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
- Register SpillAnyRegister();
-
- // Spill the top element of the frame.
- void SpillTop() { SpillElementAt(element_count() - 1); }
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
- // Make this frame so that an arbitrary frame of the same height can
- // be merged to it. Copies and constants are removed from the frame.
- void MakeMergable();
-
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected);
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Use(i);
- }
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by spilling locals. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- inline void PrepareForReturn();
-
- // Number of local variables after which we use a loop for allocating.
- static const int kLocalVarBound = 10;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // An element of the expression stack as an assembly operand.
- Operand ElementAt(int index) const {
- return Operand(esp, index * kPointerSize);
- }
-
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- inline void SetElementAt(int index, Handle<Object> value);
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
- void StoreToElementAt(int index) {
- StoreToFrameSlotAt(element_count() - index - 1);
- }
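-
- // For example, with element_count() == 5, PushElementAt(0) pushes a copy
- // of the top element (slot 4) and StoreToElementAt(4) writes through to
- // the bottom element (slot 0): the indices here are frame-top relative,
- // like SetElementAt above.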
-
- // A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return Operand(ebp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void UntaggedPushLocalAt(int index) {
- UntaggedPushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // Push the function on top of the frame.
- void PushFunction() {
- PushFrameSlotAt(function_index());
- }
-
- // Save the value of the esi register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the esi register from the value of the context frame
- // slot.
- void RestoreContextRegister();
-
- // A parameter as an assembly operand.
- Operand ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count());
- return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
- }
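-
- // Worked example (ia32, kPointerSize == 4): with parameter_count() == 2,
- // ParameterAt(-1) (the receiver) is Operand(ebp, 16), ParameterAt(0) is
- // Operand(ebp, 12), and ParameterAt(1) is Operand(ebp, 8); the return
- // address sits at ebp + 4 and the caller's saved ebp at ebp + 0.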
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void UntaggedPushParameterAt(int index) {
- UntaggedPushFrameSlotAt(param0_index() + index);
- }
-
- // Push the value of a parameter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
- // The receiver frame slot.
- Operand Receiver() {
- return ParameterAt(-1);
- }
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline Result CallStub(CodeStub* stub, int arg_count);
-
- // Call stub that takes a single argument passed in eax. The
- // argument is given as a result which does not have to be eax or
- // even a register. The argument is consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that takes a pair of arguments passed in edx (arg0) and
- // eax (arg1). The arguments are given as results which do not have
- // to be in the proper registers or even in registers. The
- // arguments are consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- Result CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- Result CallRuntime(const Runtime::Function* f, int arg_count);
- Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
-
- // Call load IC. Name and receiver are found on top of the frame.
- // Both are dropped.
- Result CallLoadIC(RelocInfo::Mode mode);
-
- // Call keyed load IC. Key and receiver are found on top of the
- // frame. Both are dropped.
- Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
- // Call store IC. If the store is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed store IC. Value, key, and receiver are found on top
- // of the frame. All three are dropped.
- Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call call IC. Function name, arguments, and receiver are found on top
- // of the frame and dropped by the call. The argument count does not
- // include the receiver.
- Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Call keyed call IC. Same calling convention as CallCallIC.
- Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Allocate and call JS function as constructor. Arguments,
- // receiver (global object), and function are found on top of the
- // frame. Function is not dropped. The argument count does not
- // include the receiver.
- Result CallConstructor(int arg_count);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // except possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() {
- Drop(1);
- }
-
- // Duplicate the top element of the frame.
- void Dup() {
- PushFrameSlotAt(element_count() - 1);
- }
-
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- void EmitPop(Operand operand);
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Operand operand,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Immediate immediate,
- TypeInfo info = TypeInfo::Unknown());
-
- inline bool ConstantPoolOverflowed();
-
- // Push an element on the virtual frame.
- void Push(Handle<Object> value);
- inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
- inline void Push(Smi* value);
-
- void PushUntaggedElement(Handle<Object> value);
-
- // Pushing a result invalidates it (its contents become owned by the
- // frame).
- void Push(Result* result) {
- // This assert will trigger if you try to push the same value twice.
- ASSERT(result->is_valid());
- if (result->is_register()) {
- Push(result->reg(), result->type_info());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- if (cgen()->in_safe_int32_mode()) {
- ASSERT(result->is_untagged_int32());
- elements_[element_count() - 1].set_untagged_int32(true);
- }
- result->Unuse();
- }
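-
- // Illustrative use of Push(Result*) (a sketch; the ownership transfer is
- // the point): after
- //   Result value = frame->Pop();
- //   frame->Push(&value);
- // 'value' has been invalidated via Unuse(); its contents are owned by the
- // frame again, and pushing it a second time would trip the ASSERT above.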
-
- // Pushing an expression expects that the expression is trivial (according
- // to Expression::IsTrivial).
- void Push(Expression* expr);
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- // Check that the frame contains no untagged int32 elements.
- bool HasNoUntaggedInt32Elements() {
- for (int i = 0; i < element_count(); ++i) {
- if (elements_[i].is_untagged_int32()) return false;
- }
- return true;
- }
-
- // Update the type information of a variable frame element directly.
- inline void SetTypeForLocalAt(int index, TypeInfo info);
- inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- // Five fixed elements (receiver, return address, frame pointer, context,
- // function) plus 8 expression stack slots.
- static const int kPreallocatedElements = 5 + 8;
-
- ZoneList<FrameElement> elements_;
-
- // The index of the element that is at the processor's stack pointer
- // (the esp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
-
- // The number of frame-allocated parameters and locals, respectively.
- inline int parameter_count();
-
- inline int local_count();
-
- // The index of the element that is at the processor's frame pointer
- // (the ebp register). The parameters, receiver, and return address
- // are below the frame pointer.
- int frame_pointer() {
- return parameter_count() + 2;
- }
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() {
- return 1;
- }
-
- // The index of the context slot in the frame. It is immediately
- // above the frame pointer.
- int context_index() {
- return frame_pointer() + 1;
- }
-
- // The index of the function slot in the frame. It is above the frame
- // pointer and the context slot.
- int function_index() {
- return frame_pointer() + 2;
- }
-
- // The index of the first local. Between the frame pointer and the
- // locals lie the context and the function.
- int local0_index() {
- return frame_pointer() + 3;
- }
-
- // The index of the base of the expression stack.
- int expression_base_index() {
- return local0_index() + local_count();
- }
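-
- // Putting the index helpers together (derived from the accessors above,
- // for parameter_count() == P and local_count() == L):
- //
- //   0                  receiver
- //   1 .. P             parameters
- //   P + 1              return address
- //   P + 2              saved ebp         == frame_pointer()
- //   P + 3              context           == context_index()
- //   P + 4              function          == function_index()
- //   P + 5 .. P + 4+L   locals            == local0_index() onwards
- //   P + 5 + L ..       expression stack  == expression_base_index() onwards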
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
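-
- // Sanity check (same example as ParameterAt above, parameter_count() == 2,
- // so frame_pointer() == 4): fp_relative(0) == 16, placing the receiver at
- // ebp + 16, in agreement with ParameterAt(-1).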
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index: write it to memory if
- // necessary, free any associated register, and forget its value if it
- // is a constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame, as an untagged int32 value. Bails out if the value is not
- // an int32.
- void UntaggedPushFrameSlotAt(int index);
-
- // Push the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
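-
- // Taken together, the comments above imply the call order inside
- // MergeTo() (an illustrative sketch, not a verbatim copy of its body):
- //
- //   PrepareMergeTo(expected);
- //   MergeMoveRegistersToMemory(expected);
- //   MergeMoveRegistersToRegisters(expected);
- //   MergeMoveMemoryToRegisters(expected);
- //
- // The ordering is what breaks register-move cycles (e.g. eax -> ebx and
- // ebx -> eax): one element is written to its memory slot in the middle
- // phase and reloaded into its target register in the final phase.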
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
-
- // This function assumes that a and b are the only results that could be in
- // the registers a_reg or b_reg. Other results can be live, but must not
- // be in the registers a_reg or b_reg. The results a and b are invalidated.
- void MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg);
-
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- Result RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class FrameRegisterState;
- friend class JumpTarget;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_VIRTUAL_FRAME_IA32_H_