Move V8 to external/v8

Change-Id: If68025d67453785a651c5dfb34fad298c16676a4
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
new file mode 100644
index 0000000..9a5352b
--- /dev/null
+++ b/src/ia32/assembler-ia32-inl.h
@@ -0,0 +1,319 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
+#define V8_IA32_ASSEMBLER_IA32_INL_H_
+
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+Condition NegateCondition(Condition cc) {
+  return static_cast<Condition>(cc ^ 1);
+}
+
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(intptr_t delta) {
+  if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
+    int32_t* p = reinterpret_cast<int32_t*>(pc_);
+    *p -= delta;  // relocate entry
+  } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+    // Special handling of js_return when a break point is set (call
+    // instruction has been inserted).
+    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+    *p -= delta;  // relocate entry
+  } else if (IsInternalReference(rmode_)) {
+    // absolute code pointer inside code object moves with the code object.
+    int32_t* p = reinterpret_cast<int32_t*>(pc_);
+    *p += delta;  // relocate entry
+  }
+}
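+
+// Worked example (illustrative): a pc-relative entry stores
+// target - (pc + sizeof(int32_t)). If the code object moves up by delta
+// bytes while the target stays fixed, pc grows by delta, so the stored
+// value must shrink by delta (*p -= delta). An absolute pointer into
+// the moving object itself grows with it (*p += delta).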
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return *reinterpret_cast<Object**>(pc_);
+}
+
+
+Object** RelocInfo::target_object_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object**>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  *reinterpret_cast<Object**>(pc_) = target;
+}
+
+
+Address* RelocInfo::target_reference_address() {
+  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+  return reinterpret_cast<Address*>(pc_);
+}
+
+
+Address RelocInfo::call_address() {
+  ASSERT(IsCallInstruction());
+  return Assembler::target_address_at(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  ASSERT(IsCallInstruction());
+  Assembler::set_target_address_at(pc_ + 1, target);
+}
+
+
+Object* RelocInfo::call_object() {
+  ASSERT(IsCallInstruction());
+  return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+  ASSERT(IsCallInstruction());
+  return reinterpret_cast<Object**>(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  ASSERT(IsCallInstruction());
+  *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsCallInstruction() {
+  return *pc_ == 0xE8;
+}
+
+
+Immediate::Immediate(int x) {
+  x_ = x;
+  rmode_ = RelocInfo::NONE;
+}
+
+
+Immediate::Immediate(const ExternalReference& ext) {
+  x_ = reinterpret_cast<int32_t>(ext.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Immediate::Immediate(const char* s) {
+  x_ = reinterpret_cast<int32_t>(s);
+  rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+
+Immediate::Immediate(Label* internal_offset) {
+  x_ = reinterpret_cast<int32_t>(internal_offset);
+  rmode_ = RelocInfo::INTERNAL_REFERENCE;
+}
+
+
+Immediate::Immediate(Handle<Object> handle) {
+  // Verify all Objects referenced by code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    x_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // no relocation needed
+    x_ = reinterpret_cast<intptr_t>(obj);
+    rmode_ = RelocInfo::NONE;
+  }
+}
+
+
+Immediate::Immediate(Smi* value) {
+  x_ = reinterpret_cast<intptr_t>(value);
+  rmode_ = RelocInfo::NONE;
+}
+
+
+void Assembler::emit(uint32_t x) {
+  *reinterpret_cast<uint32_t*>(pc_) = x;
+  pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit(Handle<Object> handle) {
+  // Verify all Objects referenced by code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    emit(reinterpret_cast<intptr_t>(handle.location()),
+         RelocInfo::EMBEDDED_OBJECT);
+  } else {
+    // no relocation needed
+    emit(reinterpret_cast<intptr_t>(obj));
+  }
+}
+
+
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
+  if (rmode != RelocInfo::NONE) RecordRelocInfo(rmode);
+  emit(x);
+}
+
+
+void Assembler::emit(const Immediate& x) {
+  if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
+    Label* label = reinterpret_cast<Label*>(x.x_);
+    emit_code_relative_offset(label);
+    return;
+  }
+  if (x.rmode_ != RelocInfo::NONE) RecordRelocInfo(x.rmode_);
+  emit(x.x_);
+}
+
+
+void Assembler::emit_code_relative_offset(Label* label) {
+  if (label->is_bound()) {
+    int32_t pos;
+    pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
+    emit(pos);
+  } else {
+    emit_disp(label, Displacement::CODE_RELATIVE);
+  }
+}
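+
+// Illustrative arithmetic: the emitted value is chosen so that a tagged
+// Code* plus the value points at the target instruction:
+//   tagged_code + pos == code_start + Code::kHeaderSize + label->pos()
+// given the usual V8 tagging where tagged_code == code_start +
+// kHeapObjectTag.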
+
+
+void Assembler::emit_w(const Immediate& x) {
+  ASSERT(x.rmode_ == RelocInfo::NONE);
+  uint16_t value = static_cast<uint16_t>(x.x_);
+  reinterpret_cast<uint16_t*>(pc_)[0] = value;
+  pc_ += sizeof(uint16_t);
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  int32_t* p = reinterpret_cast<int32_t*>(pc);
+  *p = target - (pc + sizeof(int32_t));
+  CPU::FlushICache(p, sizeof(int32_t));
+}
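+
+// Illustrative numbers: with pc pointing at the 4-byte displacement of
+// a call or jmp, a stored displacement of 0x20 at pc == 0x1000 gives
+// target_address_at == 0x1000 + 4 + 0x20 == 0x1024; conversely,
+// set_target_address_at(0x1000, 0x1024) stores 0x20.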
+
+
+Displacement Assembler::disp_at(Label* L) {
+  return Displacement(long_at(L->pos()));
+}
+
+
+void Assembler::disp_at_put(Label* L, Displacement disp) {
+  long_at_put(L->pos(), disp.data());
+}
+
+
+void Assembler::emit_disp(Label* L, Displacement::Type type) {
+  Displacement disp(L, type);
+  L->link_to(pc_offset());
+  emit(static_cast<int>(disp.data()));
+}
+
+
+void Operand::set_modrm(int mod, Register rm) {
+  ASSERT((mod & -4) == 0);
+  buf_[0] = mod << 6 | rm.code();
+  len_ = 1;
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+  ASSERT(len_ == 1);
+  ASSERT((scale & -4) == 0);
+  // Use SIB with no index register only for base esp.
+  ASSERT(!index.is(esp) || base.is(esp));
+  buf_[1] = scale << 6 | index.code() << 3 | base.code();
+  len_ = 2;
+}
+
+
+void Operand::set_disp8(int8_t disp) {
+  ASSERT(len_ == 1 || len_ == 2);
+  *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
+}
+
+
+void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
+  ASSERT(len_ == 1 || len_ == 2);
+  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+  *p = disp;
+  len_ += sizeof(int32_t);
+  rmode_ = rmode;
+}
+
+Operand::Operand(Register reg) {
+  // reg
+  set_modrm(3, reg);
+}
+
+
+Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
+  // [disp/r]
+  set_modrm(0, ebp);
+  set_dispr(disp, rmode);
+}
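+
+// Byte layout for reference: a ModR/M byte is mod<<6 | reg<<3 | rm and
+// a SIB byte is scale<<6 | index<<3 | base. set_modrm() leaves the
+// middle reg field zero; the assembler fills in the register or opcode
+// extension when the operand is emitted. E.g. Operand(eax) encodes as
+// the single ModR/M byte 0xC0 (mod == 3, rm == eax).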
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_ASSEMBLER_IA32_INL_H_
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
new file mode 100644
index 0000000..b8dda17
--- /dev/null
+++ b/src/ia32/assembler-ia32.cc
@@ -0,0 +1,2254 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
+
+// Safe default is no features.
+uint64_t CpuFeatures::supported_ = 0;
+uint64_t CpuFeatures::enabled_ = 0;
+
+
+// The Probe method needs executable memory, so it uses Heap::CreateCode.
+// Allocation failure is silent and leads to the safe default.
+void CpuFeatures::Probe() {
+  ASSERT(Heap::HasBeenSetup());
+  ASSERT(supported_ == 0);
+  if (Serializer::enabled()) return;  // No features if we might serialize.
+
+  Assembler assm(NULL, 0);
+  Label cpuid, done;
+#define __ assm.
+  // Save old esp, since we are going to modify the stack.
+  __ push(ebp);
+  __ pushfd();
+  __ push(ecx);
+  __ push(ebx);
+  __ mov(ebp, Operand(esp));
+
+  // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
+  __ pushfd();
+  __ pop(eax);
+  __ mov(edx, Operand(eax));
+  __ xor_(eax, 0x200000);  // Flip bit 21.
+  __ push(eax);
+  __ popfd();
+  __ pushfd();
+  __ pop(eax);
+  __ xor_(eax, Operand(edx));  // Different if CPUID is supported.
+  __ j(not_zero, &cpuid);
+
+  // CPUID not supported. Clear the supported features in edx:eax.
+  __ xor_(eax, Operand(eax));
+  __ xor_(edx, Operand(edx));
+  __ jmp(&done);
+
+  // Invoke CPUID with 1 in eax to get feature information in
+  // ecx:edx. Temporarily enable CPUID support because we know it's
+  // safe here.
+  __ bind(&cpuid);
+  __ mov(eax, 1);
+  supported_ = (1 << CPUID);
+  { Scope fscope(CPUID);
+    __ cpuid();
+  }
+  supported_ = 0;
+
+  // Move the result from ecx:edx to edx:eax and make sure to mark the
+  // CPUID feature as supported.
+  __ mov(eax, Operand(edx));
+  __ or_(eax, 1 << CPUID);
+  __ mov(edx, Operand(ecx));
+
+  // Done.
+  __ bind(&done);
+  __ mov(esp, Operand(ebp));
+  __ pop(ebx);
+  __ pop(ecx);
+  __ popfd();
+  __ pop(ebp);
+  __ ret(0);
+#undef __
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code = Heap::CreateCode(desc,
+                                  NULL,
+                                  Code::ComputeFlags(Code::STUB),
+                                  Handle<Code>::null());
+  if (!code->IsCode()) return;
+  LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+                      Code::cast(code), "CpuFeatures::Probe"));
+  typedef uint64_t (*F0)();
+  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  supported_ = probe();
+}
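+
+// Illustrative use, assuming the IsSupported/Scope helpers declared for
+// CpuFeatures in assembler-ia32.h: code that needs an optional
+// instruction checks the probed bit and enables it for the emitting
+// scope, e.g.
+//   if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+//     CpuFeatures::Scope enable(CpuFeatures::SSE2);
+//     masm->cvttsd2si(eax, Operand(ebx));
+//   }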
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Displacement
+
+void Displacement::init(Label* L, Type type) {
+  ASSERT(!L->is_bound());
+  int next = 0;
+  if (L->is_linked()) {
+    next = L->pos();
+    ASSERT(next > 0);  // Displacements must be at positions > 0
+  }
+  // Ensure that we _never_ overflow the next field.
+  ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+  data_ = NextField::encode(next) | TypeField::encode(type);
+}
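+
+// How the chain works (illustrative): an unbound label threads a singly
+// linked list through the emitted displacements. Each displacement's
+// next field holds the buffer position of the previous use of the label
+// (0 terminates the list, which is why positions must be > 0), and the
+// label's pos() holds the most recent use; bind_to() walks the list and
+// patches every entry.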
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+const int RelocInfo::kApplyMask =
+  RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+    1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  // Patch the code at the current address with the supplied instructions.
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc_ + i) = *(instructions + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // A call instruction takes up 5 bytes and an int3 takes up one byte.
+  static const int kCallCodeSize = 5;
+  int code_size = kCallCodeSize + guard_bytes;
+
+  // Create a code patcher.
+  CodePatcher patcher(pc_, code_size);
+
+  // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+  Label check_codesize;
+  patcher.masm()->bind(&check_codesize);
+#endif
+
+  // Patch the code.
+  patcher.masm()->call(target, RelocInfo::NONE);
+
+  // Check that the size of the code generated is as expected.
+  ASSERT_EQ(kCallCodeSize,
+            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+
+  // Add the requested number of int3 instructions after the call.
+  for (int i = 0; i < guard_bytes; i++) {
+    patcher.masm()->int3();
+  }
+}
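+
+// Illustrative encoding: the patched call is E8 followed by a 32-bit
+// pc-relative displacement, so for code patched at address pc the
+// emitted displacement is target - (pc + kCallCodeSize); any guard
+// bytes after it are CC (int3).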
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
+  // [base + disp/r]
+  if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
+    // [base]
+    set_modrm(0, base);
+    if (base.is(esp)) set_sib(times_1, esp, base);
+  } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
+    // [base + disp8]
+    set_modrm(1, base);
+    if (base.is(esp)) set_sib(times_1, esp, base);
+    set_disp8(disp);
+  } else {
+    // [base + disp/r]
+    set_modrm(2, base);
+    if (base.is(esp)) set_sib(times_1, esp, base);
+    set_dispr(disp, rmode);
+  }
+}
+
+
+Operand::Operand(Register base,
+                 Register index,
+                 ScaleFactor scale,
+                 int32_t disp,
+                 RelocInfo::Mode rmode) {
+  ASSERT(!index.is(esp));  // illegal addressing mode
+  // [base + index*scale + disp/r]
+  if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
+    // [base + index*scale]
+    set_modrm(0, esp);
+    set_sib(scale, index, base);
+  } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
+    // [base + index*scale + disp8]
+    set_modrm(1, esp);
+    set_sib(scale, index, base);
+    set_disp8(disp);
+  } else {
+    // [base + index*scale + disp/r]
+    set_modrm(2, esp);
+    set_sib(scale, index, base);
+    set_dispr(disp, rmode);
+  }
+}
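+
+// Worked encoding (illustrative): Operand(eax, ebx, times_4, 0x100,
+// RelocInfo::NONE) takes the last branch since 0x100 is not an int8:
+// ModR/M 0x84 (mod == 2, rm == esp selecting a SIB byte), SIB 0x98
+// (scale == times_4, index == ebx, base == eax), then the 32-bit
+// displacement 0x00000100 -- i.e. [eax + ebx*4 + 0x100].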
+
+
+Operand::Operand(Register index,
+                 ScaleFactor scale,
+                 int32_t disp,
+                 RelocInfo::Mode rmode) {
+  ASSERT(!index.is(esp));  // illegal addressing mode
+  // [index*scale + disp/r]
+  set_modrm(0, esp);
+  set_sib(scale, index, ebp);
+  set_dispr(disp, rmode);
+}
+
+
+bool Operand::is_reg(Register reg) const {
+  return ((buf_[0] & 0xF8) == 0xC0)  // addressing mode is register only.
+      && ((buf_[0] & 0x07) == reg.code());  // register codes match.
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Emit a single byte. Must always be inlined.
+#define EMIT(x)                                 \
+  *pc_++ = (x)
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
+// Spare buffer: a minimal-size buffer kept around for reuse (see ~Assembler).
+byte* Assembler::spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // do our own buffer management
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+  } else {
+    // use externally provided buffer instead
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // Clear the buffer in debug mode unless it was provided by the
+  // caller, in which case we can't be sure it's okay to overwrite
+  // existing code in it; see CodePatcher::CodePatcher(...).
+#ifdef DEBUG
+  if (own_buffer_) {
+    memset(buffer_, 0xCC, buffer_size);  // int3
+  }
+#endif
+
+  // setup buffer pointers
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+  last_pc_ = NULL;
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+#ifdef GENERATED_CODE_COVERAGE
+  InitCoverageLog();
+#endif
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  // finalize code
+  // (at this point overflow() may be true, but the gap ensures that
+  // we are still not overlapping instructions and relocation info)
+  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  // setup desc
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->origin = this;
+
+  Counters::reloc_info_size.Increment(desc->reloc_size);
+}
+
+
+void Assembler::Align(int m) {
+  ASSERT(IsPowerOf2(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+
+void Assembler::cpuid() {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xA2);
+}
+
+
+void Assembler::pushad() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x60);
+}
+
+
+void Assembler::popad() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x61);
+}
+
+
+void Assembler::pushfd() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9C);
+}
+
+
+void Assembler::popfd() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9D);
+}
+
+
+void Assembler::push(const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (x.is_int8()) {
+    EMIT(0x6a);
+    EMIT(x.x_);
+  } else {
+    EMIT(0x68);
+    emit(x);
+  }
+}
+
+
+void Assembler::push(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x50 | src.code());
+}
+
+
+void Assembler::push(const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(esi, src);
+}
+
+
+void Assembler::pop(Register dst) {
+  ASSERT(reloc_info_writer.last_pc() != NULL);
+  if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+    // (last_pc_ != NULL) is rolled into the above check.
+    // If last_pc_ is set, we need to make sure that no relocation
+    // information has been generated between the last instruction and
+    // this pop instruction.
+    byte instr = last_pc_[0];
+    if ((instr & ~0x7) == 0x50) {
+      int push_reg_code = instr & 0x7;
+      if (push_reg_code == dst.code()) {
+        pc_ = last_pc_;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
+        }
+      } else {
+        // Convert 'push src; pop dst' to 'mov dst, src'.
+        last_pc_[0] = 0x8b;
+        Register src = { push_reg_code };
+        EnsureSpace ensure_space(this);
+        emit_operand(dst, Operand(src));
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
+        }
+      }
+      last_pc_ = NULL;
+      return;
+    } else if (instr == 0xff) {  // push of an operand, convert to a move
+      byte op1 = last_pc_[1];
+      // Check if the operation is really a push
+      if ((op1 & 0x38) == (6 << 3)) {
+        op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
+        last_pc_[0] = 0x8b;
+        last_pc_[1] = op1;
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
+        }
+        return;
+      }
+    } else if ((instr == 0x89) &&
+               (last_pc_[1] == 0x04) &&
+               (last_pc_[2] == 0x24)) {
+      // 0x71283c   396  890424         mov [esp],eax
+      // 0x71283f   399  58             pop eax
+      if (dst.is(eax)) {
+        // change to
+        // 0x710fac   216  83c404         add esp,0x4
+        last_pc_[0] = 0x83;
+        last_pc_[1] = 0xc4;
+        last_pc_[2] = 0x04;
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
+        }
+        return;
+      }
+    } else if (instr == 0x6a && dst.is(eax)) {  // push of immediate 8 bit
+      byte imm8 = last_pc_[1];
+      if (imm8 == 0) {
+        // 6a00         push 0x0
+        // 58           pop eax
+        last_pc_[0] = 0x31;
+        last_pc_[1] = 0xc0;
+        // change to
+        // 31c0         xor eax,eax
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
+        }
+        return;
+      } else {
+        // 6aXX         push 0xXX
+        // 58           pop eax
+        last_pc_[0] = 0xb8;
+        EnsureSpace ensure_space(this);
+        if ((imm8 & 0x80) != 0) {
+          EMIT(0xff);
+          EMIT(0xff);
+          EMIT(0xff);
+          // change to
+          // b8XXffffff   mov eax,0xffffffXX
+        } else {
+          EMIT(0x00);
+          EMIT(0x00);
+          EMIT(0x00);
+          // change to
+          // b8XX000000   mov eax,0x000000XX
+        }
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
+        }
+        return;
+      }
+    } else if (instr == 0x68 && dst.is(eax)) {  // push of immediate 32 bit
+      // 68XXXXXXXX   push 0xXXXXXXXX
+      // 58           pop eax
+      last_pc_[0] = 0xb8;
+      last_pc_ = NULL;
+      // change to
+      // b8XXXXXXXX   mov eax,0xXXXXXXXX
+      if (FLAG_print_push_pop_elimination) {
+        PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
+      }
+      return;
+    }
+
+    // Other potential patterns for peephole:
+    // 0x712716   102  890424         mov [esp], eax
+    // 0x712719   105  8b1424         mov edx, [esp]
+  }
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x58 | dst.code());
+}
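+
+// Illustrative rewrite: with the eliminations above, the sequence
+//   50      push eax
+//   5b      pop ebx
+// is patched in place to
+//   8b d8   mov ebx,eax
+// while "push eax; pop eax" is dropped entirely by rewinding pc_.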
+
+
+void Assembler::pop(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x8F);
+  emit_operand(eax, dst);
+}
+
+
+void Assembler::enter(const Immediate& size) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC8);
+  emit_w(size);
+  EMIT(0);
+}
+
+
+void Assembler::leave() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC9);
+}
+
+
+void Assembler::mov_b(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x8A);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mov_b(const Operand& dst, int8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC6);
+  emit_operand(eax, dst);
+  EMIT(imm8);
+}
+
+
+void Assembler::mov_b(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x88);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::mov_w(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x8B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mov_w(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x89);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::mov(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xB8 | dst.code());
+  emit(imm32);
+}
+
+
+void Assembler::mov(Register dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xB8 | dst.code());
+  emit(x);
+}
+
+
+void Assembler::mov(Register dst, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xB8 | dst.code());
+  emit(handle);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x8B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mov(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x89);
+  EMIT(0xC0 | src.code() << 3 | dst.code());
+}
+
+
+void Assembler::mov(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC7);
+  emit_operand(eax, dst);
+  emit(x);
+}
+
+
+void Assembler::mov(const Operand& dst, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC7);
+  emit_operand(eax, dst);
+  emit(handle);
+}
+
+
+void Assembler::mov(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x89);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::movsx_b(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xBE);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movsx_w(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xBF);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_b(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xB6);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_w(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xB7);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  UNIMPLEMENTED();
+  USE(cc);
+  USE(dst);
+  USE(imm32);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  UNIMPLEMENTED();
+  USE(cc);
+  USE(dst);
+  USE(handle);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: 0f 40 + cc /r
+  EMIT(0x0F);
+  EMIT(0x40 + cc);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::xchg(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding
+    EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
+  } else {
+    EMIT(0x87);
+    EMIT(0xC0 | src.code() << 3 | dst.code());
+  }
+}
+
+
+void Assembler::adc(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(2, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::adc(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x13);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::add(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x03);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::add(const Operand& dst, const Immediate& x) {
+  ASSERT(reloc_info_writer.last_pc() != NULL);
+  if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+    byte instr = last_pc_[0];
+    if ((instr & 0xf8) == 0x50) {
+      // Last instruction was a push. Check whether this is a pop without a
+      // result.
+      if ((dst.is_reg(esp)) &&
+          (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
+        pc_ = last_pc_;
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
+        }
+        return;
+      }
+    }
+  }
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(0, dst, x);
+}
+
+
+void Assembler::and_(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(4, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::and_(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x23);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::and_(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(4, dst, x);
+}
+
+
+void Assembler::and_(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x21);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::cmpb(const Operand& op, int8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x80);
+  emit_operand(edi, op);  // edi == 7
+  EMIT(imm8);
+}
+
+
+void Assembler::cmpw(const Operand& op, Immediate imm16) {
+  ASSERT(imm16.is_int16());
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x81);
+  emit_operand(edi, op);
+  emit_w(imm16);
+}
+
+
+void Assembler::cmp(Register reg, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, Operand(reg), Immediate(imm32));
+}
+
+
+void Assembler::cmp(Register reg, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, Operand(reg), Immediate(handle));
+}
+
+
+void Assembler::cmp(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x3B);
+  emit_operand(reg, op);
+}
+
+
+void Assembler::cmp(const Operand& op, const Immediate& imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, op, imm);
+}
+
+
+void Assembler::cmp(const Operand& op, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, op, Immediate(handle));
+}
+
+
+void Assembler::cmpb_al(const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x38);  // CMP r/m8, r8
+  emit_operand(eax, op);  // eax has same code as register al.
+}
+
+
+void Assembler::cmpw_ax(const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x39);  // CMP r/m16, r16
+  emit_operand(eax, op);  // eax has same code as register ax.
+}
+
+
+void Assembler::dec_b(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFE);
+  EMIT(0xC8 | dst.code());
+}
+
+
+void Assembler::dec(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x48 | dst.code());
+}
+
+
+void Assembler::dec(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(ecx, dst);
+}
+
+
+void Assembler::cdq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x99);
+}
+
+
+void Assembler::idiv(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xF8 | src.code());
+}
+
+
+void Assembler::imul(Register reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xE8 | reg.code());
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xAF);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (is_int8(imm32)) {
+    EMIT(0x6B);
+    EMIT(0xC0 | dst.code() << 3 | src.code());
+    EMIT(imm32);
+  } else {
+    EMIT(0x69);
+    EMIT(0xC0 | dst.code() << 3 | src.code());
+    emit(imm32);
+  }
+}
+
+
+void Assembler::inc(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x40 | dst.code());
+}
+
+
+void Assembler::inc(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(eax, dst);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x8D);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mul(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xE0 | src.code());
+}
+
+
+void Assembler::neg(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xD8 | dst.code());
+}
+
+
+void Assembler::not_(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xD0 | dst.code());
+}
+
+
+void Assembler::or_(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(1, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::or_(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::or_(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(1, dst, x);
+}
+
+
+void Assembler::or_(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x09);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xD0 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xD0 | dst.code());
+    EMIT(imm8);
+  }
+}
+
+
+void Assembler::sar(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xF8 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xF8 | dst.code());
+    EMIT(imm8);
+  }
+}
+
+
+void Assembler::sar(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD3);
+  EMIT(0xF8 | dst.code());
+}
+
+
+void Assembler::sbb(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x1B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::shld(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xA5);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::shl(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xE0 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xE0 | dst.code());
+    EMIT(imm8);
+  }
+}
+
+
+void Assembler::shl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD3);
+  EMIT(0xE0 | dst.code());
+}
+
+
+void Assembler::shrd(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xAD);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::shr(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  EMIT(0xC1);
+  EMIT(0xE8 | dst.code());
+  EMIT(imm8);
+}
+
+
+void Assembler::shr(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD3);  // 0xD3 /5: shr r/m32 by cl.
+  EMIT(0xE8 | dst.code());
+}
+
+
+void Assembler::shr_cl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD1);  // 0xD1 /5: shr r/m32 by 1.
+  EMIT(0xE8 | dst.code());
+}
+
+
+void Assembler::sub(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(5, dst, x);
+}
+
+
+void Assembler::sub(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x2B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::sub(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x29);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::test(Register reg, const Immediate& imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Only use test against byte for registers that have a byte
+  // variant: eax, ebx, ecx, and edx.
+  if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
+    uint8_t imm8 = imm.x_;
+    if (reg.is(eax)) {
+      EMIT(0xA8);
+      EMIT(imm8);
+    } else {
+      emit_arith_b(0xF6, 0xC0, reg, imm8);
+    }
+  } else {
+    // This is not using emit_arith because test doesn't support
+    // sign-extension of 8-bit operands.
+    if (reg.is(eax)) {
+      EMIT(0xA9);
+    } else {
+      EMIT(0xF7);
+      EMIT(0xC0 | reg.code());
+    }
+    emit(imm);
+  }
+}
+
+
+void Assembler::test(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x85);
+  emit_operand(reg, op);
+}
+
+
+void Assembler::test(const Operand& op, const Immediate& imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  emit_operand(eax, op);
+  emit(imm);
+}
+
+
+void Assembler::xor_(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(6, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::xor_(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x33);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& src, Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x31);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(6, dst, x);
+}
+
+
+void Assembler::bt(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xA3);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xAB);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::hlt() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF4);
+}
+
+
+void Assembler::int3() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xCC);
+}
+
+
+void Assembler::nop() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x90);
+}
+
+
+void Assembler::rdtsc() {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::RDTSC));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0x31);
+}
+
+
+void Assembler::ret(int imm16) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint16(imm16));
+  if (imm16 == 0) {
+    EMIT(0xC3);
+  } else {
+    EMIT(0xC2);
+    EMIT(imm16 & 0xFF);
+    EMIT((imm16 >> 8) & 0xFF);
+  }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the 32-bit
+// Displacement of the last instruction using the label.
+
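+// A typical lifecycle (illustrative sketch):
+//   Label loop;
+//   __ bind(&loop);          // label bound to the current pc offset
+//   __ dec(ecx);
+//   __ j(not_zero, &loop);   // bound target: fixed displacement emitted
+// A forward reference (a jump emitted before bind) instead links the
+// label and is patched when bind() is eventually called.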
+
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      Displacement disp = disp_at(&l);
+      PrintF("@ %d ", l.pos());
+      disp.print();
+      PrintF("\n");
+      disp.next(&l);
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  while (L->is_linked()) {
+    Displacement disp = disp_at(L);
+    int fixup_pos = L->pos();
+    if (disp.type() == Displacement::CODE_RELATIVE) {
+      // Relative to Code* heap object pointer.
+      long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
+    } else {
+      if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
+        ASSERT(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
+      }
+      // relative address, relative to point after address
+      int imm32 = pos - (fixup_pos + sizeof(int32_t));
+      long_at_put(fixup_pos, imm32);
+    }
+    disp.next(L);
+  }
+  L->bind_to(pos);
+}
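+
+// Worked fixup (illustrative): binding a label at pos == 100 when a jmp
+// recorded its 32-bit displacement at fixup_pos == 40 stores
+// imm32 == 100 - (40 + 4) == 56, i.e. relative to the first byte after
+// the displacement field.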
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  if (appendix->is_linked()) {
+    if (L->is_linked()) {
+      // append appendix to L's list
+      Label p;
+      Label q = *L;
+      do {
+        p = q;
+        Displacement disp = disp_at(&q);
+        disp.next(&q);
+      } while (q.is_linked());
+      Displacement disp = disp_at(&p);
+      disp.link_to(appendix);
+      disp_at_put(&p, disp);
+      p.Unuse();  // to avoid assertion failure in ~Label
+    } else {
+      // L is empty, simply use appendix
+      *L = *appendix;
+    }
+  }
+  appendix->Unuse();  // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  ASSERT(!L->is_bound());  // label can only be bound once
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::call(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int long_size = 5;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    // 1110 1000 #32-bit disp
+    EMIT(0xE8);
+    emit(offs - long_size);
+  } else {
+    // 1110 1000 #32-bit disp
+    EMIT(0xE8);
+    emit_disp(L, Displacement::OTHER);
+  }
+}
+
+
+void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE8);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::call(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(edx, adr);
+}
+
+
+void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
+  WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE8);
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+void Assembler::jmp(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int short_size = 2;
+    const int long_size  = 5;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    if (is_int8(offs - short_size)) {
+      // 1110 1011 #8-bit disp
+      EMIT(0xEB);
+      EMIT((offs - short_size) & 0xFF);
+    } else {
+      // 1110 1001 #32-bit disp
+      EMIT(0xE9);
+      emit(offs - long_size);
+    }
+  } else {
+    // 1110 1001 #32-bit disp
+    EMIT(0xE9);
+    emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
+  }
+}
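+
+// Illustrative size choice: a backward jmp to a label 10 bytes behind
+// the current pc has offs == -10; offs - 2 == -12 fits in an int8, so
+// the two bytes EB F4 are emitted. Offsets that do not fit in a signed
+// byte fall back to the five-byte E9 form.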
+
+
+void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE9);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::jmp(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(esp, adr);
+}
+
+
+void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE9);
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+
+void Assembler::j(Condition cc, Label* L, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(0 <= cc && cc < 16);
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  if (L->is_bound()) {
+    const int short_size = 2;
+    const int long_size  = 6;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    if (is_int8(offs - short_size)) {
+      // 0111 tttn #8-bit disp
+      EMIT(0x70 | cc);
+      EMIT((offs - short_size) & 0xFF);
+    } else {
+      // 0000 1111 1000 tttn #32-bit disp
+      EMIT(0x0F);
+      EMIT(0x80 | cc);
+      emit(offs - long_size);
+    }
+  } else {
+    // 0000 1111 1000 tttn #32-bit disp
+    // Note: conditional jumps to this jump could be eliminated if the
+    //       condition is the same; however, that seems rather unlikely.
+    EMIT(0x0F);
+    EMIT(0x80 | cc);
+    emit_disp(L, Displacement::OTHER);
+  }
+}
+
+
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT((0 <= cc) && (cc < 16));
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  // 0000 1111 1000 tttn #32-bit disp
+  EMIT(0x0F);
+  EMIT(0x80 | cc);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  // 0000 1111 1000 tttn #32-bit disp
+  EMIT(0x0F);
+  EMIT(0x80 | cc);
+  emit(reinterpret_cast<intptr_t>(code.location()), RelocInfo::CODE_TARGET);
+}
+
+
+// FPU instructions
+
+
+void Assembler::fld(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC0, i);
+}
+
+
+void Assembler::fld1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE8);
+}
+
+
+void Assembler::fldz() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xEE);
+}
+
+
+void Assembler::fld_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  emit_operand(eax, adr);
+}
+
+
+void Assembler::fld_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDD);
+  emit_operand(eax, adr);
+}
+
+
+void Assembler::fstp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  emit_operand(ebx, adr);
+}
+
+
+void Assembler::fstp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDD);
+  emit_operand(ebx, adr);
+}
+
+
+void Assembler::fild_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(eax, adr);
+}
+
+
+void Assembler::fild_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  emit_operand(ebp, adr);
+}
+
+
+void Assembler::fistp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(ebx, adr);
+}
+
+
+void Assembler::fisttp_s(const Operand& adr) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(ecx, adr);
+}
+
+
+void Assembler::fist_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(edx, adr);
+}
+
+
+void Assembler::fistp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  emit_operand(edi, adr);
+}
+
+
+void Assembler::fabs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE1);
+}
+
+
+void Assembler::fchs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE0);
+}
+
+
+void Assembler::fcos() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFF);
+}
+
+
+void Assembler::fsin() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFE);
+}
+
+
+void Assembler::fadd(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC0, i);
+}
+
+
+void Assembler::fsub(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xE8, i);
+}
+
+
+void Assembler::fisub_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDA);
+  emit_operand(esp, adr);
+}
+
+
+void Assembler::fmul(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC8, i);
+}
+
+
+void Assembler::fdiv(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xF8, i);
+}
+
+
+void Assembler::faddp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC0, i);
+}
+
+
+void Assembler::fsubp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE8, i);
+}
+
+
+void Assembler::fsubrp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE0, i);
+}
+
+
+void Assembler::fmulp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC8, i);
+}
+
+
+void Assembler::fdivp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xF8, i);
+}
+
+
+void Assembler::fprem() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF8);
+}
+
+
+void Assembler::fprem1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF5);
+}
+
+
+void Assembler::fxch(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC8, i);
+}
+
+
+void Assembler::fincstp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF7);
+}
+
+
+void Assembler::ffree(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xC0, i);
+}
+
+
+void Assembler::ftst() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE4);
+}
+
+
+void Assembler::fucomp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xE8, i);
+}
+
+
+void Assembler::fucompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDA);
+  EMIT(0xE9);
+}
+
+
+void Assembler::fcompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDE);
+  EMIT(0xD9);
+}
+
+
+void Assembler::fnstsw_ax() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  EMIT(0xE0);
+}
+
+
+void Assembler::fwait() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9B);
+}
+
+
+void Assembler::frndint() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFC);
+}
+
+
+void Assembler::fnclex() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  EMIT(0xE2);
+}
+
+
+void Assembler::sahf() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9E);
+}
+
+
+void Assembler::setcc(Condition cc, Register reg) {
+  ASSERT(reg.is_byte_register());
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0x90 | cc);
+  EMIT(0xC0 | reg.code());
+}
+
+
+void Assembler::cvttss2si(Register dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x2C);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cvttsd2si(Register dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x2C);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::comisd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x2F);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdbl(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  movsd(dst, src);
+}
+
+
+void Assembler::movdbl(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  movsd(dst, src);
+}
+
+
+void Assembler::movsd(const Operand& dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);  // double
+  EMIT(0x0F);
+  EMIT(0x11);  // store
+  emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);  // double
+  EMIT(0x0F);
+  EMIT(0x10);  // load
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+  Register ireg = { reg.code() };
+  emit_operand(ireg, adr);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
+  EMIT(0xC0 | dst.code() << 3 | src.code());
+}
+
+
+void Assembler::Print() {
+  Disassembler::Decode(stdout, buffer_, pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+  ASSERT(pos != RelocInfo::kNoPosition);
+  ASSERT(pos >= 0);
+  current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+  ASSERT(pos != RelocInfo::kNoPosition);
+  ASSERT(pos >= 0);
+  current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+  // Write the statement position if it is different from what was written last
+  // time.
+  if (current_statement_position_ != written_statement_position_) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+    written_statement_position_ = current_statement_position_;
+  }
+
+  // Write the position if it is different from what was written last time and
+  // also different from the written statement position.
+  if (current_position_ != written_position_ &&
+      current_position_ != written_statement_position_) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
+  }
+}
+
+
+void Assembler::GrowBuffer() {
+  ASSERT(overflow());  // should not call this otherwise
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // compute new buffer size
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else {
+    desc.buffer_size = 2*buffer_size_;
+  }
+  // Some internal data structures overflow for very large buffers,
+  // so kMaximalBufferSize must be kept small enough to avoid this.
+  if ((desc.buffer_size > kMaximalBufferSize) ||
+      (desc.buffer_size > Heap::OldGenerationSize())) {
+    V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+  }
+
+  // setup new buffer
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+  // Clear the buffer in debug mode. Use 'int3' instructions so that we
+  // trap immediately if we ever execute uninitialized code.
+#ifdef DEBUG
+  memset(desc.buffer, 0xCC, desc.buffer_size);
+#endif
+
+  // copy the data
+  int pc_delta = desc.buffer - buffer_;
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(rc_delta + reloc_info_writer.pos(),
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // switch buffers
+  if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+    spare_buffer_ = buffer_;
+  } else {
+    DeleteArray(buffer_);
+  }
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  if (last_pc_ != NULL) {
+    last_pc_ += pc_delta;
+  }
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // relocate runtime entries
+  for (RelocIterator it(desc); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::RUNTIME_ENTRY) {
+      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+      *p -= pc_delta;  // relocate entry
+    } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+      if (*p != 0) {  // 0 means uninitialized.
+        *p += pc_delta;
+      }
+    }
+  }
+
+  ASSERT(!overflow());
+}
+
+
+void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
+  ASSERT(is_uint8(op1) && is_uint8(op2));  // wrong opcode
+  ASSERT(is_uint8(imm8));
+  ASSERT((op1 & 0x01) == 0);  // should be 8bit operation
+  EMIT(op1);
+  EMIT(op2 | dst.code());
+  EMIT(imm8);
+}
+
+
+void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
+  ASSERT((0 <= sel) && (sel <= 7));
+  Register ireg = { sel };
+  if (x.is_int8()) {
+    EMIT(0x83);  // using a sign-extended 8-bit immediate.
+    emit_operand(ireg, dst);
+    EMIT(x.x_ & 0xFF);
+  } else if (dst.is_reg(eax)) {
+    EMIT((sel << 3) | 0x05);  // short form if the destination is eax.
+    emit(x);
+  } else {
+    EMIT(0x81);  // using a literal 32-bit immediate.
+    emit_operand(ireg, dst);
+    emit(x);
+  }
+}
+
+
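+// A hypothetical worked example: Operand(ebx, 8) pre-encodes the bytes
+// { 0x43, 0x08 } (mod == 01, rm == ebx, disp8 == 8), and
+// emit_operand(ecx, adr) patches ecx's code into the reg field, emitting
+// the bytes 0x4B 0x08.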
+void Assembler::emit_operand(Register reg, const Operand& adr) {
+  const unsigned length = adr.len_;
+  ASSERT(length > 0);
+
+  // Emit updated ModRM byte containing the given register.
+  pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
+
+  // Emit the rest of the encoded operand.
+  for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
+  pc_ += length;
+
+  // Emit relocation information if necessary.
+  if (length >= sizeof(int32_t) && adr.rmode_ != RelocInfo::NONE) {
+    pc_ -= sizeof(int32_t);  // pc_ must be *at* disp32
+    RecordRelocInfo(adr.rmode_);
+    pc_ += sizeof(int32_t);
+  }
+}
+
+
+void Assembler::emit_farith(int b1, int b2, int i) {
+  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
+  ASSERT(0 <= i && i < 8);  // illegal stack offset
+  EMIT(b1);
+  EMIT(b2 + i);
+}
+
+
+void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) {
+  EnsureSpace ensure_space(this);
+  emit(data, reloc_info);
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  ASSERT(rmode != RelocInfo::NONE);
+  // Don't record external references unless the heap will be serialized.
+  if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+      !Serializer::enabled() &&
+      !FLAG_debug_code) {
+    return;
+  }
+  RelocInfo rinfo(pc_, rmode, data);
+  reloc_info_writer.Write(&rinfo);
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitCoverageLog() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "a+");
+  }
+}
+
+
+void LogGeneratedCodeCoverage(const char* file_line) {
+  const char* return_address = (&file_line)[-1];
+  char* push_insn = const_cast<char*>(return_address - 12);
+  push_insn[0] = 0xeb;  // Relative branch insn.
+  push_insn[1] = 13;    // Skip over coverage insns.
+  if (coverage_log != NULL) {
+    fprintf(coverage_log, "%s\n", file_line);
+    fflush(coverage_log);
+  }
+}
+
+#endif
+
+} }  // namespace v8::internal
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
new file mode 100644
index 0000000..610017b
--- /dev/null
+++ b/src/ia32/assembler-ia32.h
@@ -0,0 +1,877 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_IA32_ASSEMBLER_IA32_H_
+#define V8_IA32_ASSEMBLER_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+struct Register {
+  bool is_valid() const  { return 0 <= code_ && code_ < 8; }
+  bool is(Register reg) const  { return code_ == reg.code_; }
+  // eax, ebx, ecx, and edx are byte registers; the rest are not.
+  bool is_byte_register() const  { return code_ <= 3; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // (unfortunately we can't make this private in a struct)
+  int code_;
+};
+
+const Register eax = { 0 };
+const Register ecx = { 1 };
+const Register edx = { 2 };
+const Register ebx = { 3 };
+const Register esp = { 4 };
+const Register ebp = { 5 };
+const Register esi = { 6 };
+const Register edi = { 7 };
+const Register no_reg = { -1 };
+
+
+struct XMMRegister {
+  // Currently only xmm0 and xmm1 are considered valid.
+  bool is_valid() const  { return 0 <= code_ && code_ < 2; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  int code_;
+};
+
+const XMMRegister xmm0 = { 0 };
+const XMMRegister xmm1 = { 1 };
+const XMMRegister xmm2 = { 2 };
+const XMMRegister xmm3 = { 3 };
+const XMMRegister xmm4 = { 4 };
+const XMMRegister xmm5 = { 5 };
+const XMMRegister xmm6 = { 6 };
+const XMMRegister xmm7 = { 7 };
+
+enum Condition {
+  // any value < 0 is considered no_condition
+  no_condition  = -1,
+
+  overflow      =  0,
+  no_overflow   =  1,
+  below         =  2,
+  above_equal   =  3,
+  equal         =  4,
+  not_equal     =  5,
+  below_equal   =  6,
+  above         =  7,
+  negative      =  8,
+  positive      =  9,
+  parity_even   = 10,
+  parity_odd    = 11,
+  less          = 12,
+  greater_equal = 13,
+  less_equal    = 14,
+  greater       = 15,
+
+  // aliases
+  carry         = below,
+  not_carry     = above_equal,
+  zero          = equal,
+  not_zero      = not_equal,
+  sign          = negative,
+  not_sign      = positive
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
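+// For example (illustrative): the encoding pairs each condition with its
+// complement in the low bit, so NegateCondition(equal /* 4 */) yields
+// not_equal (5) and NegateCondition(below /* 2 */) yields above_equal (3).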
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case below:
+      return above;
+    case above:
+      return below;
+    case above_equal:
+      return below_equal;
+    case below_equal:
+      return above_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  }
+}
+
+enum Hint {
+  no_hint = 0,
+  not_taken = 0x2e,
+  taken = 0x3e
+};
+
+// The result of negating a hint is as if the corresponding condition
+// were negated by NegateCondition.  That is, no_hint is mapped to
+// itself and not_taken and taken are mapped to each other.
+inline Hint NegateHint(Hint hint) {
+  return (hint == no_hint)
+      ? no_hint
+      : ((hint == not_taken) ? taken : not_taken);
+}
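+
+// (For reference: 0x2e and 0x3e are the cs and ds segment override prefixes,
+// which ia32 processors interpret as static not-taken and taken branch hints
+// when they precede a conditional jump.)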
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Immediates
+
+class Immediate BASE_EMBEDDED {
+ public:
+  inline explicit Immediate(int x);
+  inline explicit Immediate(const char* s);
+  inline explicit Immediate(const ExternalReference& ext);
+  inline explicit Immediate(Handle<Object> handle);
+  inline explicit Immediate(Smi* value);
+
+  static Immediate CodeRelativeOffset(Label* label) {
+    return Immediate(label);
+  }
+
+  bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
+  bool is_int8() const {
+    return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
+  }
+  bool is_int16() const {
+    return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
+  }
+
+ private:
+  inline explicit Immediate(Label* value);
+
+  int x_;
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+enum ScaleFactor {
+  times_1 = 0,
+  times_2 = 1,
+  times_4 = 2,
+  times_8 = 3,
+  times_pointer_size = times_4,
+  times_half_pointer_size = times_2
+};
+
+
+class Operand BASE_EMBEDDED {
+ public:
+  // reg
+  INLINE(explicit Operand(Register reg));
+
+  // [disp/r]
+  INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
+  // (A displacement-only operand must always be relocated.)
+
+  // [base + disp/r]
+  explicit Operand(Register base, int32_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  // [base + index*scale + disp/r]
+  explicit Operand(Register base,
+                   Register index,
+                   ScaleFactor scale,
+                   int32_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  // [index*scale + disp/r]
+  explicit Operand(Register index,
+                   ScaleFactor scale,
+                   int32_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
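+  // For example (illustrative): Operand(ebp, -8) denotes the memory operand
+  // [ebp - 8], and Operand(ebx, ecx, times_4, 0) denotes [ebx + ecx*4].
+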
+  static Operand StaticVariable(const ExternalReference& ext) {
+    return Operand(reinterpret_cast<int32_t>(ext.address()),
+                   RelocInfo::EXTERNAL_REFERENCE);
+  }
+
+  static Operand StaticArray(Register index,
+                             ScaleFactor scale,
+                             const ExternalReference& arr) {
+    return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
+                   RelocInfo::EXTERNAL_REFERENCE);
+  }
+
+  // Returns true if this Operand is a wrapper for the specified register.
+  bool is_reg(Register reg) const;
+
+ private:
+  byte buf_[6];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
+
+  // Set the ModRM byte without an encoded 'reg' register. The
+  // register is encoded later as part of the emit_operand operation.
+  inline void set_modrm(int mod, Register rm);
+
+  inline void set_sib(ScaleFactor scale, Register index, Register base);
+  inline void set_disp8(int8_t disp);
+  inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
+
+  friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// A Displacement describes the 32bit immediate field of an instruction which
+// may be used together with a Label to refer to a not-yet-known code position.
+// Displacements stored in the instruction stream both describe the instruction
+// and chain together the instructions that use the same Label.
+// A Displacement contains 2 different fields:
+//
+// next field: position of next displacement in the chain (0 = end of list)
+// type field: instruction type
+//
+// A next value of null (0) indicates the end of a chain (note that there can
+// be no displacement at position zero, because there is always at least one
+// instruction byte before the displacement).
+//
+// Displacement _data field layout
+//
+// |31.....2|1......0|
+// |  next  |  type  |
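+//
+// A hypothetical worked example: data_ == 0x1E (binary 111'10) decodes to
+// type == 2 (OTHER) from bits 1..0 and next == 7 from bits 31..2, i.e. the
+// previous instruction using the same label stored its displacement at
+// position 7.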
+
+class Displacement BASE_EMBEDDED {
+ public:
+  enum Type {
+    UNCONDITIONAL_JUMP,
+    CODE_RELATIVE,
+    OTHER
+  };
+
+  int data() const { return data_; }
+  Type type() const { return TypeField::decode(data_); }
+  void next(Label* L) const {
+    int n = NextField::decode(data_);
+    n > 0 ? L->link_to(n) : L->Unuse();
+  }
+  void link_to(Label* L) { init(L, type()); }
+
+  explicit Displacement(int data) { data_ = data; }
+
+  Displacement(Label* L, Type type) { init(L, type); }
+
+  void print() {
+    PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
+                       NextField::decode(data_));
+  }
+
+ private:
+  int data_;
+
+  class TypeField: public BitField<Type, 0, 2> {};
+  class NextField: public BitField<int,  2, 32-2> {};
+
+  void init(Label* L, Type type);
+};
+
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+// Example:
+//   if (CpuFeatures::IsSupported(SSE2)) {
+//     CpuFeatures::Scope fscope(SSE2);
+//     // Generate SSE2 floating point code.
+//   } else {
+//     // Generate standard x87 floating point code.
+//   }
+class CpuFeatures : public AllStatic {
+ public:
+  // Feature flags bit positions. They are mostly based on the CPUID spec.
+  // (We assign CPUID itself to one of the currently reserved bits --
+  // feel free to change this if needed.)
+  enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  static void Probe();
+  // Check whether a feature is supported by the target CPU.
+  static bool IsSupported(Feature f) {
+    return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+  }
+  // Check whether a feature is currently enabled.
+  static bool IsEnabled(Feature f) {
+    return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+  }
+  // Enable a specified feature within a scope.
+  class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+   public:
+    explicit Scope(Feature f) {
+      ASSERT(CpuFeatures::IsSupported(f));
+      old_enabled_ = CpuFeatures::enabled_;
+      CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
+    }
+    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+   private:
+    uint64_t old_enabled_;
+#else
+   public:
+    explicit Scope(Feature f) {}
+#endif
+  };
+ private:
+  static uint64_t supported_;
+  static uint64_t enabled_;
+};
+
+
+class Assembler : public Malloced {
+ private:
+  // We check before assembling an instruction that there is sufficient
+  // space to write an instruction and its relocation information.
+  // The relocation writer's position must be kGap bytes above the end of
+  // the generated instructions. This leaves enough space for the
+  // longest possible ia32 instruction, 15 bytes, and the longest possible
+  // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+  // (There is a 15 byte limit on ia32 instruction length that rules out some
+  // otherwise valid instructions.)
+  // This allows for a single, fast space check per instruction.
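+  // (Worst case arithmetic: 15 + 16 == 31 bytes, so kGap == 32 is just
+  // large enough for the check.)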
+  static const int kGap = 32;
+
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
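+  //
+  // Illustrative usage (a sketch, not taken from the original sources):
+  //
+  //   Assembler masm(NULL, 4 * KB);  // assembler-owned, growable buffer
+  //   masm.mov(eax, 42);
+  //   masm.ret(0);
+  //   CodeDesc desc;
+  //   masm.GetCode(&desc);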
+  Assembler(void* buffer, int buffer_size);
+  ~Assembler();
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Read/Modify the code target in the branch/call instruction at pc.
+  inline static Address target_address_at(Address pc);
+  inline static void set_target_address_at(Address pc, Address target);
+
+  // Distance between the address of the code target in the call instruction
+  // and the return address.
+  static const int kCallTargetAddressOffset = kPointerSize;
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  static const int kPatchReturnSequenceAddressOffset = 1;  // JMP imm32.
+
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+  //
+  // - function names correspond one-to-one to ia32 instruction mnemonics
+  // - unless specified otherwise, instructions operate on 32bit operands
+  // - instructions on 8bit (byte) operands/registers have a trailing '_b'
+  // - instructions on 16bit (word) operands/registers have a trailing '_w'
+  // - naming conflicts with C++ keywords are resolved via a trailing '_'
+
+  // NOTE ON INTERFACE: Currently, the interface is not very consistent
+  // in the sense that some operations (e.g. mov()) can be called in more
+  // than one way to generate the same instruction: The Register argument
+  // can in some cases be replaced with an Operand(Register) argument.
+  // This should be cleaned up and made more orthogonal. The question
+  // is: should we always use Operands instead of Registers where an
+  // Operand is possible, or should we have a Register (overloaded) form
+  // instead? We must be careful to make sure that the selected instruction
+  // is obvious from the parameters to avoid hard-to-find code generation
+  // bugs.
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2.
+  void Align(int m);
+
+  // Stack
+  void pushad();
+  void popad();
+
+  void pushfd();
+  void popfd();
+
+  void push(const Immediate& x);
+  void push(Register src);
+  void push(const Operand& src);
+  void push(Label* label, RelocInfo::Mode relocation_mode);
+
+  void pop(Register dst);
+  void pop(const Operand& dst);
+
+  void enter(const Immediate& size);
+  void leave();
+
+  // Moves
+  void mov_b(Register dst, const Operand& src);
+  void mov_b(const Operand& dst, int8_t imm8);
+  void mov_b(const Operand& dst, Register src);
+
+  void mov_w(Register dst, const Operand& src);
+  void mov_w(const Operand& dst, Register src);
+
+  void mov(Register dst, int32_t imm32);
+  void mov(Register dst, const Immediate& x);
+  void mov(Register dst, Handle<Object> handle);
+  void mov(Register dst, const Operand& src);
+  void mov(Register dst, Register src);
+  void mov(const Operand& dst, const Immediate& x);
+  void mov(const Operand& dst, Handle<Object> handle);
+  void mov(const Operand& dst, Register src);
+
+  void movsx_b(Register dst, const Operand& src);
+
+  void movsx_w(Register dst, const Operand& src);
+
+  void movzx_b(Register dst, const Operand& src);
+
+  void movzx_w(Register dst, const Operand& src);
+
+  // Conditional moves
+  void cmov(Condition cc, Register dst, int32_t imm32);
+  void cmov(Condition cc, Register dst, Handle<Object> handle);
+  void cmov(Condition cc, Register dst, const Operand& src);
+
+  // Exchange two registers
+  void xchg(Register dst, Register src);
+
+  // Arithmetics
+  void adc(Register dst, int32_t imm32);
+  void adc(Register dst, const Operand& src);
+
+  void add(Register dst, const Operand& src);
+  void add(const Operand& dst, const Immediate& x);
+
+  void and_(Register dst, int32_t imm32);
+  void and_(Register dst, const Operand& src);
+  void and_(const Operand& src, Register dst);
+  void and_(const Operand& dst, const Immediate& x);
+
+  void cmpb(const Operand& op, int8_t imm8);
+  void cmpb_al(const Operand& op);
+  void cmpw_ax(const Operand& op);
+  void cmpw(const Operand& op, Immediate imm16);
+  void cmp(Register reg, int32_t imm32);
+  void cmp(Register reg, Handle<Object> handle);
+  void cmp(Register reg, const Operand& op);
+  void cmp(const Operand& op, const Immediate& imm);
+  void cmp(const Operand& op, Handle<Object> handle);
+
+  void dec_b(Register dst);
+
+  void dec(Register dst);
+  void dec(const Operand& dst);
+
+  void cdq();
+
+  void idiv(Register src);
+
+  // Signed multiply instructions.
+  void imul(Register src);                               // edx:eax = eax * src.
+  void imul(Register dst, const Operand& src);           // dst = dst * src.
+  void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
+
+  void inc(Register dst);
+  void inc(const Operand& dst);
+
+  void lea(Register dst, const Operand& src);
+
+  // Unsigned multiply instruction.
+  void mul(Register src);                                // edx:eax = eax * reg.
+
+  void neg(Register dst);
+
+  void not_(Register dst);
+
+  void or_(Register dst, int32_t imm32);
+  void or_(Register dst, const Operand& src);
+  void or_(const Operand& dst, Register src);
+  void or_(const Operand& dst, const Immediate& x);
+
+  void rcl(Register dst, uint8_t imm8);
+
+  void sar(Register dst, uint8_t imm8);
+  void sar(Register dst);
+
+  void sbb(Register dst, const Operand& src);
+
+  void shld(Register dst, const Operand& src);
+
+  void shl(Register dst, uint8_t imm8);
+  void shl(Register dst);
+
+  void shrd(Register dst, const Operand& src);
+
+  void shr(Register dst, uint8_t imm8);
+  void shr(Register dst);
+  void shr_cl(Register dst);
+
+  void sub(const Operand& dst, const Immediate& x);
+  void sub(Register dst, const Operand& src);
+  void sub(const Operand& dst, Register src);
+
+  void test(Register reg, const Immediate& imm);
+  void test(Register reg, const Operand& op);
+  void test(const Operand& op, const Immediate& imm);
+
+  void xor_(Register dst, int32_t imm32);
+  void xor_(Register dst, const Operand& src);
+  void xor_(const Operand& src, Register dst);
+  void xor_(const Operand& dst, const Immediate& x);
+
+  // Bit operations.
+  void bt(const Operand& dst, Register src);
+  void bts(const Operand& dst, Register src);
+
+  // Miscellaneous
+  void hlt();
+  void int3();
+  void nop();
+  void rdtsc();
+  void ret(int imm16);
+
+  // Label operations & relative jumps (PPUM Appendix D)
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Calls
+  void call(Label* L);
+  void call(byte* entry, RelocInfo::Mode rmode);
+  void call(const Operand& adr);
+  void call(Handle<Code> code, RelocInfo::Mode rmode);
+
+  // Jumps
+  void jmp(Label* L);  // unconditional jump to L
+  void jmp(byte* entry, RelocInfo::Mode rmode);
+  void jmp(const Operand& adr);
+  void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+
+  // Conditional jumps
+  void j(Condition cc, Label* L, Hint hint = no_hint);
+  void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
+  void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+
+  // Floating-point operations
+  void fld(int i);
+
+  void fld1();
+  void fldz();
+
+  void fld_s(const Operand& adr);
+  void fld_d(const Operand& adr);
+
+  void fstp_s(const Operand& adr);
+  void fstp_d(const Operand& adr);
+
+  void fild_s(const Operand& adr);
+  void fild_d(const Operand& adr);
+
+  void fist_s(const Operand& adr);
+
+  void fistp_s(const Operand& adr);
+  void fistp_d(const Operand& adr);
+
+  void fisttp_s(const Operand& adr);
+
+  void fabs();
+  void fchs();
+  void fcos();
+  void fsin();
+
+  void fadd(int i);
+  void fsub(int i);
+  void fmul(int i);
+  void fdiv(int i);
+
+  void fisub_s(const Operand& adr);
+
+  void faddp(int i = 1);
+  void fsubp(int i = 1);
+  void fsubrp(int i = 1);
+  void fmulp(int i = 1);
+  void fdivp(int i = 1);
+  void fprem();
+  void fprem1();
+
+  void fxch(int i = 1);
+  void fincstp();
+  void ffree(int i = 0);
+
+  void ftst();
+  void fucomp(int i);
+  void fucompp();
+  void fcompp();
+  void fnstsw_ax();
+  void fwait();
+  void fnclex();
+
+  void frndint();
+
+  void sahf();
+  void setcc(Condition cc, Register reg);
+
+  void cpuid();
+
+  // SSE2 instructions
+  void cvttss2si(Register dst, const Operand& src);
+  void cvttsd2si(Register dst, const Operand& src);
+
+  void cvtsi2sd(XMMRegister dst, const Operand& src);
+
+  void addsd(XMMRegister dst, XMMRegister src);
+  void subsd(XMMRegister dst, XMMRegister src);
+  void mulsd(XMMRegister dst, XMMRegister src);
+  void divsd(XMMRegister dst, XMMRegister src);
+
+  void comisd(XMMRegister dst, XMMRegister src);
+
+  // Use either movsd or movlpd.
+  void movdbl(XMMRegister dst, const Operand& src);
+  void movdbl(const Operand& dst, XMMRegister src);
+
+  // Debugging
+  void Print();
+
+  // Check the code size generated from label to here.
+  int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  // Writes a single word of data in the code stream.
+  // Used for inline tables, e.g., jump-tables.
+  void dd(uint32_t data, RelocInfo::Mode reloc_info);
+
+  int pc_offset() const  { return pc_ - buffer_; }
+  int current_statement_position() const { return current_statement_position_; }
+  int current_position() const  { return current_position_; }
+
+  // Check if there is less than kGap bytes available in the buffer.
+  // If this is the case, we need to grow the buffer before emitting
+  // an instruction or relocation information.
+  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+  // Get the number of bytes available in the buffer.
+  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Avoid overflows for displacements etc.
+  static const int kMaximalBufferSize = 512*MB;
+  static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+  void movsd(XMMRegister dst, const Operand& src);
+  void movsd(const Operand& dst, XMMRegister src);
+
+  void emit_sse_operand(XMMRegister reg, const Operand& adr);
+  void emit_sse_operand(XMMRegister dst, XMMRegister src);
+
+
+ private:
+  byte* addr_at(int pos)  { return buffer_ + pos; }
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  uint32_t long_at(int pos)  {
+    return *reinterpret_cast<uint32_t*>(addr_at(pos));
+  }
+  void long_at_put(int pos, uint32_t x)  {
+    *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+  }
+
+  // code emission
+  void GrowBuffer();
+  inline void emit(uint32_t x);
+  inline void emit(Handle<Object> handle);
+  inline void emit(uint32_t x, RelocInfo::Mode rmode);
+  inline void emit(const Immediate& x);
+  inline void emit_w(const Immediate& x);
+
+  // Emit the code-object-relative offset of the label's position
+  inline void emit_code_relative_offset(Label* label);
+
+  // instruction generation
+  void emit_arith_b(int op1, int op2, Register dst, int imm8);
+
+  // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
+  // with a given destination expression and an immediate operand.  It attempts
+  // to use the shortest encoding possible.
+  // sel specifies the /n in the modrm byte (see the Intel PRM).
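+  // For instance (illustrative, with sel == 0 for add): an immediate that
+  // fits in a signed byte is emitted as 83 /0 ib, an eax destination with
+  // a 32-bit immediate as the short form 05 id, and the general case as
+  // 81 /0 id.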
+  void emit_arith(int sel, Operand dst, const Immediate& x);
+
+  void emit_operand(Register reg, const Operand& adr);
+
+  void emit_farith(int b1, int b2, int i);
+
+  // labels
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+
+  // displacements
+  inline Displacement disp_at(Label* L);
+  inline void disp_at_put(Label* L, Displacement disp);
+  inline void emit_disp(Label* L, Displacement::Type type);
+
+  // record reloc info for current pc_
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+  friend class CodePatcher;
+  friend class EnsureSpace;
+
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+  // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
+  static byte* spare_buffer_;
+
+  // code generation
+  byte* pc_;  // the program counter; moves forward
+  RelocInfoWriter reloc_info_writer;
+
+  // push-pop elimination
+  byte* last_pc_;
+
+  // source position information
+  int current_statement_position_;
+  int current_position_;
+  int written_statement_position_;
+  int written_position_;
+};
+
+
+// Helper class that ensures that there is enough space for generating
+// instructions and relocation information.  The constructor makes
+// sure that there is enough space and (in debug mode) the destructor
+// checks that we did not generate too much.
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+    if (assembler_->overflow()) assembler_->GrowBuffer();
+#ifdef DEBUG
+    space_before_ = assembler_->available_space();
+#endif
+  }
+
+#ifdef DEBUG
+  ~EnsureSpace() {
+    int bytes_generated = space_before_ - assembler_->available_space();
+    ASSERT(bytes_generated < assembler_->kGap);
+  }
+#endif
+
+ private:
+  Assembler* assembler_;
+#ifdef DEBUG
+  int space_before_;
+#endif
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_ASSEMBLER_IA32_H_
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
new file mode 100644
index 0000000..ad44026
--- /dev/null
+++ b/src/ia32/builtins-ia32.cc
@@ -0,0 +1,1233 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
+  // TODO(428): Don't pass the function in a static variable.
+  ExternalReference passed = ExternalReference::builtin_passed_function();
+  __ mov(Operand::StaticVariable(passed), edi);
+
+  // The actual argument count has already been loaded into register
+  // eax, but JumpToRuntime expects eax to contain the number of
+  // arguments including the receiver.
+  __ inc(eax);
+  __ JumpToRuntime(ExternalReference(id));
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  //  -- edi: constructor function
+  // -----------------------------------
+
+  Label non_function_call;
+  // Check that function is not a smi.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &non_function_call);
+  // Check that function is a JSFunction.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &non_function_call);
+
+  // Jump to the function-specific construct stub.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(Operand(ebx));
+
+  // edi: called object
+  // eax: number of arguments
+  __ bind(&non_function_call);
+
+  // Set expected number of arguments to zero (not changing eax).
+  __ Set(ebx, Immediate(0));
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+         RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  // Enter a construct frame.
+  __ EnterConstructFrame();
+
+  // Store a smi-tagged arguments count on the stack.
+  __ shl(eax, kSmiTagSize);
+  __ push(eax);
+
+  // Push the function to invoke on the stack.
+  __ push(edi);
+
+  // Try to allocate the object without transitioning into C code. If any of
+  // the preconditions is not met, the code bails out to the runtime call.
+  Label rt_call, allocated;
+  if (FLAG_inline_new) {
+    Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address();
+    __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+    __ j(not_equal, &rt_call);
+#endif
+
+    // Verified that the constructor is a JSFunction.
+    // Load the initial map and verify that it is in fact a map.
+    // edi: constructor
+    __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi test below catches both a NULL value and a Smi.
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(zero, &rt_call);
+    // edi: constructor
+    // eax: initial map (if proven valid below)
+    __ CmpObjectType(eax, MAP_TYPE, ebx);
+    __ j(not_equal, &rt_call);
+
+    // Check that the constructor is not constructing a JSFunction (see
+    // comments in Runtime_NewObject in runtime.cc), in which case the
+    // initial map's instance type would be JS_FUNCTION_TYPE.
+    // edi: constructor
+    // eax: initial map
+    __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+    __ j(equal, &rt_call);
+
+    // Now allocate the JSObject on the heap.
+    // edi: constructor
+    // eax: initial map
+    __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+    __ shl(edi, kPointerSizeLog2);
+    __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+    // Allocated the JSObject, now initialize the fields.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+    __ mov(ecx, Factory::empty_fixed_array());
+    __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+    __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+    // Set extra fields in the newly allocated object.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    { Label loop, entry;
+      __ mov(edx, Factory::undefined_value());
+      __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ mov(Operand(ecx, 0), edx);
+      __ add(Operand(ecx), Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmp(ecx, Operand(edi));
+      __ j(less, &loop);
+    }
+
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    __ or_(Operand(ebx), Immediate(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed.
+    // Allocate and initialize a FixedArray if it is.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    // Calculate the total number of properties described by the map.
+    __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+    __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+    __ add(edx, Operand(ecx));
+    // Calculate unused properties past the end of the in-object properties.
+    __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+    __ sub(edx, Operand(ecx));
+    // Done if no extra properties are to be allocated.
+    __ j(zero, &allocated);
+    __ Assert(positive, "Property allocation count failed.");
+
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // ebx: JSObject
+    // edi: start of next object (will be start of FixedArray)
+    // edx: number of elements in properties array
+    __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                          times_pointer_size,
+                          edx,
+                          edi,
+                          ecx,
+                          no_reg,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
+
+    // Initialize the FixedArray.
+    // ebx: JSObject
+    // edi: FixedArray
+    // edx: number of elements
+    // ecx: start of next object
+    __ mov(eax, Factory::fixed_array_map());
+    __ mov(Operand(edi, JSObject::kMapOffset), eax);  // setup the map
+    __ mov(Operand(edi, Array::kLengthOffset), edx);  // and length
+
+    // Initialize the fields to undefined.
+    // ebx: JSObject
+    // edi: FixedArray
+    // ecx: start of next object
+    { Label loop, entry;
+      __ mov(edx, Factory::undefined_value());
+      __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ mov(Operand(eax, 0), edx);
+      __ add(Operand(eax), Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmp(eax, Operand(ecx));
+      __ j(below, &loop);
+    }
+
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject
+    // ebx: JSObject
+    // edi: FixedArray
+    __ or_(Operand(edi), Immediate(kHeapObjectTag));  // add the heap tag
+    __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+
+
+    // Continue with JSObject being successfully allocated
+    // ebx: JSObject
+    __ jmp(&allocated);
+
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated objects unused properties.
+    // ebx: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(ebx);
+  }
+
+  // Allocate the new receiver object using the runtime call.
+  __ bind(&rt_call);
+  // Must restore edi (constructor) before calling runtime.
+  __ mov(edi, Operand(esp, 0));
+  // edi: function (constructor)
+  __ push(edi);
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(ebx, Operand(eax));  // store result in ebx
+
+  // New object allocated.
+  // ebx: newly allocated object
+  __ bind(&allocated);
+  // Retrieve the function from the stack.
+  __ pop(edi);
+
+  // Retrieve smi-tagged arguments count from the stack.
+  __ mov(eax, Operand(esp, 0));
+  __ shr(eax, kSmiTagSize);
+
+  // Push the allocated receiver to the stack. We need two copies
+  // because we may have to return the original one and the calling
+  // conventions dictate that the called function pops the receiver.
+  __ push(ebx);
+  __ push(ebx);
+
+  // Setup pointer to last argument.
+  __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+  // Copy arguments and receiver to the expression stack.
+  Label loop, entry;
+  __ mov(ecx, Operand(eax));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ push(Operand(ebx, ecx, times_4, 0));
+  __ bind(&entry);
+  __ dec(ecx);
+  __ j(greater_equal, &loop);
+
+  // Call the function.
+  ParameterCount actual(eax);
+  __ InvokeFunction(edi, actual, CALL_FUNCTION);
+
+  // Restore context from the frame.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &use_receiver, not_taken);
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(greater_equal, &exit, not_taken);
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ mov(eax, Operand(esp, 0));
+
+  // Restore the arguments count and leave the construct frame.
+  __ bind(&exit);
+  __ mov(ebx, Operand(esp, kPointerSize));  // get arguments count
+  __ LeaveConstructFrame();
+
+  // Remove caller arguments from the stack and return.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
+  __ push(ecx);
+  __ IncrementCounter(&Counters::constructed_objects, 1);
+  __ ret(0);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Clear the context before we push it when entering the JS frame.
+  __ xor_(esi, Operand(esi));  // clear esi
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Load the previous frame pointer (ebx) to access C arguments
+  __ mov(ebx, Operand(ebp, 0));
+
+  // Get the function from the frame and setup the context.
+  __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+  __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+
+  // Push the function and the receiver onto the stack.
+  __ push(ecx);
+  __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+
+  // Load the number of arguments and setup pointer to the arguments.
+  __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+  __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+
+  // Copy arguments to the stack in a loop.
+  Label loop, entry;
+  __ xor_(ecx, Operand(ecx));  // clear ecx
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
+  __ push(Operand(edx, 0));  // dereference handle
+  __ inc(Operand(ecx));
+  __ bind(&entry);
+  __ cmp(ecx, Operand(eax));
+  __ j(not_equal, &loop);
+
+  // Get the function from the stack and call it.
+  __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));  // +1 ~ receiver
+
+  // Invoke the code.
+  if (is_construct) {
+    __ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+            RelocInfo::CODE_TARGET);
+  } else {
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION);
+  }
+
+  // Exit the JS frame. Notice that this also removes the empty
+  // context and the function left on the stack by the code
+  // invocation.
+  __ LeaveInternalFrame();
+  __ ret(1 * kPointerSize);  // remove receiver
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  { Label done;
+    __ test(eax, Operand(eax));
+    __ j(not_zero, &done, taken);
+    __ pop(ebx);
+    __ push(Immediate(Factory::undefined_value()));
+    __ push(ebx);
+    __ inc(eax);
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call from the stack.
+  { Label done, non_function, function;
+    // +1 ~ return address.
+    __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));
+    __ test(edi, Immediate(kSmiTagMask));
+    __ j(zero, &non_function, not_taken);
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    __ j(equal, &function, taken);
+
+    // Non-function called: Clear the function to force exception.
+    __ bind(&non_function);
+    __ xor_(edi, Operand(edi));
+    __ jmp(&done);
+
+    // Function called: Change context eagerly to get the right global object.
+    __ bind(&function);
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+    __ bind(&done);
+  }
+
+  // 3. Make sure first argument is an object; convert if necessary.
+  { Label call_to_object, use_global_receiver, patch_receiver, done;
+    __ mov(ebx, Operand(esp, eax, times_4, 0));
+
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ j(zero, &call_to_object);
+
+    __ cmp(ebx, Factory::null_value());
+    __ j(equal, &use_global_receiver);
+    __ cmp(ebx, Factory::undefined_value());
+    __ j(equal, &use_global_receiver);
+
+    __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+    __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+    __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+    __ j(less, &call_to_object);
+    __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+    __ j(less_equal, &done);
+
+    __ bind(&call_to_object);
+    __ EnterInternalFrame();  // preserves eax, ebx, edi
+
+    // Store the arguments count on the stack (smi tagged).
+    ASSERT(kSmiTag == 0);
+    __ shl(eax, kSmiTagSize);
+    __ push(eax);
+
+    __ push(edi);  // save edi across the call
+    __ push(ebx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(ebx, eax);
+    __ pop(edi);  // restore edi after the call
+
+    // Get the arguments count and untag it.
+    __ pop(eax);
+    __ shr(eax, kSmiTagSize);
+
+    __ LeaveInternalFrame();
+    __ jmp(&patch_receiver);
+
+    // Use the global receiver object from the called function as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalIndex =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ mov(ebx, FieldOperand(esi, kGlobalIndex));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+    __ bind(&patch_receiver);
+    __ mov(Operand(esp, eax, times_4, 0), ebx);
+
+    __ bind(&done);
+  }
+
+  // 4. Shift stuff one slot down the stack.
+  { Label loop;
+    __ lea(ecx, Operand(eax, +1));  // +1 ~ copy receiver too
+    __ bind(&loop);
+    __ mov(ebx, Operand(esp, ecx, times_4, 0));
+    __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
+    __ dec(ecx);
+    __ j(not_zero, &loop);
+  }
+
+  // 5. Remove TOS (copy of last arguments), but keep return address.
+  __ pop(ebx);
+  __ pop(ecx);
+  __ push(ebx);
+  __ dec(eax);
+
+  // 6. Check that function really was a function and get the code to
+  //    call from the function and check that the number of expected
+  //    arguments matches what we're providing.
+  { Label invoke;
+    __ test(edi, Operand(edi));
+    __ j(not_zero, &invoke, taken);
+    __ xor_(ebx, Operand(ebx));
+    __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+    __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+           RelocInfo::CODE_TARGET);
+
+    __ bind(&invoke);
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(ebx,
+           FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+    __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+    __ cmp(eax, Operand(ebx));
+    __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
+  }
+
+  // 7. Jump (tail-call) to the code in register edx without checking arguments.
+  ParameterCount expected(0);
+  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  __ EnterInternalFrame();
+
+  __ push(Operand(ebp, 4 * kPointerSize));  // push this
+  __ push(Operand(ebp, 2 * kPointerSize));  // push arguments
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  if (FLAG_check_stack) {
+    // We need to catch preemptions right here, otherwise an unlucky preemption
+    // could show up as a failed apply.
+    ExternalReference stack_guard_limit =
+        ExternalReference::address_of_stack_guard_limit();
+    Label retry_preemption;
+    Label no_preemption;
+    __ bind(&retry_preemption);
+    __ mov(edi, Operand::StaticVariable(stack_guard_limit));
+    __ cmp(esp, Operand(edi));
+    __ j(above, &no_preemption, taken);
+
+    // Preemption!
+    // Because builtins always remove the receiver from the stack, we
+    // have to fake one to avoid underflowing the stack.
+    __ push(eax);
+    __ push(Immediate(Smi::FromInt(0)));
+
+    // Do call to runtime routine.
+    __ CallRuntime(Runtime::kStackGuard, 1);
+    __ pop(eax);
+    __ jmp(&retry_preemption);
+
+    __ bind(&no_preemption);
+
+    Label okay;
+    // Make ecx the space we have left.
+    __ mov(ecx, Operand(esp));
+    __ sub(ecx, Operand(edi));
+    // Make edx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ mov(edx, Operand(eax));
+    __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+    __ cmp(ecx, Operand(edx));
+    __ j(greater, &okay, taken);
+
+    // Too bad: Out of stack space.
+    __ push(Operand(ebp, 4 * kPointerSize));  // push this
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+  }
+
+  // Push current index and limit.
+  const int kLimitOffset =
+      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+  __ push(eax);  // limit
+  __ push(Immediate(0));  // index
+
+  // Change context eagerly to get the right global object if
+  // necessary.
+  __ mov(edi, Operand(ebp, 4 * kPointerSize));
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ mov(ebx, Operand(ebp, 3 * kPointerSize));
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &call_to_object);
+  __ cmp(ebx, Factory::null_value());
+  __ j(equal, &use_global_receiver);
+  __ cmp(ebx, Factory::undefined_value());
+  __ j(equal, &use_global_receiver);
+
+  // If given receiver is already a JavaScript object then there's no
+  // reason for converting it.
+  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &call_to_object);
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ j(less_equal, &push_receiver);
+
+  // Convert the receiver to an object.
+  __ bind(&call_to_object);
+  __ push(ebx);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(ebx, Operand(eax));
+  __ jmp(&push_receiver);
+
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
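+  // (Context slots are laid out like FixedArray elements: slot i lives at
+  // Context::kHeaderSize + i * kPointerSize from the start of the object.)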
+  __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+  // Push the receiver.
+  __ bind(&push_receiver);
+  __ push(ebx);
+
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ mov(eax, Operand(ebp, kIndexOffset));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(ecx, Operand(ebp, 2 * kPointerSize));  // load arguments
+  __ push(ecx);
+  __ push(eax);
+
+  // Use inline caching to speed up access to arguments.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // It is important that we do not have a test instruction after the
+  // call.  A test instruction after the call is used to indicate that
+  // we have generated an inline version of the keyed load.  In this
+  // case, we know that we are not generating a test instruction next.
+
+  // Remove IC arguments from the stack and push the nth argument.
+  __ add(Operand(esp), Immediate(2 * kPointerSize));
+  __ push(eax);
+
+  // Update the index on the stack and in register eax.
+  __ mov(eax, Operand(ebp, kIndexOffset));
+  __ add(Operand(eax), Immediate(1 << kSmiTagSize));
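+  // (Adding 1 << kSmiTagSize bumps the untagged index by one while keeping
+  // the smi tag valid, since kSmiTag == 0.)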
+  __ mov(Operand(ebp, kIndexOffset), eax);
+
+  __ bind(&entry);
+  __ cmp(eax, Operand(ebp, kLimitOffset));
+  __ j(not_equal, &loop);
+
+  // Invoke the function.
+  ParameterCount actual(eax);
+  __ shr(eax, kSmiTagSize);
+  __ mov(edi, Operand(ebp, 4 * kPointerSize));
+  __ InvokeFunction(edi, actual, CALL_FUNCTION);
+
+  __ LeaveInternalFrame();
+  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ mov(result,
+         Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero, an
+// elements backing store is allocated with this size and filled with the
+// hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity >= 0);
+
+  // Load the initial map from the array function.
+  __ mov(scratch1, FieldOperand(array_function,
+                                JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
+  __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
+         Factory::empty_fixed_array());
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+  // If no storage is requested for the elements array, just set the empty
+  // fixed array.
+  if (initial_capacity == 0) {
+    __ mov(FieldOperand(result, JSArray::kElementsOffset),
+           Factory::empty_fixed_array());
+    return;
+  }
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ lea(scratch1, Operand(result, JSArray::kSize));
+  __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
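+  // (The backing store was allocated in the same chunk right after the
+  // JSArray, so its address is result + JSArray::kSize; result is tagged, so
+  // scratch1 ends up correctly tagged as well.)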
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array
+  // scratch2: start of next object
+  __ mov(FieldOperand(scratch1, JSObject::kMapOffset),
+         Factory::fixed_array_map());
+  __ mov(FieldOperand(scratch1, Array::kLengthOffset),
+         Immediate(initial_capacity));
+
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+  static const int kLoopUnfoldLimit = 4;
+  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    // Use a scratch register here to have only one reloc info when unfolding
+    // the loop.
+    __ mov(scratch3, Factory::the_hole_value());
+    for (int i = 0; i < initial_capacity; i++) {
+      __ mov(FieldOperand(scratch1,
+                          FixedArray::kHeaderSize + i * kPointerSize),
+             scratch3);
+    }
+  } else {
+    Label loop, entry;
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(Operand(scratch1, 0), Factory::the_hole_value());
+    __ add(Operand(scratch1), Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmp(scratch1, Operand(scratch2));
+    __ j(below, &loop);
+  }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true, the allocated elements backing store is
+// filled with the hole values; otherwise it is left uninitialized. When the
+// backing store is filled, the register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array,
+                            Register elements_array_end,
+                            Register scratch,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ mov(elements_array,
+         FieldOperand(array_function,
+                      JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ test(array_size, Operand(array_size));
+  __ j(not_zero, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
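+  // (FixedArray::SizeFor(n) is FixedArray::kHeaderSize + n * kPointerSize,
+  // so this covers the JSArray header plus a 4-element backing store.)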
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+                        times_half_pointer_size,  // array_size is a smi.
+                        array_size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
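+  // (array_size holds the element count as a smi, i.e. count << kSmiTagSize;
+  // scaling it by half a pointer size therefore contributes exactly
+  // count * kPointerSize on top of the fixed JSArray-plus-header size.)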
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array: initial map
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
+  __ mov(elements_array, Factory::empty_fixed_array());
+  __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ lea(elements_array, Operand(result, JSArray::kSize));
+  __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+  // Initialize the fixed array. FixedArray length is not stored as a smi.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  ASSERT(kSmiTag == 0);
+  __ shr(array_size, kSmiTagSize);  // Convert from smi to value.
+  __ mov(FieldOperand(elements_array, JSObject::kMapOffset),
+         Factory::fixed_array_map());
+  Label not_empty_2, fill_array;
+  __ test(array_size, Operand(array_size));
+  __ j(not_zero, &not_empty_2);
+  // Length of the FixedArray is the number of pre-allocated elements even
+  // though the actual JSArray has length 0.
+  __ mov(FieldOperand(elements_array, Array::kLengthOffset),
+         Immediate(kPreallocatedArrayElements));
+  __ jmp(&fill_array);
+  __ bind(&not_empty_2);
+  // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+  // same.
+  __ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  __ bind(&fill_array);
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ mov(scratch, Factory::the_hole_value());
+    __ lea(elements_array, Operand(elements_array,
+                                   FixedArray::kHeaderSize - kHeapObjectTag));
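+    // (Subtracting kHeapObjectTag converts the tagged elements pointer into
+    // the raw address of element 0, comparable against the untagged
+    // elements_array_end allocation limit below.)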
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(Operand(elements_array, 0), scratch);
+    __ add(Operand(elements_array), Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmp(elements_array, Operand(elements_array_end));
+    __ j(below, &loop);
+  }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+//   edi: constructor (built-in Array function)
+//   eax: argc
+//   esp[0]: return address
+//   esp[4]: last argument
+// This function is used for both construct and normal calls of Array. Whether
+// it is a construct call or not is indicated by the construct_call parameter.
+// The only difference between handling a construct call and a normal call is
+// that for a construct call the constructor function in edi needs to be
+// preserved for entering the generic code. In both cases argc in eax needs to
+// be preserved.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            bool construct_call,
+                            Label* call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
+
+  // Push the constructor and argc. No need to tag argc as a smi, as there will
+  // be no garbage collection with this on the stack.
+  int push_count = 0;
+  if (construct_call) {
+    push_count++;
+    __ push(edi);
+  }
+  push_count++;
+  __ push(eax);
+
+  // Check for array construction with zero arguments.
+  __ test(eax, Operand(eax));
+  __ j(not_zero, &argc_one_or_more);
+
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       edi,
+                       eax,
+                       ebx,
+                       ecx,
+                       edi,
+                       kPreallocatedArrayElements,
+                       &prepare_generic_code_call);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ pop(ebx);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ ret(kPointerSize);
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ cmp(eax, 1);
+  __ j(not_equal, &argc_two_or_more);
+  ASSERT(kSmiTag == 0);
+  __ test(Operand(esp, (push_count + 1) * kPointerSize),
+          Immediate(kIntptrSignBit | kSmiTagMask));
+  __ j(not_zero, &prepare_generic_code_call);
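+  // (The single test above handles both checks: a non-smi has the low tag
+  // bit set and a negative smi has the sign bit set, so either bit sends us
+  // to the generic code.)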
+
+  // Handle construction of an empty array of a certain size. Get the size from
+  // the stack and bail out if the size is too large to actually allocate an
+  // elements array.
+  __ mov(edx, Operand(esp, (push_count + 1) * kPointerSize));
+  ASSERT(kSmiTag == 0);
+  __ cmp(edx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
+  __ j(greater_equal, &prepare_generic_code_call);
+
+  // edx: array_size (smi)
+  // edi: constructor
+  // esp[0]: argc
+  // esp[4]: constructor (only if construct_call)
+  // esp[8]: return address
+  // esp[C]: argument
+  AllocateJSArray(masm,
+                  edi,
+                  edx,
+                  eax,
+                  ebx,
+                  ecx,
+                  edi,
+                  true,
+                  &prepare_generic_code_call);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ pop(ebx);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ ret(2 * kPointerSize);
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  ASSERT(kSmiTag == 0);
+  __ shl(eax, kSmiTagSize);  // Convert argc to a smi.
+  // eax: array_size (smi)
+  // edi: constructor
+  // esp[0]: argc
+  // esp[4]: constructor (only if construct_call)
+  // esp[8]: return address
+  // esp[C]: last argument
+  AllocateJSArray(masm,
+                  edi,
+                  eax,
+                  ebx,
+                  ecx,
+                  edx,
+                  edi,
+                  false,
+                  &prepare_generic_code_call);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ mov(eax, ebx);
+  __ pop(ebx);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ push(eax);
+  // eax: JSArray
+  // ebx: argc
+  // edx: elements_array_end (untagged)
+  // esp[0]: JSArray
+  // esp[4]: return address
+  // esp[8]: last argument
+
+  // Location of the last argument
+  __ lea(edi, Operand(esp, 2 * kPointerSize));
+
+  // Location of the first array element (parameter fill_with_hole to
+  // AllocateJSArray is false, so the FixedArray is returned in ecx).
+  __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // ebx: argc
+  // edx: location of the first array element
+  // edi: location of the last argument
+  // esp[0]: JSArray
+  // esp[4]: return address
+  // esp[8]: last argument
+  Label loop, entry;
+  __ mov(ecx, ebx);
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+  __ mov(Operand(edx, 0), eax);
+  __ add(Operand(edx), Immediate(kPointerSize));
+  __ bind(&entry);
+  __ dec(ecx);
+  __ j(greater_equal, &loop);
+
+  // Remove caller arguments from the stack and return.
+  // ebx: argc
+  // esp[0]: JSArray
+  // esp[4]: return address
+  // esp[8]: last argument
+  __ pop(eax);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
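+  // (ebx holds the untagged argument count; the extra kPointerSize skips the
+  // receiver slot pushed by the caller.)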
+  __ push(ecx);
+  __ ret(0);
+
+  // Restore argc and constructor before running the generic code.
+  __ bind(&prepare_generic_code_call);
+  __ pop(eax);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ jmp(call_generic_code);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, edi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi tag test below catches both a NULL pointer and a Smi.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, "Unexpected initial map for Array function");
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, false, &generic_array_code);
+
+  // Jump to the generic array code in case the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+  Handle<Code> array_code(code);
+  __ jmp(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- edi : constructor
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function,
+    // which always has a map.
+    GenerateLoadArrayFunction(masm, ebx);
+    __ cmp(edi, Operand(ebx));
+    __ Assert(equal, "Unexpected Array function");
+    // Initial map for the builtin Array function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi tag test below catches both a NULL pointer and a Smi.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, "Unexpected initial map for Array function");
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as constructor.
+  ArrayNativeCode(masm, true, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ push(ebp);
+  __ mov(ebp, Operand(esp));
+
+  // Store the arguments adaptor context sentinel.
+  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
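+  // (This smi-tagged frame-type marker sits in the slot where a standard
+  // frame keeps its context, letting stack-walking code recognize adaptor
+  // frames.)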
+
+  // Push the function on the stack.
+  __ push(edi);
+
+  // Preserve the number of arguments on the stack. Must preserve both
+  // eax and ebx because these registers are used when copying the
+  // arguments and the receiver.
+  ASSERT(kSmiTagSize == 1);
+  __ lea(ecx, Operand(eax, eax, times_1, kSmiTag));
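+  // (The lea computes eax + eax*1 + kSmiTag, i.e. eax << 1 with a zero tag:
+  // the argument count tagged as a smi, without clobbering eax or the flags.)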
+  __ push(ecx);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Retrieve the number of arguments from the stack.
+  __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Leave the frame.
+  __ leave();
+
+  // Remove caller arguments from the stack.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
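+  // (ebx holds the argument count as a smi, i.e. count << 1, so the times_2
+  // scale adds count * kPointerSize; the extra kPointerSize removes the
+  // receiver slot.)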
+  __ push(ecx);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : actual number of arguments
+  //  -- ebx : expected number of arguments
+  //  -- edx : code entry to call
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+  __ IncrementCounter(&Counters::arguments_adaptors, 1);
+
+  Label enough, too_few;
+  __ cmp(eax, Operand(ebx));
+  __ j(less, &too_few);
+  __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+  __ j(equal, &dont_adapt_arguments);
+
+  {  // Enough parameters: Actual >= expected.
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all expected arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(eax, Operand(ebp, eax, times_4, offset));
+    __ mov(ecx, -1);  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ inc(ecx);
+    __ push(Operand(eax, 0));
+    __ sub(Operand(eax), Immediate(kPointerSize));
+    __ cmp(ecx, Operand(ebx));
+    __ j(less, &copy);
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all actual arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(edi, Operand(ebp, eax, times_4, offset));
+    __ mov(ecx, -1);  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ inc(ecx);
+    __ push(Operand(edi, 0));
+    __ sub(Operand(edi), Immediate(kPointerSize));
+    __ cmp(ecx, Operand(eax));
+    __ j(less, &copy);
+
+    // Fill remaining expected arguments with undefined values.
+    Label fill;
+    __ bind(&fill);
+    __ inc(ecx);
+    __ push(Immediate(Factory::undefined_value()));
+    __ cmp(ecx, Operand(ebx));
+    __ j(less, &fill);
+
+    // Restore function pointer.
+    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ call(Operand(edx));
+
+  // Leave frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ ret(0);
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ jmp(Operand(edx));
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/codegen-ia32-inl.h b/src/ia32/codegen-ia32-inl.h
new file mode 100644
index 0000000..44e937a
--- /dev/null
+++ b/src/ia32/codegen-ia32-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_IA32_CODEGEN_IA32_INL_H_
+#define V8_IA32_CODEGEN_IA32_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(COS, args);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
new file mode 100644
index 0000000..0e314b9
--- /dev/null
+++ b/src/ia32/codegen-ia32.cc
@@ -0,0 +1,7979 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "ic-inl.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "runtime.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ push(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+      __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
+    }
+  }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  // Restore registers in reverse order due to the stack.
+  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ pop(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore) {
+      action &= ~kSyncedFlag;
+      __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
+    }
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+    : owner_(owner),
+      typeof_state_(NOT_INSIDE_TYPEOF),
+      destination_(NULL),
+      previous_(NULL) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+                           TypeofState typeof_state,
+                           ControlDestination* destination)
+    : owner_(owner),
+      typeof_state_(typeof_state),
+      destination_(destination),
+      previous_(owner->state()) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+  ASSERT(owner_->state() == this);
+  owner_->set_state(previous_);
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation
+
+CodeGenerator::CodeGenerator(int buffer_size,
+                             Handle<Script> script,
+                             bool is_eval)
+    : is_eval_(is_eval),
+      script_(script),
+      deferred_(8),
+      masm_(new MacroAssembler(NULL, buffer_size)),
+      scope_(NULL),
+      frame_(NULL),
+      allocator_(NULL),
+      state_(NULL),
+      loop_nesting_(0),
+      function_return_is_shadowed_(false),
+      in_spilled_code_(false) {
+}
+
+
+// Calling conventions:
+// ebp: caller's frame pointer
+// esp: stack pointer
+// edi: called JS function
+// esi: callee's context
+
+void CodeGenerator::GenCode(FunctionLiteral* fun) {
+  // Record the position for debugging purposes.
+  CodeForFunctionPosition(fun);
+
+  ZoneList<Statement*>* body = fun->body();
+
+  // Initialize state.
+  ASSERT(scope_ == NULL);
+  scope_ = fun->scope();
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
+  ASSERT(frame_ == NULL);
+  frame_ = new VirtualFrame();
+  set_in_spilled_code(false);
+
+  // Adjust for function-level loop nesting.
+  loop_nesting_ += fun->loop_nesting();
+
+  JumpTarget::set_compiling_deferred_code(false);
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    frame_->SpillAll();
+    __ int3();
+  }
+#endif
+
+  // New scope to get automatic timing calculation.
+  {  // NOLINT
+    HistogramTimerScope codegen_timer(&Counters::code_generation);
+    CodeGenState state(this);
+
+    // Entry:
+    // Stack: receiver, arguments, return address.
+    // ebp: caller's frame pointer
+    // esp: stack pointer
+    // edi: called JS function
+    // esi: callee's context
+    allocator_->Initialize();
+    frame_->Enter();
+
+    // Allocate space for locals and initialize them.
+    frame_->AllocateStackSlots();
+    // Initialize the function return target after the locals are set
+    // up, because it needs the expected frame height from the frame.
+    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+    function_return_is_shadowed_ = false;
+
+    // Allocate the local context if needed.
+    if (scope_->num_heap_slots() > 0) {
+      Comment cmnt(masm_, "[ allocate local context");
+      // Allocate local context.
+      // Get outer context and create a new context based on it.
+      frame_->PushFunction();
+      Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+
+      // Update context local.
+      frame_->SaveContextRegister();
+
+      // Verify that the runtime call result and esi agree.
+      if (FLAG_debug_code) {
+        __ cmp(context.reg(), Operand(esi));
+        __ Assert(equal, "Runtime::NewContext should end up in esi");
+      }
+    }
+
+    // TODO(1241774): Improve this code:
+    // 1) only needed if we have a context
+    // 2) no need to recompute context ptr every single time
+    // 3) don't copy parameter operand code from SlotOperand!
+    {
+      Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+      // Note that iteration order is relevant here! If we have the same
+      // parameter twice (e.g., function (x, y, x)), and that parameter
+      // needs to be copied into the context, it must be the last argument
+      // passed to the parameter that needs to be copied. This is a rare
+      // case, so we don't check for it; instead we rely on the copying
+      // order: such a parameter is copied repeatedly into the same
+      // context location and thus the last value is what is seen inside
+      // the function.
+      for (int i = 0; i < scope_->num_parameters(); i++) {
+        Variable* par = scope_->parameter(i);
+        Slot* slot = par->slot();
+        if (slot != NULL && slot->type() == Slot::CONTEXT) {
+          // The use of SlotOperand below is safe in unspilled code
+          // because the slot is guaranteed to be a context slot.
+          //
+          // There are no parameters in the global scope.
+          ASSERT(!scope_->is_global_scope());
+          frame_->PushParameterAt(i);
+          Result value = frame_->Pop();
+          value.ToRegister();
+
+          // SlotOperand loads context.reg() with the context object
+          // stored to, used below in RecordWrite.
+          Result context = allocator_->Allocate();
+          ASSERT(context.is_valid());
+          __ mov(SlotOperand(slot, context.reg()), value.reg());
+          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
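+          // (offset is the slot's byte offset within the context object;
+          // RecordWrite below uses it to update the write barrier for the
+          // store just emitted.)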
+          Result scratch = allocator_->Allocate();
+          ASSERT(scratch.is_valid());
+          frame_->Spill(context.reg());
+          frame_->Spill(value.reg());
+          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+        }
+      }
+    }
+
+    // Store the arguments object.  This must happen after context
+    // initialization because the arguments object may be stored in
+    // the context.
+    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+      StoreArgumentsObject(true);
+    }
+
+    // Generate code to 'execute' declarations and initialize functions
+    // (source elements). In case of an illegal redeclaration we need to
+    // handle that instead of processing the declarations.
+    if (scope_->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ illegal redeclarations");
+      scope_->VisitIllegalRedeclaration(this);
+    } else {
+      Comment cmnt(masm_, "[ declarations");
+      ProcessDeclarations(scope_->declarations());
+      // Bail out if a stack-overflow exception occurred when processing
+      // declarations.
+      if (HasStackOverflow()) return;
+    }
+
+    if (FLAG_trace) {
+      frame_->CallRuntime(Runtime::kTraceEnter, 0);
+      // Ignore the return value.
+    }
+    CheckStack();
+
+    // Compile the body of the function in a vanilla state. Don't
+    // bother compiling all the code if the scope has an illegal
+    // redeclaration.
+    if (!scope_->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+      bool is_builtin = Bootstrapper::IsActive();
+      bool should_trace =
+          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+      if (should_trace) {
+        frame_->CallRuntime(Runtime::kDebugTrace, 0);
+        // Ignore the return value.
+      }
+#endif
+      VisitStatements(body);
+
+      // Handle the return from the function.
+      if (has_valid_frame()) {
+        // If there is a valid frame, control flow can fall off the end of
+        // the body.  In that case there is an implicit return statement.
+        ASSERT(!function_return_is_shadowed_);
+        CodeForReturnPosition(fun);
+        frame_->PrepareForReturn();
+        Result undefined(Factory::undefined_value());
+        if (function_return_.is_bound()) {
+          function_return_.Jump(&undefined);
+        } else {
+          function_return_.Bind(&undefined);
+          GenerateReturnSequence(&undefined);
+        }
+      } else if (function_return_.is_linked()) {
+        // If the return target has dangling jumps to it, then we have not
+        // yet generated the return sequence.  This can happen when (a)
+        // control does not flow off the end of the body so we did not
+        // compile an artificial return statement just above, and (b) there
+        // are return statements in the body but (c) they are all shadowed.
+        Result return_value;
+        function_return_.Bind(&return_value);
+        GenerateReturnSequence(&return_value);
+      }
+    }
+  }
+
+  // Adjust for function-level loop nesting.
+  loop_nesting_ -= fun->loop_nesting();
+
+  // Code generation state must be reset.
+  ASSERT(state_ == NULL);
+  ASSERT(loop_nesting() == 0);
+  ASSERT(!function_return_is_shadowed_);
+  function_return_.Unuse();
+  DeleteFrame();
+
+  // Process any deferred code using the register allocator.
+  if (!HasStackOverflow()) {
+    HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
+    JumpTarget::set_compiling_deferred_code(true);
+    ProcessDeferred();
+    JumpTarget::set_compiling_deferred_code(false);
+  }
+
+  // There is no need to delete the register allocator, it is a
+  // stack-allocated local.
+  allocator_ = NULL;
+  scope_ = NULL;
+}
+
+
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+  // Currently, this assertion will fail if we try to assign to
+  // a constant variable that is constant because it is read-only
+  // (such as the variable referring to a named function expression).
+  // We need to implement assignments to read-only variables.
+  // Ideally, we should do this during AST generation (by converting
+  // such assignments into expression statements); however, in general
+  // we may not be able to make the decision until past AST generation,
+  // that is when the entire program is known.
+  ASSERT(slot != NULL);
+  int index = slot->index();
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      return frame_->ParameterAt(index);
+
+    case Slot::LOCAL:
+      return frame_->LocalAt(index);
+
+    case Slot::CONTEXT: {
+      // Follow the context chain if necessary.
+      ASSERT(!tmp.is(esi));  // do not overwrite context register
+      Register context = esi;
+      int chain_length = scope()->ContextChainLength(slot->var()->scope());
+      for (int i = 0; i < chain_length; i++) {
+        // Load the closure.
+        // (All contexts, even 'with' contexts, have a closure,
+        // and it is the same for all contexts inside a function.
+        // There is no need to go to the function context first.)
+        __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+        // Load the function context (which is the incoming, outer context).
+        __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+        context = tmp;
+      }
+      // We may have a 'with' context now. Get the function context.
+      // (In fact this mov may never be needed, since the scope analysis may
+      // not permit a direct context access in this case and thus we are
+      // always at a function context. However, it is safe to dereference
+      // because the function context of a function context is itself. Before
+      // deleting this mov we should try to create a counter-example first,
+      // though...)
+      __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+      return ContextOperand(tmp, index);
+    }
+
+    default:
+      UNREACHABLE();
+      return Operand(eax);
+  }
+}
+
+
+Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
+                                                         Result tmp,
+                                                         JumpTarget* slow) {
+  ASSERT(slot->type() == Slot::CONTEXT);
+  ASSERT(tmp.is_register());
+  Register context = esi;
+
+  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
+               Immediate(0));
+        slow->Branch(not_equal, not_taken);
+      }
+      __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+      context = tmp.reg();
+    }
+  }
+  // Check that last extension is NULL.
+  __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+  slow->Branch(not_equal, not_taken);
+  __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
+  return ContextOperand(tmp.reg(), slot->index());
+}
+
+
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
+void CodeGenerator::LoadCondition(Expression* x,
+                                  TypeofState typeof_state,
+                                  ControlDestination* dest,
+                                  bool force_control) {
+  ASSERT(!in_spilled_code());
+  int original_height = frame_->height();
+
+  { CodeGenState new_state(this, typeof_state, dest);
+    Visit(x);
+
+    // If we hit a stack overflow, we may not have actually visited
+    // the expression.  In that case, we ensure that we have a
+    // valid-looking frame state because we will continue to generate
+    // code as we unwind the C++ stack.
+    //
+    // It's possible to have both a stack overflow and a valid frame
+    // state (eg, a subexpression overflowed, visiting it returned
+    // with a dummied frame state, and visiting this expression
+    // returned with a normal-looking state).
+    if (HasStackOverflow() &&
+        !dest->is_used() &&
+        frame_->height() == original_height) {
+      dest->Goto(true);
+    }
+  }
+
+  if (force_control && !dest->is_used()) {
+    // Convert the TOS value into flow to the control destination.
+    ToBoolean(dest);
+  }
+
+  ASSERT(!(force_control && !dest->is_used()));
+  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+                                 TypeofState typeof_state) {
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  Load(expression, typeof_state);
+  frame_->SpillAll();
+  set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  ASSERT(!in_spilled_code());
+  JumpTarget true_target;
+  JumpTarget false_target;
+  ControlDestination dest(&true_target, &false_target, true);
+  LoadCondition(x, typeof_state, &dest, false);
+
+  if (dest.false_was_fall_through()) {
+    // The false target was just bound.
+    JumpTarget loaded;
+    frame_->Push(Factory::false_value());
+    // There may be dangling jumps to the true target.
+    if (true_target.is_linked()) {
+      loaded.Jump();
+      true_target.Bind();
+      frame_->Push(Factory::true_value());
+      loaded.Bind();
+    }
+
+  } else if (dest.is_used()) {
+    // There is true, and possibly false, control flow (with true as
+    // the fall through).
+    JumpTarget loaded;
+    frame_->Push(Factory::true_value());
+    if (false_target.is_linked()) {
+      loaded.Jump();
+      false_target.Bind();
+      frame_->Push(Factory::false_value());
+      loaded.Bind();
+    }
+
+  } else {
+    // We have a valid value on top of the frame, but we still may
+    // have dangling jumps to the true and false targets from nested
+    // subexpressions (eg, the left subexpressions of the
+    // short-circuited boolean operators).
+    ASSERT(has_valid_frame());
+    if (true_target.is_linked() || false_target.is_linked()) {
+      JumpTarget loaded;
+      loaded.Jump();  // Don't lose the current TOS.
+      if (true_target.is_linked()) {
+        true_target.Bind();
+        frame_->Push(Factory::true_value());
+        if (false_target.is_linked()) {
+          loaded.Jump();
+        }
+      }
+      if (false_target.is_linked()) {
+        false_target.Bind();
+        frame_->Push(Factory::false_value());
+      }
+      loaded.Bind();
+    }
+  }
+
+  ASSERT(has_valid_frame());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadGlobal() {
+  if (in_spilled_code()) {
+    frame_->EmitPush(GlobalObject());
+  } else {
+    Result temp = allocator_->Allocate();
+    __ mov(temp.reg(), GlobalObject());
+    frame_->Push(&temp);
+  }
+}
+
+
+void CodeGenerator::LoadGlobalReceiver() {
+  Result temp = allocator_->Allocate();
+  Register reg = temp.reg();
+  __ mov(reg, GlobalObject());
+  __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+  frame_->Push(&temp);
+}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+  Variable* variable = x->AsVariableProxy()->AsVariable();
+  if (variable != NULL && !variable->is_this() && variable->is_global()) {
+    // NOTE: This is somewhat nasty. We force the compiler to load
+    // the variable as if through '<global>.<variable>' to make sure we
+    // do not get reference errors.
+    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+    Literal key(variable->name());
+    // TODO(1241834): Fetch the position from the variable instead of using
+    // no position.
+    Property property(&global, &key, RelocInfo::kNoPosition);
+    Load(&property);
+  } else {
+    Load(x, INSIDE_TYPEOF);
+  }
+}
+
+
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
+  if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+  ASSERT(scope_->arguments_shadow() != NULL);
+  // We don't want to do lazy arguments allocation for functions that
+  // have heap-allocated contexts, because it interferes with the
+  // uninitialized const tracking in the context objects.
+  return (scope_->num_heap_slots() > 0)
+      ? EAGER_ARGUMENTS_ALLOCATION
+      : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+  ArgumentsAllocationMode mode = ArgumentsMode();
+  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+  Comment cmnt(masm_, "[ store arguments object");
+  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+    // When using lazy arguments allocation, we store the hole value
+    // as a sentinel indicating that the arguments object hasn't been
+    // allocated yet.
+    frame_->Push(Factory::the_hole_value());
+  } else {
+    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+    frame_->PushFunction();
+    frame_->PushReceiverSlotAddress();
+    frame_->Push(Smi::FromInt(scope_->num_parameters()));
+    Result result = frame_->CallStub(&stub, 3);
+    frame_->Push(&result);
+  }
+
+  { Reference shadow_ref(this, scope_->arguments_shadow());
+    Reference arguments_ref(this, scope_->arguments());
+    ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
+    // Here we rely on the convenient property that references to slots
+    // take up zero space in the frame (ie, it doesn't matter that the
+    // stored value is actually below the reference on the frame).
+    JumpTarget done;
+    bool skip_arguments = false;
+    if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+      // We have to skip storing into the arguments slot if it has
+      // already been written to. This can happen if a function
+      // has a local variable named 'arguments'.
+      LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+      Result arguments = frame_->Pop();
+      if (arguments.is_constant()) {
+        // We have to skip updating the arguments object if it has
+        // been assigned a proper value.
+        skip_arguments = !arguments.handle()->IsTheHole();
+      } else {
+        __ cmp(Operand(arguments.reg()), Immediate(Factory::the_hole_value()));
+        arguments.Unuse();
+        done.Branch(not_equal);
+      }
+    }
+    if (!skip_arguments) {
+      arguments_ref.SetValue(NOT_CONST_INIT);
+      if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+    }
+    shadow_ref.SetValue(NOT_CONST_INIT);
+  }
+  return frame_->Pop();
+}
+
+
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+  cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+  cgen_->UnloadReference(this);
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+  // References are loaded from both spilled and unspilled code.  Set the
+  // state to unspilled to allow that (and explicitly spill after
+  // construction at the construction sites).
+  bool was_in_spilled_code = in_spilled_code_;
+  in_spilled_code_ = false;
+
+  Comment cmnt(masm_, "[ LoadReference");
+  Expression* e = ref->expression();
+  Property* property = e->AsProperty();
+  Variable* var = e->AsVariableProxy()->AsVariable();
+
+  if (property != NULL) {
+    // The expression is either a property or a variable proxy that rewrites
+    // to a property.
+    Load(property->obj());
+    // We use a named reference if the key is a literal symbol, unless it is
+    // a string that can be legally parsed as an integer.  This is because
+    // otherwise we will not get into the slow case code that handles [] on
+    // String objects.
+    Literal* literal = property->key()->AsLiteral();
+    uint32_t dummy;
+    if (literal != NULL &&
+        literal->handle()->IsSymbol() &&
+        !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+      ref->set_type(Reference::NAMED);
+    } else {
+      Load(property->key());
+      ref->set_type(Reference::KEYED);
+    }
+  } else if (var != NULL) {
+    // The expression is a variable proxy that does not rewrite to a
+    // property.  Global variables are treated as named property references.
+    if (var->is_global()) {
+      LoadGlobal();
+      ref->set_type(Reference::NAMED);
+    } else {
+      ASSERT(var->slot() != NULL);
+      ref->set_type(Reference::SLOT);
+    }
+  } else {
+    // Anything else is a runtime error.
+    Load(e);
+    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+  }
+
+  in_spilled_code_ = was_in_spilled_code;
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+  // Pop a reference from the stack while preserving TOS.
+  Comment cmnt(masm_, "[ UnloadReference");
+  frame_->Nip(ref->size());
+}
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
+  Comment cmnt(masm_, "[ ToBoolean");
+
+  // The value to convert should be popped from the frame.
+  Result value = frame_->Pop();
+  value.ToRegister();
+  // Fast case checks.
+
+  // 'false' => false.
+  __ cmp(value.reg(), Factory::false_value());
+  dest->false_target()->Branch(equal);
+
+  // 'true' => true.
+  __ cmp(value.reg(), Factory::true_value());
+  dest->true_target()->Branch(equal);
+
+  // 'undefined' => false.
+  __ cmp(value.reg(), Factory::undefined_value());
+  dest->false_target()->Branch(equal);
+
+  // Smi => false iff zero.
+  ASSERT(kSmiTag == 0);
+  __ test(value.reg(), Operand(value.reg()));
+  dest->false_target()->Branch(zero);
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  dest->true_target()->Branch(zero);
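+  // (Since kSmiTag == 0, smi zero is an all-zero word and was caught by the
+  // test above; a clear tag bit here therefore means a non-zero smi, which
+  // converts to true.)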
+
+  // Call the stub for all other cases.
+  frame_->Push(&value);  // Undo the Pop() from above.
+  ToBooleanStub stub;
+  Result temp = frame_->CallStub(&stub, 1);
+  // Convert the result to a condition code.
+  __ test(temp.reg(), Operand(temp.reg()));
+  temp.Unuse();
+  dest->Split(not_equal);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+  // Code pattern for loading a floating point value. Input value must
+  // be either a smi or a heap number object (fp value). Requirements:
+  // operand in register number. Returns operand as floating point number
+  // on FPU stack.
+  static void LoadFloatOperand(MacroAssembler* masm, Register number);
+  // Code pattern for loading floating point values. Input values must
+  // be either smi or heap number objects (fp values). Requirements:
+  // operand_1 on TOS+1, operand_2 on TOS+2; returns operands as
+  // floating point numbers on FPU stack.
+  static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
+  // Test if operands are smi or number objects (fp). Requirements:
+  // operand_1 in eax, operand_2 in edx; falls through on float
+  // operands, jumps to the non_float label otherwise.
+  static void CheckFloatOperands(MacroAssembler* masm,
+                                 Label* non_float,
+                                 Register scratch);
+  // Test if operands are numbers (smi or HeapNumber objects), and load
+  // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
+  // either operand is not a number.  Operands are in edx and eax.
+  // Leaves operands unchanged.
+  static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
+  // Allocate a heap number in new space with undefined value.
+  // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
+  static void AllocateHeapNumber(MacroAssembler* masm,
+                                 Label* need_gc,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register result);
+};
+
+
+const char* GenericBinaryOpStub::GetName() {
+  switch (op_) {
+    case Token::ADD: return "GenericBinaryOpStub_ADD";
+    case Token::SUB: return "GenericBinaryOpStub_SUB";
+    case Token::MUL: return "GenericBinaryOpStub_MUL";
+    case Token::DIV: return "GenericBinaryOpStub_DIV";
+    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+    case Token::SAR: return "GenericBinaryOpStub_SAR";
+    case Token::SHL: return "GenericBinaryOpStub_SHL";
+    case Token::SHR: return "GenericBinaryOpStub_SHR";
+    default:         return "GenericBinaryOpStub";
+  }
+}
+
+
+// Call the specialized stub for a binary operation.
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+  DeferredInlineBinaryOperation(Token::Value op,
+                                Register dst,
+                                Register left,
+                                Register right,
+                                OverwriteMode mode)
+      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+    set_comment("[ DeferredInlineBinaryOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Register left_;
+  Register right_;
+  OverwriteMode mode_;
+};
+
+
+void DeferredInlineBinaryOperation::Generate() {
+  __ push(left_);
+  __ push(right_);
+  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
+  __ CallStub(&stub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+                                           SmiAnalysis* type,
+                                           OverwriteMode overwrite_mode) {
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Comment cmnt_token(masm_, Token::String(op));
+
+  if (op == Token::COMMA) {
+    // Simply discard left value.
+    frame_->Nip(1);
+    return;
+  }
+
+  // Set the flags based on the operation, type and loop nesting level.
+  GenericBinaryFlags flags;
+  switch (op) {
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Bit operations are assumed to likely operate on Smis. Even so,
+      // only generate the inline Smi check code if this operation is
+      // part of a loop.
+      flags = (loop_nesting() > 0)
+              ? SMI_CODE_INLINED
+              : SMI_CODE_IN_STUB;
+      break;
+
+    default:
+      // By default only inline the Smi check code for likely smis if this
+      // operation is part of a loop.
+      flags = ((loop_nesting() > 0) && type->IsLikelySmi())
+              ? SMI_CODE_INLINED
+              : SMI_CODE_IN_STUB;
+      break;
+  }
+
+  Result right = frame_->Pop();
+  Result left = frame_->Pop();
+
+  if (op == Token::ADD) {
+    bool left_is_string = left.is_constant() && left.handle()->IsString();
+    bool right_is_string = right.is_constant() && right.handle()->IsString();
+    if (left_is_string || right_is_string) {
+      frame_->Push(&left);
+      frame_->Push(&right);
+      Result answer;
+      if (left_is_string) {
+        if (right_is_string) {
+          // TODO(lrn): If both are constant strings, do a compile-time
+          // cons if allocation during codegen is allowed.
+          answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+        } else {
+          answer =
+            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+        }
+      } else if (right_is_string) {
+        answer =
+          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+      }
+      frame_->Push(&answer);
+      return;
+    }
+    // Neither operand is known to be a string.
+  }
+
+  bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
+  bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
+  bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
+  bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+  bool generate_no_smi_code = false;  // No smi code at all, inline or in stub.
+
+  if (left_is_smi && right_is_smi) {
+    // Compute the constant result at compile time, and leave it on the frame.
+    int left_int = Smi::cast(*left.handle())->value();
+    int right_int = Smi::cast(*right.handle())->value();
+    if (FoldConstantSmis(op, left_int, right_int)) return;
+  }
+
+  if (left_is_non_smi || right_is_non_smi) {
+    // Set flag so that we go straight to the slow case, with no smi code.
+    generate_no_smi_code = true;
+  } else if (right_is_smi) {
+    ConstantSmiBinaryOperation(op, &left, right.handle(),
+                               type, false, overwrite_mode);
+    return;
+  } else if (left_is_smi) {
+    ConstantSmiBinaryOperation(op, &right, left.handle(),
+                               type, true, overwrite_mode);
+    return;
+  }
+
+  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+    LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+  } else {
+    frame_->Push(&left);
+    frame_->Push(&right);
+    // If we know the arguments aren't smis, use the binary operation stub
+    // that does not check for the fast smi case.
+    // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
+    if (generate_no_smi_code) {
+      flags = SMI_CODE_INLINED;
+    }
+    GenericBinaryOpStub stub(op, overwrite_mode, flags);
+    Result answer = frame_->CallStub(&stub, 2);
+    frame_->Push(&answer);
+  }
+}
+
+
+bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+  Object* answer_object = Heap::undefined_value();
+  switch (op) {
+    case Token::ADD:
+      if (Smi::IsValid(left + right)) {
+        answer_object = Smi::FromInt(left + right);
+      }
+      break;
+    case Token::SUB:
+      if (Smi::IsValid(left - right)) {
+        answer_object = Smi::FromInt(left - right);
+      }
+      break;
+    case Token::MUL: {
+        double answer = static_cast<double>(left) * right;
+        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
+          // If the product is zero and the non-zero factor is negative,
+          // the spec requires us to return floating point negative zero.
+          if (answer != 0 || (left >= 0 && right >= 0)) {
+            answer_object = Smi::FromInt(static_cast<int>(answer));
+          }
+        }
+      }
+      break;
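+    // Division and modulus are not folded: the quotient may be
+    // non-integral, and either result may be -0.0, which a smi cannot
+    // represent.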
+    case Token::DIV:
+    case Token::MOD:
+      break;
+    case Token::BIT_OR:
+      answer_object = Smi::FromInt(left | right);
+      break;
+    case Token::BIT_AND:
+      answer_object = Smi::FromInt(left & right);
+      break;
+    case Token::BIT_XOR:
+      answer_object = Smi::FromInt(left ^ right);
+      break;
+
+    case Token::SHL: {
+        int shift_amount = right & 0x1F;
+        if (Smi::IsValid(left << shift_amount)) {
+          answer_object = Smi::FromInt(left << shift_amount);
+        }
+        break;
+      }
+    case Token::SHR: {
+        int shift_amount = right & 0x1F;
+        unsigned int unsigned_left = left;
+        unsigned_left >>= shift_amount;
+        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
+          answer_object = Smi::FromInt(unsigned_left);
+        }
+        break;
+      }
+    case Token::SAR: {
+        int shift_amount = right & 0x1F;
+        unsigned int unsigned_left = left;
+        if (left < 0) {
+          // Perform an arithmetic shift of a negative number by
+          // complementing the number, shifting logically, and
+          // complementing again.
+          unsigned_left = ~unsigned_left;
+          unsigned_left >>= shift_amount;
+          unsigned_left = ~unsigned_left;
+        } else {
+          unsigned_left >>= shift_amount;
+        }
+        ASSERT(Smi::IsValid(unsigned_left));  // Converted to signed.
+        answer_object = Smi::FromInt(unsigned_left);  // Converted to signed.
+        break;
+      }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  if (answer_object == Heap::undefined_value()) {
+    return false;
+  }
+  frame_->Push(Handle<Object>(answer_object));
+  return true;
+}
+
+
+// Implements a binary operation using a deferred code object and some
+// inline code to operate on smis quickly.
+void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+                                             Result* left,
+                                             Result* right,
+                                             OverwriteMode overwrite_mode) {
+  // Special handling of div and mod because they use fixed registers.
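+  // The ia32 idiv instruction takes its dividend in edx:eax and leaves
+  // the quotient in eax and the remainder in edx.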
+  if (op == Token::DIV || op == Token::MOD) {
+    // We need eax as the quotient register, edx as the remainder
+    // register, neither left nor right in eax or edx, and left copied
+    // to eax.
+    Result quotient;
+    Result remainder;
+    bool left_is_in_eax = false;
+    // Step 1: get eax for quotient.
+    if ((left->is_register() && left->reg().is(eax)) ||
+        (right->is_register() && right->reg().is(eax))) {
+      // One or both is in eax.  Use a fresh non-edx register for
+      // them.
+      Result fresh = allocator_->Allocate();
+      ASSERT(fresh.is_valid());
+      if (fresh.reg().is(edx)) {
+        remainder = fresh;
+        fresh = allocator_->Allocate();
+        ASSERT(fresh.is_valid());
+      }
+      if (left->is_register() && left->reg().is(eax)) {
+        quotient = *left;
+        *left = fresh;
+        left_is_in_eax = true;
+      }
+      if (right->is_register() && right->reg().is(eax)) {
+        quotient = *right;
+        *right = fresh;
+      }
+      __ mov(fresh.reg(), eax);
+    } else {
+      // Neither left nor right is in eax.
+      quotient = allocator_->Allocate(eax);
+    }
+    ASSERT(quotient.is_register() && quotient.reg().is(eax));
+    ASSERT(!(left->is_register() && left->reg().is(eax)));
+    ASSERT(!(right->is_register() && right->reg().is(eax)));
+
+    // Step 2: get edx for remainder if necessary.
+    if (!remainder.is_valid()) {
+      if ((left->is_register() && left->reg().is(edx)) ||
+          (right->is_register() && right->reg().is(edx))) {
+        Result fresh = allocator_->Allocate();
+        ASSERT(fresh.is_valid());
+        if (left->is_register() && left->reg().is(edx)) {
+          remainder = *left;
+          *left = fresh;
+        }
+        if (right->is_register() && right->reg().is(edx)) {
+          remainder = *right;
+          *right = fresh;
+        }
+        __ mov(fresh.reg(), edx);
+      } else {
+        // Neither left nor right is in edx.
+        remainder = allocator_->Allocate(edx);
+      }
+    }
+    ASSERT(remainder.is_register() && remainder.reg().is(edx));
+    ASSERT(!(left->is_register() && left->reg().is(edx)));
+    ASSERT(!(right->is_register() && right->reg().is(edx)));
+
+    left->ToRegister();
+    right->ToRegister();
+    frame_->Spill(eax);
+    frame_->Spill(edx);
+
+    // Check that left and right are smi tagged.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(op,
+                                          (op == Token::DIV) ? eax : edx,
+                                          left->reg(),
+                                          right->reg(),
+                                          overwrite_mode);
+    if (left->reg().is(right->reg())) {
+      __ test(left->reg(), Immediate(kSmiTagMask));
+    } else {
+      // Use the quotient register as a scratch for the tag check.
+      if (!left_is_in_eax) __ mov(eax, left->reg());
+      left_is_in_eax = false;  // About to destroy the value in eax.
+      __ or_(eax, Operand(right->reg()));
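+      // A heap object has its low tag bit set, so the low bit of
+      // (left | right) is zero iff both operands are smis.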
+      ASSERT(kSmiTag == 0);  // Adjust test if not the case.
+      __ test(eax, Immediate(kSmiTagMask));
+    }
+    deferred->Branch(not_zero);
+
+    if (!left_is_in_eax) __ mov(eax, left->reg());
+    // Sign extend eax into edx:eax.
+    __ cdq();
+    // Check for 0 divisor.
+    __ test(right->reg(), Operand(right->reg()));
+    deferred->Branch(zero);
+    // Divide edx:eax by the right operand.
+    __ idiv(right->reg());
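+    // Both operands were smi tagged (value << 1), so the tags cancel in
+    // the quotient: (2a)/(2b) == a/b leaves an untagged quotient in eax,
+    // while the remainder 2*(a % b) in edx is already a valid smi.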
+
+    // Complete the operation.
+    if (op == Token::DIV) {
+      // Check for negative zero result.  If result is zero, and divisor
+      // is negative, return a floating point negative zero.  The
+      // virtual frame is unchanged in this block, so local control flow
+      // can use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(left->reg(), Operand(left->reg()));
+      __ j(not_zero, &non_zero_result);
+      __ test(right->reg(), Operand(right->reg()));
+      deferred->Branch(negative);
+      __ bind(&non_zero_result);
+      // Check for the corner case of dividing the most negative smi by
+      // -1. We cannot use the overflow flag, since it is not set by the
+      // idiv instruction.
+      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
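+      // The tagged Smi::kMinValue is 0x80000000; divided by the tagged
+      // -1 (0xfffffffe) the quotient is 0x40000000, which cannot be
+      // smi tagged without overflowing into the sign bit.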
+      __ cmp(eax, 0x40000000);
+      deferred->Branch(equal);
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      deferred->Branch(not_zero);
+      // Tag the result and store it in the quotient register.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      deferred->BindExit();
+      left->Unuse();
+      right->Unuse();
+      frame_->Push(&quotient);
+    } else {
+      ASSERT(op == Token::MOD);
+      // Check for a negative zero result.  If the result is zero, and
+      // the dividend is negative, return a floating point negative
+      // zero.  The frame is unchanged in this block, so local control
+      // flow can use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(edx, Operand(edx));
+      __ j(not_zero, &non_zero_result, taken);
+      __ test(left->reg(), Operand(left->reg()));
+      deferred->Branch(negative);
+      __ bind(&non_zero_result);
+      deferred->BindExit();
+      left->Unuse();
+      right->Unuse();
+      frame_->Push(&remainder);
+    }
+    return;
+  }
+
+  // Special handling of shift operations because they use fixed
+  // registers.
+  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+    // Move left out of ecx if necessary.
+    if (left->is_register() && left->reg().is(ecx)) {
+      *left = allocator_->Allocate();
+      ASSERT(left->is_valid());
+      __ mov(left->reg(), ecx);
+    }
+    right->ToRegister(ecx);
+    left->ToRegister();
+    ASSERT(left->is_register() && !left->reg().is(ecx));
+    ASSERT(right->is_register() && right->reg().is(ecx));
+
+    // We will modify right, it must be spilled.
+    frame_->Spill(ecx);
+
+    // Use a fresh answer register to avoid spilling the left operand.
+    Result answer = allocator_->Allocate();
+    ASSERT(answer.is_valid());
+    // Check that both operands are smis using the answer register as a
+    // temporary.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(op,
+                                          answer.reg(),
+                                          left->reg(),
+                                          ecx,
+                                          overwrite_mode);
+    __ mov(answer.reg(), left->reg());
+    __ or_(answer.reg(), Operand(ecx));
+    __ test(answer.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(not_zero);
+
+    // Untag both operands.
+    __ mov(answer.reg(), left->reg());
+    __ sar(answer.reg(), kSmiTagSize);
+    __ sar(ecx, kSmiTagSize);
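+    // Both operands are now untagged.  The count must be in ecx because
+    // the ia32 variable-count shift instructions take it in cl (and use
+    // only its low five bits).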
+    // Perform the operation.
+    switch (op) {
+      case Token::SAR:
+        __ sar(answer.reg());
+        // No checks of the result are necessary.
+        break;
+      case Token::SHR: {
+        Label result_ok;
+        __ shr(answer.reg());
+        // Check that the *unsigned* result fits in a smi.  Neither of
+        // the two high-order bits can be set:
+        //  * 0x80000000: the high bit would be lost when smi tagging.
+        //  * 0x40000000: this number would become negative when smi
+        //    tagging.
+        // These two cases can only happen with shifts by 0 or 1 when
+        // handed a valid smi.  If the answer cannot be represented by a
+        // smi, restore the left and right arguments, and jump to slow
+        // case.  The low bit of the left argument may be lost, but only
+        // in a case where it is dropped anyway.
+        __ test(answer.reg(), Immediate(0xc0000000));
+        __ j(zero, &result_ok);
+        ASSERT(kSmiTag == 0);
+        __ shl(ecx, kSmiTagSize);
+        deferred->Jump();
+        __ bind(&result_ok);
+        break;
+      }
+      case Token::SHL: {
+        Label result_ok;
+        __ shl(answer.reg());
+        // Check that the *signed* result fits in a smi.
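+        // The untagged smi range is [-2^30, 2^30 - 1].  The cmp computes
+        // answer + 0x40000000 (mod 2^32), whose sign bit is clear
+        // exactly for values in that range.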
+        __ cmp(answer.reg(), 0xc0000000);
+        __ j(positive, &result_ok);
+        ASSERT(kSmiTag == 0);
+        __ shl(ecx, kSmiTagSize);
+        deferred->Jump();
+        __ bind(&result_ok);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    // Smi-tag the result in answer.
+    ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
+    __ lea(answer.reg(),
+           Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+    deferred->BindExit();
+    left->Unuse();
+    right->Unuse();
+    frame_->Push(&answer);
+    return;
+  }
+
+  // Handle the other binary operations.
+  left->ToRegister();
+  right->ToRegister();
+  // A newly allocated register, answer, is used to hold the result.
+  // The registers containing left and right are not modified, so they
+  // don't need to be spilled in the fast case.
+  Result answer = allocator_->Allocate();
+  ASSERT(answer.is_valid());
+
+  // Perform the smi tag check.
+  DeferredInlineBinaryOperation* deferred =
+      new DeferredInlineBinaryOperation(op,
+                                        answer.reg(),
+                                        left->reg(),
+                                        right->reg(),
+                                        overwrite_mode);
+  if (left->reg().is(right->reg())) {
+    __ test(left->reg(), Immediate(kSmiTagMask));
+  } else {
+    __ mov(answer.reg(), left->reg());
+    __ or_(answer.reg(), Operand(right->reg()));
+    ASSERT(kSmiTag == 0);  // Adjust test if not the case.
+    __ test(answer.reg(), Immediate(kSmiTagMask));
+  }
+  deferred->Branch(not_zero);
+  __ mov(answer.reg(), left->reg());
+  switch (op) {
+    case Token::ADD:
+      __ add(answer.reg(), Operand(right->reg()));  // Add optimistically.
+      deferred->Branch(overflow);
+      break;
+
+    case Token::SUB:
+      __ sub(answer.reg(), Operand(right->reg()));  // Subtract optimistically.
+      deferred->Branch(overflow);
+      break;
+
+    case Token::MUL: {
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+      // Remove smi tag from the left operand (but keep sign).
+      // Left-hand operand has been copied into answer.
+      __ sar(answer.reg(), kSmiTagSize);
+      // Do multiplication of smis, leaving result in answer.
+      __ imul(answer.reg(), Operand(right->reg()));
+      // Go slow on overflows.
+      deferred->Branch(overflow);
+      // Check for negative zero result.  If product is zero, and one
+      // argument is negative, go to slow case.  The frame is unchanged
+      // in this block, so local control flow can use a Label rather
+      // than a JumpTarget.
+      Label non_zero_result;
+      __ test(answer.reg(), Operand(answer.reg()));
+      __ j(not_zero, &non_zero_result, taken);
+      __ mov(answer.reg(), left->reg());
+      __ or_(answer.reg(), Operand(right->reg()));
+      deferred->Branch(negative);
+      __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
+      __ bind(&non_zero_result);
+      break;
+    }
+
+    case Token::BIT_OR:
+      __ or_(answer.reg(), Operand(right->reg()));
+      break;
+
+    case Token::BIT_AND:
+      __ and_(answer.reg(), Operand(right->reg()));
+      break;
+
+    case Token::BIT_XOR:
+      __ xor_(answer.reg(), Operand(right->reg()));
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+  deferred->BindExit();
+  left->Unuse();
+  right->Unuse();
+  frame_->Push(&answer);
+}
+
+
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+  DeferredInlineSmiOperation(Token::Value op,
+                             Register dst,
+                             Register src,
+                             Smi* value,
+                             OverwriteMode overwrite_mode)
+      : op_(op),
+        dst_(dst),
+        src_(src),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Register src_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperation::Generate() {
+  __ push(src_);
+  __ push(Immediate(value_));
+  // For mod we don't generate all the Smi code inline.
+  GenericBinaryOpStub stub(
+      op_,
+      overwrite_mode_,
+      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
+  __ CallStub(&stub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
+class DeferredInlineSmiOperationReversed: public DeferredCode {
+ public:
+  DeferredInlineSmiOperationReversed(Token::Value op,
+                                     Register dst,
+                                     Smi* value,
+                                     Register src,
+                                     OverwriteMode overwrite_mode)
+      : op_(op),
+        dst_(dst),
+        value_(value),
+        src_(src),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperationReversed");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Smi* value_;
+  Register src_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperationReversed::Generate() {
+  __ push(Immediate(value_));
+  __ push(src_);
+  GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The result of src + value is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative addition and call the appropriate
+// specialized stub for add.  The result is left in dst.
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+  DeferredInlineSmiAdd(Register dst,
+                       Smi* value,
+                       OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiAdd");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAdd::Generate() {
+  // Undo the optimistic add operation and call the shared stub.
+  __ sub(Operand(dst_), Immediate(value_));
+  __ push(dst_);
+  __ push(Immediate(value_));
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The result of value + src is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative addition and call the appropriate
+// specialized stub for add.  The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+  DeferredInlineSmiAddReversed(Register dst,
+                               Smi* value,
+                               OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiAddReversed");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAddReversed::Generate() {
+  // Undo the optimistic add operation and call the shared stub.
+  __ sub(Operand(dst_), Immediate(value_));
+  __ push(Immediate(value_));
+  __ push(dst_);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The result of src - value is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative subtraction and call the
+// appropriate specialized stub for subtract.  The result is left in
+// dst.
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+  DeferredInlineSmiSub(Register dst,
+                       Smi* value,
+                       OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiSub");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiSub::Generate() {
+  // Undo the optimistic sub operation and call the shared stub.
+  __ add(Operand(dst_), Immediate(value_));
+  __ push(dst_);
+  __ push(Immediate(value_));
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+                                               Result* operand,
+                                               Handle<Object> value,
+                                               SmiAnalysis* type,
+                                               bool reversed,
+                                               OverwriteMode overwrite_mode) {
+  // NOTE: This is an attempt to inline (a bit) more of the code for
+  // some possible smi operations (like + and -) when (at least) one
+  // of the operands is a constant smi.
+  // Consumes the argument "operand".
+
+  // TODO(199): Optimize some special cases of operations involving a
+  // smi literal (multiply by 2, shift by 0, etc.).
+  if (IsUnsafeSmi(value)) {
+    Result unsafe_operand(value);
+    if (reversed) {
+      LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+                               overwrite_mode);
+    } else {
+      LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+                               overwrite_mode);
+    }
+    ASSERT(!operand->is_valid());
+    return;
+  }
+
+  // Get the literal value.
+  Smi* smi_value = Smi::cast(*value);
+  int int_value = smi_value->value();
+
+  switch (op) {
+    case Token::ADD: {
+      operand->ToRegister();
+      frame_->Spill(operand->reg());
+
+      // Optimistically add.  Call the specialized add stub if the
+      // result is not a smi or overflows.
+      DeferredCode* deferred = NULL;
+      if (reversed) {
+        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+                                                    smi_value,
+                                                    overwrite_mode);
+      } else {
+        deferred = new DeferredInlineSmiAdd(operand->reg(),
+                                            smi_value,
+                                            overwrite_mode);
+      }
+      __ add(Operand(operand->reg()), Immediate(value));
+      deferred->Branch(overflow);
+      __ test(operand->reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+      deferred->BindExit();
+      frame_->Push(operand);
+      break;
+    }
+
+    case Token::SUB: {
+      DeferredCode* deferred = NULL;
+      Result answer;  // Only allocate a new register if reversed.
+      if (reversed) {
+        // The reversed case is only hit when the right operand is not a
+        // constant.
+        ASSERT(operand->is_register());
+        answer = allocator()->Allocate();
+        ASSERT(answer.is_valid());
+        __ Set(answer.reg(), Immediate(value));
+        deferred = new DeferredInlineSmiOperationReversed(op,
+                                                          answer.reg(),
+                                                          smi_value,
+                                                          operand->reg(),
+                                                          overwrite_mode);
+        __ sub(answer.reg(), Operand(operand->reg()));
+      } else {
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        answer = *operand;
+        deferred = new DeferredInlineSmiSub(operand->reg(),
+                                            smi_value,
+                                            overwrite_mode);
+        __ sub(Operand(operand->reg()), Immediate(value));
+      }
+      deferred->Branch(overflow);
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+      deferred->BindExit();
+      operand->Unuse();
+      frame_->Push(&answer);
+      break;
+    }
+
+    case Token::SAR:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        __ test(operand->reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+        if (shift_value > 0) {
+          __ sar(operand->reg(), shift_value);
+          __ and_(operand->reg(), ~kSmiTagMask);
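+          // Shifting the tagged value 2 * v right arithmetically by s
+          // and clearing the tag bit yields 2 * (v >> s), the correctly
+          // tagged result: ((2 * v) >> s) & ~1 == 2 * (v >> s) for s >= 1.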
+        }
+        deferred->BindExit();
+        frame_->Push(operand);
+      }
+      break;
+
+    case Token::SHR:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        Result answer = allocator()->Allocate();
+        ASSERT(answer.is_valid());
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           answer.reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        __ test(operand->reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+        __ mov(answer.reg(), operand->reg());
+        __ sar(answer.reg(), kSmiTagSize);
+        __ shr(answer.reg(), shift_value);
+        // A negative Smi shifted right by two or more bits is in the
+        // positive Smi range, so only shifts by 0 or 1 need this check.
+        if (shift_value < 2) {
+          __ test(answer.reg(), Immediate(0xc0000000));
+          deferred->Branch(not_zero);
+        }
+        operand->Unuse();
+        ASSERT(kSmiTagSize == times_2);  // Adjust the code if not true.
+        __ lea(answer.reg(),
+               Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+        deferred->BindExit();
+        frame_->Push(&answer);
+      }
+      break;
+
+    case Token::SHL:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        if (shift_value == 0) {
+          // Spill operand so it can be overwritten in the slow case.
+          frame_->Spill(operand->reg());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             operand->reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ test(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          deferred->BindExit();
+          frame_->Push(operand);
+        } else {
+          // Use a fresh temporary for nonzero shift values.
+          Result answer = allocator()->Allocate();
+          ASSERT(answer.is_valid());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             answer.reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ test(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          __ mov(answer.reg(), operand->reg());
+          ASSERT(kSmiTag == 0);  // adjust code if not the case
+          // We do no shifts, only the Smi conversion, if shift_value is 1.
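+          // answer holds the still-tagged operand, i.e. 2 * v; shifting
+          // it left by shift_value - 1 yields v << shift_value, and the
+          // final add doubles that back into a tagged smi while setting
+          // the overflow flag for the range check.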
+          if (shift_value > 1) {
+            __ shl(answer.reg(), shift_value - 1);
+          }
+          // Convert int result to Smi, checking that it is in int range.
+          ASSERT(kSmiTagSize == 1);  // adjust code if not the case
+          __ add(answer.reg(), Operand(answer.reg()));
+          deferred->Branch(overflow);
+          deferred->BindExit();
+          operand->Unuse();
+          frame_->Push(&answer);
+        }
+      }
+      break;
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      operand->ToRegister();
+      frame_->Spill(operand->reg());
+      DeferredCode* deferred = NULL;
+      if (reversed) {
+        deferred = new DeferredInlineSmiOperationReversed(op,
+                                                          operand->reg(),
+                                                          smi_value,
+                                                          operand->reg(),
+                                                          overwrite_mode);
+      } else {
+        deferred =  new DeferredInlineSmiOperation(op,
+                                                   operand->reg(),
+                                                   operand->reg(),
+                                                   smi_value,
+                                                   overwrite_mode);
+      }
+      __ test(operand->reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+      if (op == Token::BIT_AND) {
+        __ and_(Operand(operand->reg()), Immediate(value));
+      } else if (op == Token::BIT_XOR) {
+        if (int_value != 0) {
+          __ xor_(Operand(operand->reg()), Immediate(value));
+        }
+      } else {
+        ASSERT(op == Token::BIT_OR);
+        if (int_value != 0) {
+          __ or_(Operand(operand->reg()), Immediate(value));
+        }
+      }
+      deferred->BindExit();
+      frame_->Push(operand);
+      break;
+    }
+
+    // Generate inline code for mod of powers of 2 and negative powers of 2.
+    case Token::MOD:
+      if (!reversed &&
+          int_value != 0 &&
+          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+                                                                operand->reg(),
+                                                                operand->reg(),
+                                                                smi_value,
+                                                                overwrite_mode);
+        // Check for negative or non-Smi left hand side.
+        __ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
+        deferred->Branch(not_zero);
+        if (int_value < 0) int_value = -int_value;
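+        // For non-negative smi x and power-of-two m, x % m == x & (m - 1);
+        // on the tagged value the mask (m << kSmiTagSize) - 1 keeps the
+        // (zero) tag bit and leaves a correctly tagged result.  Negative
+        // divisors reduce to |m| because the dividend is non-negative.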
+        if (int_value == 1) {
+          __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
+        } else {
+          __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
+        }
+        deferred->BindExit();
+        frame_->Push(operand);
+        break;
+      }
+      // Fall through if we did not find a power of 2 on the right hand side!
+
+    default: {
+      Result constant_operand(value);
+      if (reversed) {
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        LikelySmiBinaryOperation(op, operand, &constant_operand,
+                                 overwrite_mode);
+      }
+      break;
+    }
+  }
+  ASSERT(!operand->is_valid());
+}
+
+
+void CodeGenerator::Comparison(Condition cc,
+                               bool strict,
+                               ControlDestination* dest) {
+  // Strict only makes sense for equality comparisons.
+  ASSERT(!strict || cc == equal);
+
+  Result left_side;
+  Result right_side;
+  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
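+  // For example, 'a > b' is evaluated as 'b < a', so the operands are
+  // popped in reverse to put them in the swapped roles.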
+  if (cc == greater || cc == less_equal) {
+    cc = ReverseCondition(cc);
+    left_side = frame_->Pop();
+    right_side = frame_->Pop();
+  } else {
+    right_side = frame_->Pop();
+    left_side = frame_->Pop();
+  }
+  ASSERT(cc == less || cc == equal || cc == greater_equal);
+
+  // If either side is a constant smi, optimize the comparison.
+  bool left_side_constant_smi =
+      left_side.is_constant() && left_side.handle()->IsSmi();
+  bool right_side_constant_smi =
+      right_side.is_constant() && right_side.handle()->IsSmi();
+  bool left_side_constant_null =
+      left_side.is_constant() && left_side.handle()->IsNull();
+  bool right_side_constant_null =
+      right_side.is_constant() && right_side.handle()->IsNull();
+
+  if (left_side_constant_smi || right_side_constant_smi) {
+    if (left_side_constant_smi && right_side_constant_smi) {
+      // Trivial case, comparing two constants.
+      int left_value = Smi::cast(*left_side.handle())->value();
+      int right_value = Smi::cast(*right_side.handle())->value();
+      switch (cc) {
+        case less:
+          dest->Goto(left_value < right_value);
+          break;
+        case equal:
+          dest->Goto(left_value == right_value);
+          break;
+        case greater_equal:
+          dest->Goto(left_value >= right_value);
+          break;
+        default:
+          UNREACHABLE();
+      }
+    } else {  // Only one side is a constant Smi.
+      // If left side is a constant Smi, reverse the operands.
+      // Since one side is a constant Smi, conversion order does not matter.
+      if (left_side_constant_smi) {
+        Result temp = left_side;
+        left_side = right_side;
+        right_side = temp;
+        cc = ReverseCondition(cc);
+        // This may reintroduce greater or less_equal as the value of cc.
+        // CompareStub and the inline code both support all values of cc.
+      }
+      // Implement comparison against a constant Smi, inlining the case
+      // where both sides are Smis.
+      left_side.ToRegister();
+
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_smi;
+      Register left_reg = left_side.reg();
+      Handle<Object> right_val = right_side.handle();
+      __ test(left_side.reg(), Immediate(kSmiTagMask));
+      is_smi.Branch(zero, taken);
+
+      // Set up and call the compare stub.
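+      // The stub returns a value that is negative, zero, or positive
+      // according to the comparison of the operands, so the original
+      // condition cc can be applied after comparing it with zero.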
+      CompareStub stub(cc, strict);
+      Result result = frame_->CallStub(&stub, &left_side, &right_side);
+      result.ToRegister();
+      __ cmp(result.reg(), 0);
+      result.Unuse();
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+
+      is_smi.Bind();
+      left_side = Result(left_reg);
+      right_side = Result(right_val);
+      // Test smi equality and comparison by signed int comparison.
+      if (IsUnsafeSmi(right_side.handle())) {
+        right_side.ToRegister();
+        __ cmp(left_side.reg(), Operand(right_side.reg()));
+      } else {
+        __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
+      }
+      left_side.Unuse();
+      right_side.Unuse();
+      dest->Split(cc);
+    }
+  } else if (cc == equal &&
+             (left_side_constant_null || right_side_constant_null)) {
+    // To make null checks efficient, we check if either the left side or
+    // the right side is the constant 'null'.
+    // If so, we optimize the code by inlining a null check instead of
+    // calling the (very) general runtime routine for checking equality.
+    Result operand = left_side_constant_null ? right_side : left_side;
+    right_side.Unuse();
+    left_side.Unuse();
+    operand.ToRegister();
+    __ cmp(operand.reg(), Factory::null_value());
+    if (strict) {
+      operand.Unuse();
+      dest->Split(equal);
+    } else {
+      // The 'null' value is only equal to 'undefined' if using non-strict
+      // comparisons.
+      dest->true_target()->Branch(equal);
+      __ cmp(operand.reg(), Factory::undefined_value());
+      dest->true_target()->Branch(equal);
+      __ test(operand.reg(), Immediate(kSmiTagMask));
+      dest->false_target()->Branch(equal);
+
+      // It can be an undetectable object.
+      // Use a scratch register in preference to spilling operand.reg().
+      Result temp = allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(),
+             FieldOperand(operand.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(),
+                 FieldOperand(temp.reg(), Map::kBitFieldOffset));
+      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+      temp.Unuse();
+      operand.Unuse();
+      dest->Split(not_zero);
+    }
+  } else {  // Neither side is a constant Smi or null.
+    // If either side is a non-smi constant, skip the smi check.
+    bool known_non_smi =
+        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+        (right_side.is_constant() && !right_side.handle()->IsSmi());
+    left_side.ToRegister();
+    right_side.ToRegister();
+
+    if (known_non_smi) {
+      // When non-smi, call out to the compare stub.
+      CompareStub stub(cc, strict);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      if (cc == equal) {
+        __ test(answer.reg(), Operand(answer.reg()));
+      } else {
+        __ cmp(answer.reg(), 0);
+      }
+      answer.Unuse();
+      dest->Split(cc);
+    } else {
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_smi;
+      Register left_reg = left_side.reg();
+      Register right_reg = right_side.reg();
+
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(), left_side.reg());
+      __ or_(temp.reg(), Operand(right_side.reg()));
+      __ test(temp.reg(), Immediate(kSmiTagMask));
+      temp.Unuse();
+      is_smi.Branch(zero, taken);
+      // When non-smi, call out to the compare stub.
+      CompareStub stub(cc, strict);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      if (cc == equal) {
+        __ test(answer.reg(), Operand(answer.reg()));
+      } else {
+        __ cmp(answer.reg(), 0);
+      }
+      answer.Unuse();
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+
+      is_smi.Bind();
+      left_side = Result(left_reg);
+      right_side = Result(right_reg);
+      __ cmp(left_side.reg(), Operand(right_side.reg()));
+      right_side.Unuse();
+      left_side.Unuse();
+      dest->Split(cc);
+    }
+  }
+}
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+  CallFunctionStub(int argc, InLoopFlag in_loop)
+      : argc_(argc), in_loop_(in_loop) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int argc_;
+  InLoopFlag in_loop_;
+
+#ifdef DEBUG
+  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+  Major MajorKey() { return CallFunction; }
+  int MinorKey() { return argc_; }
+  InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the value on top of the stack.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+                                      int position) {
+  // Push the arguments ("left-to-right") on the stack.
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  // Record the position for debugging purposes.
+  CodeForSourcePosition(position);
+
+  // Use the shared code stub to call the function.
+  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub call_function(arg_count, in_loop);
+  Result answer = frame_->CallStub(&call_function, arg_count + 1);
+  // Restore context and replace function on the stack with the
+  // result of the stub invocation.
+  frame_->RestoreContextRegister();
+  frame_->SetElementAt(0, &answer);
+}
+
+
+void CodeGenerator::CallApplyLazy(Property* apply,
+                                  Expression* receiver,
+                                  VariableProxy* arguments,
+                                  int position) {
+  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+  ASSERT(arguments->IsArguments());
+
+  JumpTarget slow, done;
+
+  // Load the apply function onto the stack. This will usually
+  // give us a megamorphic load site. Not super, but it works.
+  Reference ref(this, apply);
+  ref.GetValue(NOT_INSIDE_TYPEOF);
+  ASSERT(ref.type() == Reference::NAMED);
+
+  // Load the receiver and the existing arguments object onto the
+  // expression stack. Avoid allocating the arguments object here.
+  Load(receiver);
+  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+
+  // Emit the source position information after having loaded the
+  // receiver and the arguments.
+  CodeForSourcePosition(position);
+
+  // Check if the arguments object has been lazily allocated
+  // already. If so, just use that instead of copying the arguments
+  // from the stack. This also deals with cases where a local variable
+  // named 'arguments' has been introduced.
+  frame_->Dup();
+  Result probe = frame_->Pop();
+  bool try_lazy = true;
+  if (probe.is_constant()) {
+    try_lazy = probe.handle()->IsTheHole();
+  } else {
+    __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+    probe.Unuse();
+    slow.Branch(not_equal);
+  }
+
+  if (try_lazy) {
+    JumpTarget build_args;
+
+    // Get rid of the arguments object probe.
+    frame_->Drop();
+
+    // Before messing with the execution stack, we sync all
+    // elements. This is bound to happen anyway because we're
+    // about to call a function.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+
+    // Check that the receiver really is a JavaScript object.
+    { frame_->PushElementAt(0);
+      Result receiver = frame_->Pop();
+      receiver.ToRegister();
+      __ test(receiver.reg(), Immediate(kSmiTagMask));
+      build_args.Branch(zero);
+      Result tmp = allocator_->Allocate();
+      // We allow all JSObjects including JSFunctions.  As long as
+      // JS_FUNCTION_TYPE is the last instance type and it is right
+      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+      // bound.
+      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+      __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
+      build_args.Branch(less);
+    }
+
+    // Verify that we're invoking Function.prototype.apply.
+    { frame_->PushElementAt(1);
+      Result apply = frame_->Pop();
+      apply.ToRegister();
+      __ test(apply.reg(), Immediate(kSmiTagMask));
+      build_args.Branch(zero);
+      Result tmp = allocator_->Allocate();
+      __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+      build_args.Branch(not_equal);
+      __ mov(tmp.reg(),
+             FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+      Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+      __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+             Immediate(apply_code));
+      build_args.Branch(not_equal);
+    }
+
+    // Get the function receiver from the stack. Check that it
+    // really is a function.
+    __ mov(edi, Operand(esp, 2 * kPointerSize));
+    __ test(edi, Immediate(kSmiTagMask));
+    build_args.Branch(zero);
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    build_args.Branch(not_equal);
+
+    // Copy the arguments to this function possibly from the
+    // adaptor frame below it.
+    Label invoke, adapted;
+    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+    __ cmp(Operand(ecx),
+           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ j(equal, &adapted);
+
+    // No arguments adaptor frame. Copy fixed number of arguments.
+    __ mov(eax, Immediate(scope_->num_parameters()));
+    for (int i = 0; i < scope_->num_parameters(); i++) {
+      __ push(frame_->ParameterAt(i));
+    }
+    __ jmp(&invoke);
+
+    // Arguments adaptor frame present. Copy arguments from there, but
+    // limit the number copied to avoid stack overflow.
+    __ bind(&adapted);
+    static const uint32_t kArgumentsLimit = 1 * KB;
+    __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ shr(eax, kSmiTagSize);
+    __ mov(ecx, Operand(eax));
+    __ cmp(eax, kArgumentsLimit);
+    build_args.Branch(above);
+
+    // Loop through the arguments pushing them onto the execution
+    // stack. We don't inform the virtual frame of the push, so we don't
+    // have to worry about getting rid of the elements from the virtual
+    // frame.
+    Label loop;
+    __ bind(&loop);
+    __ test(ecx, Operand(ecx));
+    __ j(zero, &invoke);
+    __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
+    __ dec(ecx);
+    __ jmp(&loop);
+
+    // Invoke the function. The virtual frame knows about the receiver,
+    // so make sure to forget it explicitly.
+    __ bind(&invoke);
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION);
+    frame_->Forget(1);
+    Result result = allocator()->Allocate(eax);
+    frame_->SetElementAt(0, &result);
+    done.Jump();
+
+    // Slow case: allocate the arguments object since we know it isn't
+    // there, and fall through to the generic code that calls
+    // Function.prototype.apply.
+    build_args.Bind();
+    Result arguments_object = StoreArgumentsObject(false);
+    frame_->Push(&arguments_object);
+    slow.Bind();
+  }
+
+  // Flip the apply function and the function to call on the stack, so
+  // the function looks like the receiver of the apply call. This way,
+  // the generic Function.prototype.apply implementation can deal with
+  // the call like it usually does.
+  Result a2 = frame_->Pop();
+  Result a1 = frame_->Pop();
+  Result ap = frame_->Pop();
+  Result fn = frame_->Pop();
+  frame_->Push(&ap);
+  frame_->Push(&fn);
+  frame_->Push(&a1);
+  frame_->Push(&a2);
+  CallFunctionStub call_function(2, NOT_IN_LOOP);
+  Result res = frame_->CallStub(&call_function, 3);
+  frame_->Push(&res);
+
+  // All done. Restore context register after call.
+  if (try_lazy) done.Bind();
+  frame_->RestoreContextRegister();
+}
+
+
+class DeferredStackCheck: public DeferredCode {
+ public:
+  DeferredStackCheck() {
+    set_comment("[ DeferredStackCheck");
+  }
+
+  virtual void Generate();
+};
+
+
+void DeferredStackCheck::Generate() {
+  StackCheckStub stub;
+  __ CallStub(&stub);
+}
+
+
+void CodeGenerator::CheckStack() {
+  if (FLAG_check_stack) {
+    DeferredStackCheck* deferred = new DeferredStackCheck;
+    ExternalReference stack_guard_limit =
+        ExternalReference::address_of_stack_guard_limit();
+    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+    deferred->Branch(below);
+    deferred->BindExit();
+  }
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  Visit(statement);
+  if (frame_ != NULL) {
+    frame_->SpillAll();
+  }
+  set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  VisitStatements(statements);
+  if (frame_ != NULL) {
+    frame_->SpillAll();
+  }
+  set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+  ASSERT(!in_spilled_code());
+  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
+    Visit(statements->at(i));
+  }
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ Block");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  VisitStatements(node->statements());
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.  The inevitable call
+  // will sync frame elements to memory anyway, so we do it eagerly to
+  // allow us to push the arguments directly into place.
+  frame_->SyncRange(0, frame_->element_count() - 1);
+
+  frame_->EmitPush(Immediate(pairs));
+  frame_->EmitPush(esi);  // The context is the second argument.
+  frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+  Comment cmnt(masm_, "[ Declaration");
+  Variable* var = node->proxy()->var();
+  ASSERT(var != NULL);  // must have been resolved
+  Slot* slot = var->slot();
+
+  // If it was not possible to allocate the variable at compile time,
+  // we need to "declare" it at runtime to make sure it actually
+  // exists in the local context.
+  if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    // Variables with a "LOOKUP" slot were introduced as non-locals
+    // during variable resolution and must have mode DYNAMIC.
+    ASSERT(var->is_dynamic());
+    // For now, just do a runtime call.  Sync the virtual frame eagerly
+    // so we can simply push the arguments into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(esi);
+    frame_->EmitPush(Immediate(var->name()));
+    // Declaration nodes are always introduced in one of two modes.
+    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+    frame_->EmitPush(Immediate(Smi::FromInt(attr)));
+    // Push initial value, if any.
+    // Note: For variables we must not push an initial value (such as
+    // 'undefined') because we may have a (legal) redeclaration and we
+    // must not destroy the current value.
+    if (node->mode() == Variable::CONST) {
+      frame_->EmitPush(Immediate(Factory::the_hole_value()));
+    } else if (node->fun() != NULL) {
+      Load(node->fun());
+    } else {
+      frame_->EmitPush(Immediate(Smi::FromInt(0)));  // no initial value!
+    }
+    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
+    // Ignore the return value (declarations are statements).
+    return;
+  }
+
+  ASSERT(!var->is_global());
+
+  // If we have a function or a constant, we need to initialize the variable.
+  Expression* val = NULL;
+  if (node->mode() == Variable::CONST) {
+    val = new Literal(Factory::the_hole_value());
+  } else {
+    val = node->fun();  // NULL if we don't have a function
+  }
+
+  if (val != NULL) {
+    {
+      // Set the initial value.
+      Reference target(this, node->proxy());
+      Load(val);
+      target.SetValue(NOT_CONST_INIT);
+      // The reference is removed from the stack (preserving TOS) when
+      // it goes out of scope.
+    }
+    // Get rid of the assigned value (declarations are statements).
+    frame_->Drop();
+  }
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  CodeForStatementPosition(node);
+  Expression* expression = node->expression();
+  expression->MarkAsStatement();
+  Load(expression);
+  // Remove the lingering expression result from the top of stack.
+  frame_->Drop();
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "// EmptyStatement");
+  CodeForStatementPosition(node);
+  // nothing to do
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ IfStatement");
+  // Generate different code depending on which parts of the if statement
+  // are present or not.
+  bool has_then_stm = node->HasThenStatement();
+  bool has_else_stm = node->HasElseStatement();
+
+  CodeForStatementPosition(node);
+  JumpTarget exit;
+  if (has_then_stm && has_else_stm) {
+    JumpTarget then;
+    JumpTarget else_;
+    ControlDestination dest(&then, &else_, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.false_was_fall_through()) {
+      // The else target was bound, so we compile the else part first.
+      Visit(node->else_statement());
+
+      // We may have dangling jumps to the then part.
+      if (then.is_linked()) {
+        if (has_valid_frame()) exit.Jump();
+        then.Bind();
+        Visit(node->then_statement());
+      }
+    } else {
+      // The then target was bound, so we compile the then part first.
+      Visit(node->then_statement());
+
+      if (else_.is_linked()) {
+        if (has_valid_frame()) exit.Jump();
+        else_.Bind();
+        Visit(node->else_statement());
+      }
+    }
+
+  } else if (has_then_stm) {
+    ASSERT(!has_else_stm);
+    JumpTarget then;
+    ControlDestination dest(&then, &exit, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.false_was_fall_through()) {
+      // The exit label was bound.  We may have dangling jumps to the
+      // then part.
+      if (then.is_linked()) {
+        exit.Unuse();
+        exit.Jump();
+        then.Bind();
+        Visit(node->then_statement());
+      }
+    } else {
+      // The then label was bound.
+      Visit(node->then_statement());
+    }
+
+  } else if (has_else_stm) {
+    ASSERT(!has_then_stm);
+    JumpTarget else_;
+    ControlDestination dest(&exit, &else_, false);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.true_was_fall_through()) {
+      // The exit label was bound.  We may have dangling jumps to the
+      // else part.
+      if (else_.is_linked()) {
+        exit.Unuse();
+        exit.Jump();
+        else_.Bind();
+        Visit(node->else_statement());
+      }
+    } else {
+      // The else label was bound.
+      Visit(node->else_statement());
+    }
+
+  } else {
+    ASSERT(!has_then_stm && !has_else_stm);
+    // We only care about the condition's side effects (not its value
+    // or control flow effect).  LoadCondition is called without
+    // forcing control flow.
+    ControlDestination dest(&exit, &exit, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+    if (!dest.is_used()) {
+      // We got a value on the frame rather than (or in addition to)
+      // control flow.
+      frame_->Drop();
+    }
+  }
+
+  if (exit.is_linked()) {
+    exit.Bind();
+  }
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ContinueStatement");
+  CodeForStatementPosition(node);
+  node->target()->continue_target()->Jump();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ BreakStatement");
+  CodeForStatementPosition(node);
+  node->target()->break_target()->Jump();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ReturnStatement");
+
+  CodeForStatementPosition(node);
+  Load(node->expression());
+  Result return_value = frame_->Pop();
+  if (function_return_is_shadowed_) {
+    function_return_.Jump(&return_value);
+  } else {
+    frame_->PrepareForReturn();
+    if (function_return_.is_bound()) {
+      // If the function return label is already bound we reuse the
+      // code by jumping to the return site.
+      function_return_.Jump(&return_value);
+    } else {
+      function_return_.Bind(&return_value);
+      GenerateReturnSequence(&return_value);
+    }
+  }
+}
+
+
+void CodeGenerator::GenerateReturnSequence(Result* return_value) {
+  // The return value is a live (but not currently reference counted)
+  // reference to eax.  This is safe because the current frame does not
+  // contain a reference to eax (it is prepared for the return by spilling
+  // all registers).
+  if (FLAG_trace) {
+    frame_->Push(return_value);
+    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+  }
+  return_value->ToRegister(eax);
+
+  // Add a label for checking the size of the code used for returning.
+  Label check_exit_codesize;
+  masm_->bind(&check_exit_codesize);
+
+  // Leave the frame and return popping the arguments and the
+  // receiver.
+  frame_->Exit();
+  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+  DeleteFrame();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check that the size of the code used for returning matches what is
+  // expected by the debugger.
+  ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
+            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WithEnterStatement");
+  CodeForStatementPosition(node);
+  Load(node->expression());
+  Result context;
+  if (node->is_catch_block()) {
+    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
+  } else {
+    context = frame_->CallRuntime(Runtime::kPushContext, 1);
+  }
+
+  // Update context local.
+  frame_->SaveContextRegister();
+
+  // Verify that the runtime call result and esi agree.
+  if (FLAG_debug_code) {
+    __ cmp(context.reg(), Operand(esi));
+    __ Assert(equal, "Runtime::NewContext should end up in esi");
+  }
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WithExitStatement");
+  CodeForStatementPosition(node);
+  // Pop context.
+  __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
+  // Update context local.
+  frame_->SaveContextRegister();
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ SwitchStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Compile the switch value.
+  Load(node->tag());
+
+  ZoneList<CaseClause*>* cases = node->cases();
+  int length = cases->length();
+  CaseClause* default_clause = NULL;
+
+  JumpTarget next_test;
+  // Compile the case label expressions and comparisons.  Exit early
+  // if a comparison is unconditionally true.  The target next_test is
+  // bound before the loop in order to indicate control flow to the
+  // first comparison.
+  next_test.Bind();
+  for (int i = 0; i < length && !next_test.is_unused(); i++) {
+    CaseClause* clause = cases->at(i);
+    // The default is not a test, but remember it for later.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    // We recycle the same target next_test for each test.  Bind it if
+    // the previous test has not done so and then unuse it for the
+    // loop.
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    next_test.Unuse();
+
+    // Duplicate the switch value.
+    frame_->Dup();
+
+    // Compile the label expression.
+    Load(clause->label());
+
+    // Compare and branch to the body if true or the next test if
+    // false.  Prefer the next test as a fall through.
+    ControlDestination dest(clause->body_target(), &next_test, false);
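+    // Case clauses are matched with strict equality, hence the 'true'
+    // strict flag passed to Comparison.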
+    Comparison(equal, true, &dest);
+
+    // If the comparison fell through to the true target, jump to the
+    // actual body.
+    if (dest.true_was_fall_through()) {
+      clause->body_target()->Unuse();
+      clause->body_target()->Jump();
+    }
+  }
+
+  // If there was control flow to a next test from the last one
+  // compiled, compile a jump to the default or break target.
+  if (!next_test.is_unused()) {
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    // Drop the switch value.
+    frame_->Drop();
+    if (default_clause != NULL) {
+      default_clause->body_target()->Jump();
+    } else {
+      node->break_target()->Jump();
+    }
+  }
+
+  // The last instruction emitted was a jump, either to the default
+  // clause or the break target, or else to a case body from the loop
+  // that compiles the tests.
+  ASSERT(!has_valid_frame());
+  // Compile case bodies as needed.
+  for (int i = 0; i < length; i++) {
+    CaseClause* clause = cases->at(i);
+
+    // There are two ways to reach the body: from the corresponding
+    // test or as the fall through of the previous body.
+    if (clause->body_target()->is_linked() || has_valid_frame()) {
+      if (clause->body_target()->is_linked()) {
+        if (has_valid_frame()) {
+          // If the body is reachable both by a jump from its test and by
+          // falling through from the previous body, put a jump on the fall
+          // through path to skip the dropping of the switch value that the
+          // test path requires.  The exception is the default, which has
+          // already had the switch value dropped.
+          if (clause->is_default()) {
+            clause->body_target()->Bind();
+          } else {
+            JumpTarget body;
+            body.Jump();
+            clause->body_target()->Bind();
+            frame_->Drop();
+            body.Bind();
+          }
+        } else {
+          // No fall through to worry about.
+          clause->body_target()->Bind();
+          if (!clause->is_default()) {
+            frame_->Drop();
+          }
+        }
+      } else {
+        // Otherwise, we have only fall through.
+        ASSERT(has_valid_frame());
+      }
+
+      // We are now prepared to compile the body.
+      Comment cmnt(masm_, "[ Case body");
+      VisitStatements(clause->statements());
+    }
+    clause->body_target()->Unuse();
+  }
+
+  // We may not have a valid frame here so bind the break target only
+  // if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ LoopStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+  // known result for the test expression, with no side effects.
+  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+  if (node->cond() == NULL) {
+    ASSERT(node->type() == LoopStatement::FOR_LOOP);
+    info = ALWAYS_TRUE;
+  } else {
+    Literal* lit = node->cond()->AsLiteral();
+    if (lit != NULL) {
+      if (lit->IsTrue()) {
+        info = ALWAYS_TRUE;
+      } else if (lit->IsFalse()) {
+        info = ALWAYS_FALSE;
+      }
+    }
+  }
+
+  switch (node->type()) {
+    case LoopStatement::DO_LOOP: {
+      JumpTarget body(JumpTarget::BIDIRECTIONAL);
+      IncrementLoopNesting();
+
+      // Label the top of the loop for the backward jump if necessary.
+      if (info == ALWAYS_TRUE) {
+        // Use the continue target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else if (info == ALWAYS_FALSE) {
+        // No need to label it.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else {
+        // Continue is the test, so use the backward body target.
+        ASSERT(info == DONT_KNOW);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        body.Bind();
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // Compile the test.
+      if (info == ALWAYS_TRUE) {
+        // If control flow can fall off the end of the body, jump back
+        // to the top and bind the break target at the exit.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+
+      } else if (info == ALWAYS_FALSE) {
+        // We may have had continues or breaks in the body.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+
+      } else {
+        ASSERT(info == DONT_KNOW);
+        // We have to compile the test expression if it can be reached by
+        // control flow falling out of the body or via continue.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (has_valid_frame()) {
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+      }
+      break;
+    }
+
+    case LoopStatement::WHILE_LOOP: {
+      // Do not duplicate conditions that may have function literal
+      // subexpressions.  This can cause us to compile the function
+      // literal twice.
+      bool test_at_bottom = !node->may_have_function_literal();
+
+      IncrementLoopNesting();
+
+      // If the condition is always false and has no side effects, we
+      // do not need to compile anything.
+      if (info == ALWAYS_FALSE) break;
+
+      JumpTarget body;
+      if (test_at_bottom) {
+        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      }
+
+      // Based on the condition analysis, compile the test as necessary.
+      if (info == ALWAYS_TRUE) {
+        // We will not compile the test expression.  Label the top of
+        // the loop with the continue target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          // Continue is the test at the bottom, no need to label the
+          // test at the top.  The body is a backward target.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        } else {
+          // Label the test at the top as the continue target.  The
+          // body is a forward-only target.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        }
+        // Compile the test with the body as the true target and
+        // preferred fall-through and with the break target as the
+        // false target.
+        ControlDestination dest(&body, node->break_target(), true);
+        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+        if (dest.false_was_fall_through()) {
+          // If we got the break target as fall-through, the test may
+          // have been unconditionally false (if there are no jumps to
+          // the body).
+          if (!body.is_linked()) break;
+
+          // Otherwise, jump around the body on the fall through and
+          // then bind the body target.
+          node->break_target()->Unuse();
+          node->break_target()->Jump();
+          body.Bind();
+        }
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // Based on the condition analysis, compile the backward jump as
+      // necessary.
+      if (info == ALWAYS_TRUE) {
+        // The loop body has been labeled with the continue target.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          // If we have chosen to recompile the test at the bottom,
+          // then it is the continue target.
+          if (node->continue_target()->is_linked()) {
+            node->continue_target()->Bind();
+          }
+          if (has_valid_frame()) {
+            // The break target is the fall-through (body is a backward
+            // jump from here and thus an invalid fall-through).
+            ControlDestination dest(&body, node->break_target(), false);
+            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          }
+        } else {
+          // If we have chosen not to recompile the test at the
+          // bottom, jump back to the one at the top.
+          if (has_valid_frame()) {
+            node->continue_target()->Jump();
+          }
+        }
+      }
+
+      // The break target may be already bound (by the condition), or
+      // there may not be a valid frame.  Bind it only if needed.
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+    }
+
+    case LoopStatement::FOR_LOOP: {
+      // Do not duplicate conditions that may have function literal
+      // subexpressions.  This can cause us to compile the function
+      // literal twice.
+      bool test_at_bottom = !node->may_have_function_literal();
+
+      // Compile the init expression if present.
+      if (node->init() != NULL) {
+        Visit(node->init());
+      }
+
+      IncrementLoopNesting();
+
+      // If the condition is always false and has no side effects, we
+      // do not need to compile anything else.
+      if (info == ALWAYS_FALSE) break;
+
+      // Target for backward edge if no test at the bottom, otherwise
+      // unused.
+      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+      // Target for backward edge if there is a test at the bottom,
+      // otherwise used as target for test at the top.
+      JumpTarget body;
+      if (test_at_bottom) {
+        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      }
+
+      // Based on the condition analysis, compile the test as necessary.
+      if (info == ALWAYS_TRUE) {
+        // We will not compile the test expression.  Label the top of
+        // the loop.
+        if (node->next() == NULL) {
+          // Use the continue target if there is no update expression.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        } else {
+          // Otherwise use the backward loop target.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+          loop.Bind();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);
+        if (test_at_bottom) {
+          // Continue is either the update expression or the test at
+          // the bottom, no need to label the test at the top.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        } else if (node->next() == NULL) {
+          // We are not recompiling the test at the bottom and there
+          // is no update expression.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        } else {
+          // We are not recompiling the test at the bottom and there
+          // is an update expression.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+          loop.Bind();
+        }
+
+        // Compile the test with the body as the true target and
+        // preferred fall-through and with the break target as the
+        // false target.
+        ControlDestination dest(&body, node->break_target(), true);
+        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+        if (dest.false_was_fall_through()) {
+          // If we got the break target as fall-through, the test may
+          // have been unconditionally false (if there are no jumps to
+          // the body).
+          if (!body.is_linked()) break;
+
+          // Otherwise, jump around the body on the fall through and
+          // then bind the body target.
+          node->break_target()->Unuse();
+          node->break_target()->Jump();
+          body.Bind();
+        }
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // If there is an update expression, compile it if necessary.
+      if (node->next() != NULL) {
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+
+        // Control can reach the update by falling out of the body or
+        // by a continue.
+        if (has_valid_frame()) {
+          // Record the source position of the statement: although this
+          // code comes after the code for the body, it belongs to the
+          // loop statement and not to the body.
+          CodeForStatementPosition(node);
+          Visit(node->next());
+        }
+      }
+
+      // Based on the condition analysis, compile the backward jump as
+      // necessary.
+      if (info == ALWAYS_TRUE) {
+        if (has_valid_frame()) {
+          if (node->next() == NULL) {
+            node->continue_target()->Jump();
+          } else {
+            loop.Jump();
+          }
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          if (node->continue_target()->is_linked()) {
+            // We can have dangling jumps to the continue target if
+            // there was no update expression.
+            node->continue_target()->Bind();
+          }
+          // Control can reach the test at the bottom by falling out
+          // of the body, by a continue in the body, or from the
+          // update expression.
+          if (has_valid_frame()) {
+            // The break target is the fall-through (body is a
+            // backward jump from here).
+            ControlDestination dest(&body, node->break_target(), false);
+            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          }
+        } else {
+          // Otherwise, jump back to the test at the top.
+          if (has_valid_frame()) {
+            if (node->next() == NULL) {
+              node->continue_target()->Jump();
+            } else {
+              loop.Jump();
+            }
+          }
+        }
+      }
+
+      // The break target may be already bound (by the condition), or
+      // there may not be a valid frame.  Bind it only if needed.
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+    }
+  }
+
+  DecrementLoopNesting();
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ForInStatement");
+  CodeForStatementPosition(node);
+
+  JumpTarget primitive;
+  JumpTarget jsobject;
+  JumpTarget fixed_array;
+  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+  JumpTarget end_del_check;
+  JumpTarget exit;
+
+  // Get the object to enumerate over (converted to JSObject).
+  LoadAndSpill(node->enumerable());
+
+  // Both SpiderMonkey and kjs ignore null and undefined in contrast
+  // to the specification; section 12.6.4 mandates a call to ToObject.
+  frame_->EmitPop(eax);
+
+  // eax: value to be iterated over
+  __ cmp(eax, Factory::undefined_value());
+  exit.Branch(equal);
+  __ cmp(eax, Factory::null_value());
+  exit.Branch(equal);
+
+  // Stack layout in body:
+  // [iteration counter (smi)] <- slot 0
+  // [length of array]         <- slot 1
+  // [FixedArray]              <- slot 2
+  // [Map or 0]                <- slot 3
+  // [Object]                  <- slot 4
+
+  // Check if enumerable is already a JSObject
+  // eax: value to be iterated over
+  __ test(eax, Immediate(kSmiTagMask));
+  primitive.Branch(zero);
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  jsobject.Branch(above_equal);
+
+  primitive.Bind();
+  frame_->EmitPush(eax);
+  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
+  // The builtin call returns the value in eax, which is where we want
+  // it below.
+
+  jsobject.Bind();
+  // Get the set of properties (as a FixedArray or Map).
+  // eax: value to be iterated over
+  frame_->EmitPush(eax);  // push the object being iterated over (slot 4)
+
+  frame_->EmitPush(eax);  // push the Object (slot 4) for the runtime call
+  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a Map, we can do a fast modification check.
+  // Otherwise, we got a FixedArray, and we have to do a slow check.
+  // eax: map or fixed array (result from call to
+  // Runtime::kGetPropertyNamesFast)
+  __ mov(edx, Operand(eax));
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ cmp(ecx, Factory::meta_map());
+  fixed_array.Branch(not_equal);
+
+  // Get enum cache
+  // eax: map (result from call to Runtime::kGetPropertyNamesFast)
+  __ mov(ecx, Operand(eax));
+  __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
+  // Get the bridge array held in the enumeration index field.
+  __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
+  // Get the cache from the bridge array.
+  __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  frame_->EmitPush(eax);  // <- slot 3
+  frame_->EmitPush(edx);  // <- slot 2
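+  // The fixed array length is a raw integer; shift by kSmiTagSize to
+  // tag it as a smi before pushing it.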
+  __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
+  __ shl(eax, kSmiTagSize);
+  frame_->EmitPush(eax);  // <- slot 1
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+  entry.Jump();
+
+  fixed_array.Bind();
+  // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
+  frame_->EmitPush(eax);  // <- slot 2
+
+  // Push the length of the array and the initial index onto the stack.
+  __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
+  __ shl(eax, kSmiTagSize);
+  frame_->EmitPush(eax);  // <- slot 1
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+
+  // Condition.
+  entry.Bind();
+  // Grab the current frame's height for the break and continue
+  // targets only after all the state is pushed on the frame.
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  __ mov(eax, frame_->ElementAt(0));  // load the current count
+  __ cmp(eax, frame_->ElementAt(1));  // compare to the array length
+  node->break_target()->Branch(above_equal);
+
+  // Get the i'th entry of the array.
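+  // eax holds the index as a smi (index << kSmiTagSize), so the times_2
+  // scale factor yields a byte offset of index * kPointerSize.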
+  __ mov(edx, frame_->ElementAt(2));
+  __ mov(ebx, Operand(edx, eax, times_2,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case.
+  // eax: current iteration count
+  // ebx: i'th entry of the enum cache
+  __ mov(edx, frame_->ElementAt(3));
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  // eax: current iteration count
+  // ebx: i'th entry of the enum cache
+  // edx: expected map value
+  __ mov(ecx, frame_->ElementAt(4));
+  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ cmp(ecx, Operand(edx));
+  end_del_check.Branch(equal);
+
+  // Convert the entry to a string (or null if it isn't a property anymore).
+  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
+  frame_->EmitPush(ebx);  // push entry
+  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
+  __ mov(ebx, Operand(eax));
+
+  // If the property has been removed while iterating, we just skip it.
+  __ cmp(ebx, Factory::null_value());
+  node->continue_target()->Branch(equal);
+
+  end_del_check.Bind();
+  // Store the entry in the 'each' expression and take another spin in the
+  // loop.  ebx: i'th entry of the enum cache (or string thereof)
+  frame_->EmitPush(ebx);
+  { Reference each(this, node->each());
+    // Loading a reference may leave the frame in an unspilled state.
+    frame_->SpillAll();
+    if (!each.is_illegal()) {
+      if (each.size() > 0) {
+        frame_->EmitPush(frame_->ElementAt(each.size()));
+      }
+      // If the reference was to a slot, we rely on the convenient property
+      // that it doesn't matter whether a value (e.g., ebx pushed above) is
+      // right on top of or right underneath a zero-sized reference.
+      each.SetValue(NOT_CONST_INIT);
+      if (each.size() > 0) {
+        // It's safe to pop the value lying on top of the reference before
+        // unloading the reference itself (which preserves the top of stack,
+        // i.e., now the topmost value of the non-zero sized reference), since
+        // we will discard the top of stack after unloading the reference
+        // anyway.
+        frame_->Drop();
+      }
+    }
+  }
+  // Unloading a reference may leave the frame in an unspilled state.
+  frame_->SpillAll();
+
+  // Discard the i'th entry pushed above or else the remainder of the
+  // reference, whichever is currently on top of the stack.
+  frame_->Drop();
+
+  // Body.
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  VisitAndSpill(node->body());
+
+  // Next.  Reestablish a spilled frame in case we are coming here via
+  // a continue in the body.
+  node->continue_target()->Bind();
+  frame_->SpillAll();
+  frame_->EmitPop(eax);
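+  // Increment the smi index by adding the tagged constant directly; no
+  // untagging is needed since the smi tag bits stay zero.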
+  __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+  frame_->EmitPush(eax);
+  entry.Jump();
+
+  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
+  // any frame.
+  node->break_target()->Bind();
+  frame_->Drop(5);
+
+  // Exit.
+  exit.Bind();
+
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ TryCatch");
+  CodeForStatementPosition(node);
+
+  JumpTarget try_block;
+  JumpTarget exit;
+
+  try_block.Call();
+  // --- Catch block ---
+  frame_->EmitPush(eax);
+
+  // Store the caught exception in the catch variable.
+  { Reference ref(this, node->catch_var());
+    ASSERT(ref.is_slot());
+    // Load the exception to the top of the stack.  Here we make use of the
+    // convenient property that it doesn't matter whether a value is
+    // immediately on top of or underneath a zero-sized reference.
+    ref.SetValue(NOT_CONST_INIT);
+  }
+
+  // Remove the exception from the stack.
+  frame_->Drop();
+
+  VisitStatementsAndSpill(node->catch_block()->statements());
+  if (has_valid_frame()) {
+    exit.Jump();
+  }
+
+  // --- Try block ---
+  try_block.Bind();
+
+  frame_->PushTryHandler(TRY_CATCH_HANDLER);
+  int handler_height = frame_->height();
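+  // The frame height with the handler pushed is remembered so that the
+  // escape paths below can restore the frame to this height before
+  // unlinking the handler.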
+
+  // Shadow the jump targets for all escapes from the try block, including
+  // returns.  During shadowing, the original target is hidden as the
+  // ShadowTarget and operations on the original actually affect the
+  // shadowing target.
+  //
+  // We should probably try to unify the escaping targets and the return
+  // target.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
+  for (int i = 0; i < nof_escapes; i++) {
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+  }
+
+  // Generate code for the statements in the try block.
+  VisitStatementsAndSpill(node->try_block()->statements());
+
+  // Stop the introduced shadowing and determine whether any unlinks are
+  // required.  After shadowing stops, the original targets are unshadowed
+  // and the ShadowTargets represent the formerly shadowing targets.
+  bool has_unlinks = false;
+  for (int i = 0; i < shadows.length(); i++) {
+    shadows[i]->StopShadowing();
+    has_unlinks = has_unlinks || shadows[i]->is_linked();
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // Make sure that there's nothing left on the stack above the
+  // handler structure.
+  if (FLAG_debug_code) {
+    __ mov(eax, Operand::StaticVariable(handler_address));
+    __ cmp(esp, Operand(eax));
+    __ Assert(equal, "stack pointer should point to top handler");
+  }
+
+  // If we can fall off the end of the try block, unlink from try chain.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.  Unlink from
+    // the handler list and drop the rest of this handler from the
+    // frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(Operand::StaticVariable(handler_address));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+    if (has_unlinks) {
+      exit.Jump();
+    }
+  }
+
+  // Generate unlink code for the (formerly) shadowing targets that
+  // have been jumped to.  Deallocate each shadow target.
+  Result return_value;
+  for (int i = 0; i < shadows.length(); i++) {
+    if (shadows[i]->is_linked()) {
+      // Unlink from try chain; be careful not to destroy the TOS if
+      // there is one.
+      if (i == kReturnShadowIndex) {
+        shadows[i]->Bind(&return_value);
+        return_value.ToRegister(eax);
+      } else {
+        shadows[i]->Bind();
+      }
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that we
+      // break from (e.g., for...in) may have left stuff on the stack.
+      __ mov(esp, Operand::StaticVariable(handler_address));
+      frame_->Forget(frame_->height() - handler_height);
+
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      frame_->EmitPop(Operand::StaticVariable(handler_address));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (i == kReturnShadowIndex) {
+        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
+        shadows[i]->other_target()->Jump(&return_value);
+      } else {
+        shadows[i]->other_target()->Jump();
+      }
+    }
+  }
+
+  exit.Bind();
+}
+
+
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ TryFinally");
+  CodeForStatementPosition(node);
+
+  // State: used to keep track of the reason for entering the finally
+  // block.  Should probably be extended to hold information for
+  // break/continue from within the try block.
+  enum { FALLING, THROWING, JUMPING };
+
+  JumpTarget try_block;
+  JumpTarget finally_block;
+
+  try_block.Call();
+
+  frame_->EmitPush(eax);
+  // In case of thrown exceptions, this is where we continue.
+  __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
+  finally_block.Jump();
+
+  // --- Try block ---
+  try_block.Bind();
+
+  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+  int handler_height = frame_->height();
+
+  // Shadow the jump targets for all escapes from the try block, including
+  // returns.  During shadowing, the original target is hidden as the
+  // ShadowTarget and operations on the original actually affect the
+  // shadowing target.
+  //
+  // We should probably try to unify the escaping targets and the return
+  // target.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
+  for (int i = 0; i < nof_escapes; i++) {
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+  }
+
+  // Generate code for the statements in the try block.
+  VisitStatementsAndSpill(node->try_block()->statements());
+
+  // Stop the introduced shadowing and count the number of required unlinks.
+  // After shadowing stops, the original targets are unshadowed and the
+  // ShadowTargets represent the formerly shadowing targets.
+  int nof_unlinks = 0;
+  for (int i = 0; i < shadows.length(); i++) {
+    shadows[i]->StopShadowing();
+    if (shadows[i]->is_linked()) nof_unlinks++;
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // If we can fall off the end of the try block, unlink from the try
+  // chain and set the state on the frame to FALLING.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(Operand::StaticVariable(handler_address));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+    // Fake a top of stack value (unneeded when FALLING) and set the
+    // state in ecx, then jump around the unlink blocks if any.
+    frame_->EmitPush(Immediate(Factory::undefined_value()));
+    __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
+    if (nof_unlinks > 0) {
+      finally_block.Jump();
+    }
+  }
+
+  // Generate code to unlink and set the state for the (formerly)
+  // shadowing targets that have been jumped to.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (shadows[i]->is_linked()) {
+      // If we have come from the shadowed return, the return value is
+      // on the virtual frame.  We must preserve it until it is
+      // pushed.
+      if (i == kReturnShadowIndex) {
+        Result return_value;
+        shadows[i]->Bind(&return_value);
+        return_value.ToRegister(eax);
+      } else {
+        shadows[i]->Bind();
+      }
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that
+      // we break from (e.g., for...in) may have left stuff on the
+      // stack.
+      __ mov(esp, Operand::StaticVariable(handler_address));
+      frame_->Forget(frame_->height() - handler_height);
+
+      // Unlink this handler and drop it from the frame.
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      frame_->EmitPop(Operand::StaticVariable(handler_address));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (i == kReturnShadowIndex) {
+        // If this target shadowed the function return, materialize
+        // the return value on the stack.
+        frame_->EmitPush(eax);
+      } else {
+        // Fake TOS for targets that shadowed breaks and continues.
+        frame_->EmitPush(Immediate(Factory::undefined_value()));
+      }
+      __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
+      if (--nof_unlinks > 0) {
+        // If this is not the last unlink block, jump around the next.
+        finally_block.Jump();
+      }
+    }
+  }
+
+  // --- Finally block ---
+  finally_block.Bind();
+
+  // Push the state on the stack.
+  frame_->EmitPush(ecx);
+
+  // We keep two elements on the stack - the (possibly faked) result
+  // and the state - while evaluating the finally block.
+  //
+  // Generate code for the statements in the finally block.
+  VisitStatementsAndSpill(node->finally_block()->statements());
+
+  if (has_valid_frame()) {
+    // Restore state and return value or faked TOS.
+    frame_->EmitPop(ecx);
+    frame_->EmitPop(eax);
+  }
+
+  // Generate code to jump to the right destination for all used
+  // formerly shadowing targets.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (has_valid_frame() && shadows[i]->is_bound()) {
+      BreakTarget* original = shadows[i]->other_target();
+      __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
+      if (i == kReturnShadowIndex) {
+        // The return value is (already) in eax.
+        Result return_value = allocator_->Allocate(eax);
+        ASSERT(return_value.is_valid());
+        if (function_return_is_shadowed_) {
+          original->Branch(equal, &return_value);
+        } else {
+          // Branch around the preparation for return which may emit
+          // code.
+          JumpTarget skip;
+          skip.Branch(not_equal);
+          frame_->PrepareForReturn();
+          original->Jump(&return_value);
+          skip.Bind();
+        }
+      } else {
+        original->Branch(equal);
+      }
+    }
+  }
+
+  if (has_valid_frame()) {
+    // Check if we need to rethrow the exception.
+    JumpTarget exit;
+    __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
+    exit.Branch(not_equal);
+
+    // Rethrow exception.
+    frame_->EmitPush(eax);  // undo pop from above
+    frame_->CallRuntime(Runtime::kReThrow, 1);
+
+    // Done.
+    exit.Bind();
+  }
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ DebuggerStatement");
+  CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Spill everything, even constants, to the frame.
+  frame_->SpillAll();
+  frame_->CallRuntime(Runtime::kDebugBreak, 0);
+  // Ignore the return value.
+#endif
+}
+
+
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+  // Call the runtime to instantiate the function boilerplate object.
+  // The inevitable call will sync frame elements to memory anyway, so
+  // we do it eagerly to allow us to push the arguments directly into
+  // place.
+  ASSERT(boilerplate->IsBoilerplate());
+  frame_->SyncRange(0, frame_->element_count() - 1);
+
+  // Push the boilerplate on the stack.
+  frame_->EmitPush(Immediate(boilerplate));
+
+  // Create a new closure.
+  frame_->EmitPush(esi);
+  Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+  // Check for stack-overflow exception.
+  if (HasStackOverflow()) return;
+  InstantiateBoilerplate(boilerplate);
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+  InstantiateBoilerplate(node->boilerplate());
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+  Comment cmnt(masm_, "[ Conditional");
+  JumpTarget then;
+  JumpTarget else_;
+  JumpTarget exit;
+  ControlDestination dest(&then, &else_, true);
+  LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+  if (dest.false_was_fall_through()) {
+    // The else target was bound, so we compile the else part first.
+    Load(node->else_expression(), typeof_state());
+
+    if (then.is_linked()) {
+      exit.Jump();
+      then.Bind();
+      Load(node->then_expression(), typeof_state());
+    }
+  } else {
+    // The then target was bound, so we compile the then part first.
+    Load(node->then_expression(), typeof_state());
+
+    if (else_.is_linked()) {
+      exit.Jump();
+      else_.Bind();
+      Load(node->else_expression(), typeof_state());
+    }
+  }
+
+  exit.Bind();
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  if (slot->type() == Slot::LOOKUP) {
+    ASSERT(slot->var()->is_dynamic());
+
+    JumpTarget slow;
+    JumpTarget done;
+    Result value;
+
+    // Generate fast-case code for variables that might be shadowed by
+    // eval-introduced variables.  Eval is used a lot without
+    // introducing variables.  In those cases, we do not want to
+    // perform a runtime call for all variables in the scope
+    // containing the eval.
+    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+      // If there was no control flow to slow, we can exit early.
+      if (!slow.is_linked()) {
+        frame_->Push(&value);
+        return;
+      }
+
+      done.Jump(&value);
+
+    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+      // Only generate the fast case for locals that rewrite to slots.
+      // This rules out argument loads.
+      if (potential_slot != NULL) {
+        // Allocate a fresh register to use as a temp in
+        // ContextSlotOperandCheckExtensions and to hold the result
+        // value.
+        value = allocator_->Allocate();
+        ASSERT(value.is_valid());
+        __ mov(value.reg(),
+               ContextSlotOperandCheckExtensions(potential_slot,
+                                                 value,
+                                                 &slow));
+        if (potential_slot->var()->mode() == Variable::CONST) {
+          __ cmp(value.reg(), Factory::the_hole_value());
+          done.Branch(not_equal, &value);
+          __ mov(value.reg(), Factory::undefined_value());
+        }
+        // There is always control flow to slow from
+        // ContextSlotOperandCheckExtensions so we have to jump around
+        // it.
+        done.Jump(&value);
+      }
+    }
+
+    slow.Bind();
+    // A runtime call is inevitable.  We eagerly sync frame elements
+    // to memory so that we can push the arguments directly into place
+    // on top of the frame.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(esi);
+    frame_->EmitPush(Immediate(slot->var()->name()));
+    if (typeof_state == INSIDE_TYPEOF) {
+      value =
+          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    } else {
+      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+    }
+
+    done.Bind(&value);
+    frame_->Push(&value);
+
+  } else if (slot->var()->mode() == Variable::CONST) {
+    // Const slots may contain 'the hole' value (the constant hasn't been
+    // initialized yet) which needs to be converted into the 'undefined'
+    // value.
+    //
+    // We currently spill the virtual frame because constants use the
+    // potentially unsafe direct-frame access of SlotOperand.
+    VirtualFrame::SpilledScope spilled_scope;
+    Comment cmnt(masm_, "[ Load const");
+    JumpTarget exit;
+    __ mov(ecx, SlotOperand(slot, ecx));
+    __ cmp(ecx, Factory::the_hole_value());
+    exit.Branch(not_equal);
+    __ mov(ecx, Factory::undefined_value());
+    exit.Bind();
+    frame_->EmitPush(ecx);
+
+  } else if (slot->type() == Slot::PARAMETER) {
+    frame_->PushParameterAt(slot->index());
+
+  } else if (slot->type() == Slot::LOCAL) {
+    frame_->PushLocalAt(slot->index());
+
+  } else {
+    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+    // here.
+    //
+    // The use of SlotOperand below is safe for an unspilled frame
+    // because it will always be a context slot.
+    ASSERT(slot->type() == Slot::CONTEXT);
+    Result temp = allocator_->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
+    frame_->Push(&temp);
+  }
+}
+
+
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+                                                  TypeofState state) {
+  LoadFromSlot(slot, state);
+
+  // Bail out quickly if we're not using lazy arguments allocation.
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+  // ... or if the slot isn't a non-parameter arguments slot.
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+  // Pop the loaded value from the stack.
+  Result value = frame_->Pop();
+
+  // If the loaded value is a constant, we know whether the arguments
+  // object has been lazily loaded yet.
+  if (value.is_constant()) {
+    if (value.handle()->IsTheHole()) {
+      Result arguments = StoreArgumentsObject(false);
+      frame_->Push(&arguments);
+    } else {
+      frame_->Push(&value);
+    }
+    return;
+  }
+
+  // The loaded value is in a register. If it is the sentinel that
+  // indicates that we haven't loaded the arguments object yet, we
+  // need to do it now.
+  JumpTarget exit;
+  __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+  frame_->Push(&value);
+  exit.Branch(not_equal);
+  Result arguments = StoreArgumentsObject(false);
+  frame_->SetElementAt(0, &arguments);
+  exit.Bind();
+}
+
+
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+    Slot* slot,
+    TypeofState typeof_state,
+    JumpTarget* slow) {
+  // Check that no extension objects have been created by calls to
+  // eval from the current scope to the global scope.
+  Register context = esi;
+  Result tmp = allocator_->Allocate();
+  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
+               Immediate(0));
+        slow->Branch(not_equal, not_taken);
+      }
+      // Load next context in chain.
+      __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+      context = tmp.reg();
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.  If we have reached an eval scope, we check
+    // all extensions from this point.
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s != NULL && s->is_eval_scope()) {
+    // Loop up the context chain.  There is no frame effect so it is
+    // safe to use raw labels here.
+    Label next, fast;
+    if (!context.is(tmp.reg())) {
+      __ mov(tmp.reg(), context);
+    }
+    __ bind(&next);
+    // Terminate at global context.
+    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+           Immediate(Factory::global_context_map()));
+    __ j(equal, &fast);
+    // Check that extension is NULL.
+    __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+    slow->Branch(not_equal, not_taken);
+    // Load next context in chain.
+    __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+    __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+    __ jmp(&next);
+    __ bind(&fast);
+  }
+  tmp.Unuse();
+
+  // All extension objects were empty and it is safe to use a global
+  // load IC call.
+  LoadGlobal();
+  frame_->Push(slot->var()->name());
+  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+                         ? RelocInfo::CODE_TARGET
+                         : RelocInfo::CODE_TARGET_CONTEXT;
+  Result answer = frame_->CallLoadIC(mode);
+  // A test eax instruction following the call signals that the inobject
+  // property case was inlined.  Ensure that there is not a test eax
+  // instruction here.
+  __ nop();
+  // Discard the global object. The result is in answer.
+  frame_->Drop();
+  return answer;
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+  if (slot->type() == Slot::LOOKUP) {
+    ASSERT(slot->var()->is_dynamic());
+
+    // For now, just do a runtime call.  Since the call is inevitable,
+    // we eagerly sync the virtual frame so we can directly push the
+    // arguments into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+
+    frame_->EmitPush(esi);
+    frame_->EmitPush(Immediate(slot->var()->name()));
+
+    Result value;
+    if (init_state == CONST_INIT) {
+      // Same as the case for a normal store, but ignores attribute
+      // (e.g. READ_ONLY) of context slot so that we can initialize const
+      // properties (introduced via eval("const foo = (some expr);")). Also,
+      // uses the current function context instead of the top context.
+      //
+      // Note that we must declare the foo upon entry of eval(), via a
+      // context slot declaration, but we cannot initialize it at the same
+      // time, because the const declaration may be at the end of the eval
+      // code (sigh...) and the const variable may have been used before
+      // (where its value is 'undefined'). Thus, we can only do the
+      // initialization when we actually encounter the expression and when
+      // the expression operands are defined and valid, and thus we need the
+      // split into 2 operations: declaration of the context slot followed
+      // by initialization.
+      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+    } else {
+      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+    }
+    // Storing a variable must keep the (new) value on the expression
+    // stack. This is necessary for compiling chained assignment
+    // expressions.
+    frame_->Push(&value);
+
+  } else {
+    ASSERT(!slot->var()->is_dynamic());
+
+    JumpTarget exit;
+    if (init_state == CONST_INIT) {
+      ASSERT(slot->var()->mode() == Variable::CONST);
+      // Only the first const initialization must be executed (the slot
+      // still contains 'the hole' value). When the assignment is executed,
+      // the code is identical to a normal store (see below).
+      //
+      // We spill the frame in the code below because the direct-frame
+      // access of SlotOperand is potentially unsafe with an unspilled
+      // frame.
+      VirtualFrame::SpilledScope spilled_scope;
+      Comment cmnt(masm_, "[ Init const");
+      __ mov(ecx, SlotOperand(slot, ecx));
+      __ cmp(ecx, Factory::the_hole_value());
+      exit.Branch(not_equal);
+    }
+
+    // We must execute the store.  Storing a variable must keep the (new)
+    // value on the stack. This is necessary for compiling assignment
+    // expressions.
+    //
+    // Note: We will reach here even with slot->var()->mode() ==
+    // Variable::CONST because of const declarations which will initialize
+    // consts to 'the hole' value and by doing so, end up calling this code.
+    if (slot->type() == Slot::PARAMETER) {
+      frame_->StoreToParameterAt(slot->index());
+    } else if (slot->type() == Slot::LOCAL) {
+      frame_->StoreToLocalAt(slot->index());
+    } else {
+      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+      //
+      // The use of SlotOperand below is safe for an unspilled frame
+      // because the slot is a context slot.
+      ASSERT(slot->type() == Slot::CONTEXT);
+      frame_->Dup();
+      Result value = frame_->Pop();
+      value.ToRegister();
+      Result start = allocator_->Allocate();
+      ASSERT(start.is_valid());
+      __ mov(SlotOperand(slot, start.reg()), value.reg());
+      // RecordWrite may destroy the value registers.
+      //
+      // TODO(204): Avoid actually spilling when the value is not
+      // needed (probably the common case).
+      frame_->Spill(value.reg());
+      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+      // The results start, value, and temp are released when they go out
+      // of scope.
+    }
+
+    exit.Bind();
+  }
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+  Comment cmnt(masm_, "[ Slot");
+  LoadFromSlotCheckForArguments(node, typeof_state());
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Variable* var = node->var();
+  Expression* expr = var->rewrite();
+  if (expr != NULL) {
+    Visit(expr);
+  } else {
+    ASSERT(var->is_global());
+    Reference ref(this, node);
+    ref.GetValue(typeof_state());
+  }
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+  Comment cmnt(masm_, "[ Literal");
+  frame_->Push(node->handle());
+}
+
+
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+  ASSERT(target.is_valid());
+  ASSERT(value->IsSmi());
+  int bits = reinterpret_cast<int>(*value);
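+  // Materialize the value in two steps (set the low half, then xor in
+  // the high half) so the complete bit pattern never appears as a single
+  // 32-bit immediate in the instruction stream.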
+  __ Set(target, Immediate(bits & 0x0000FFFF));
+  __ xor_(target, bits & 0xFFFF0000);
+}
+
+
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+  if (!value->IsSmi()) return false;
+  int int_value = Smi::cast(*value)->value();
+  return !is_intn(int_value, kMaxSmiInlinedBits);
+}
+
+
+// Materialize the regexp literal 'node' in the literals array
+// 'literals' of the function.  Leave the regexp boilerplate in
+// 'boilerplate'.
+class DeferredRegExpLiteral: public DeferredCode {
+ public:
+  DeferredRegExpLiteral(Register boilerplate,
+                        Register literals,
+                        RegExpLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredRegExpLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  RegExpLiteral* node_;
+};
+
+
+void DeferredRegExpLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // RegExp pattern (2).
+  __ push(Immediate(node_->pattern()));
+  // RegExp flags (3).
+  __ push(Immediate(node_->flags()));
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the RegExp object.  If so,
+  // jump to the deferred code passing the literals array.
+  DeferredRegExpLiteral* deferred =
+      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the boilerplate object.
+  frame_->Push(&boilerplate);
+}
+
+
+// Materialize the object literal 'node' in the literals array
+// 'literals' of the function.  Leave the object boilerplate in
+// 'boilerplate'.
+class DeferredObjectLiteral: public DeferredCode {
+ public:
+  DeferredObjectLiteral(Register boilerplate,
+                        Register literals,
+                        ObjectLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredObjectLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  ObjectLiteral* node_;
+};
+
+
+void DeferredObjectLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // Constant properties (2).
+  __ push(Immediate(node_->constant_properties()));
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the object literal boilerplate.
+  // If so, jump to the deferred code passing the literals array.
+  DeferredObjectLiteral* deferred =
+      new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the boilerplate object.
+  frame_->Push(&boilerplate);
+  // Clone the boilerplate object.
+  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+  if (node->depth() == 1) {
+    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+  }
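+  // Illustrative examples: {a: 1, b: 2} has depth 1, so the shallow
+  // clone suffices; {a: {b: 1}} nests a literal and needs the deep clone.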
+  Result clone = frame_->CallRuntime(clone_function_id, 1);
+  // Push the newly cloned literal object as the result.
+  frame_->Push(&clone);
+
+  for (int i = 0; i < node->properties()->length(); i++) {
+    ObjectLiteral::Property* property = node->properties()->at(i);
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        break;
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+        // else fall through.
+      case ObjectLiteral::Property::COMPUTED: {
+        Handle<Object> key(property->key()->handle());
+        if (key->IsSymbol()) {
+          // Duplicate the object as the IC receiver.
+          frame_->Dup();
+          Load(property->value());
+          frame_->Push(key);
+          Result ignored = frame_->CallStoreIC();
+          // Drop the duplicated receiver and ignore the result.
+          frame_->Drop();
+          break;
+        }
+        // Fall through
+      }
+      case ObjectLiteral::Property::PROTOTYPE: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+        // Ignore the result.
+        break;
+      }
+      case ObjectLiteral::Property::SETTER: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        frame_->Push(Smi::FromInt(1));
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        // Ignore the result.
+        break;
+      }
+      case ObjectLiteral::Property::GETTER: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        frame_->Push(Smi::FromInt(0));
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        // Ignore the result.
+        break;
+      }
+      default: UNREACHABLE();
+    }
+  }
+}
+
+
+// Materialize the array literal 'node' in the literals array 'literals'
+// of the function.  Leave the array boilerplate in 'boilerplate'.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+  DeferredArrayLiteral(Register boilerplate,
+                       Register literals,
+                       ArrayLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredArrayLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // Constant elements (2).
+  __ push(Immediate(node_->literals()));
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the array literal boilerplate.
+  // If so, jump to the deferred code passing the literals array.
+  DeferredArrayLiteral* deferred =
+      new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the resulting array literal boilerplate on the stack.
+  frame_->Push(&boilerplate);
+  // Clone the boilerplate object.
+  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+  if (node->depth() == 1) {
+    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+  }
+  Result clone = frame_->CallRuntime(clone_function_id, 1);
+  // Push the newly cloned literal object as the result.
+  frame_->Push(&clone);
+
+  // Generate code to set the elements in the array that are not
+  // literals.
+  for (int i = 0; i < node->values()->length(); i++) {
+    Expression* value = node->values()->at(i);
+
+    // If value is a literal the property value is already set in the
+    // boilerplate object.
+    if (value->AsLiteral() != NULL) continue;
+    // If value is a materialized literal the property value is already set
+    // in the boilerplate object if it is simple.
+    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+
+    // The property must be set by generated code.
+    Load(value);
+
+    // Get the property value off the stack.
+    Result prop_value = frame_->Pop();
+    prop_value.ToRegister();
+
+    // Fetch the array literal while leaving a copy on the stack and
+    // use it to get the elements array.
+    frame_->Dup();
+    Result elements = frame_->Pop();
+    elements.ToRegister();
+    frame_->Spill(elements.reg());
+    // Get the elements array.
+    __ mov(elements.reg(),
+           FieldOperand(elements.reg(), JSObject::kElementsOffset));
+
+    // Write to the indexed properties array.
+    int offset = i * kPointerSize + FixedArray::kHeaderSize;
+    __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
+
+    // Update the write barrier for the array address.
+    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
+  }
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  ASSERT(!in_spilled_code());
+  // Call runtime routine to allocate the catch extension object and
+  // assign the exception value to the catch variable.
+  Comment cmnt(masm_, "[ CatchExtensionObject");
+  Load(node->key());
+  Load(node->value());
+  Result result =
+      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+  Comment cmnt(masm_, "[ Assignment");
+
+  { Reference target(this, node->target());
+    if (target.is_illegal()) {
+      // Fool the virtual frame into thinking that we left the assignment's
+      // value on the frame.
+      frame_->Push(Smi::FromInt(0));
+      return;
+    }
+    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+
+    if (node->starts_initialization_block()) {
+      ASSERT(target.type() == Reference::NAMED ||
+             target.type() == Reference::KEYED);
+      // Change to slow case in the beginning of an initialization
+      // block to avoid the quadratic behavior of repeatedly adding
+      // fast properties.
+
+      // The receiver is the argument to the runtime call.  It is the
+      // first value pushed when the reference was loaded to the
+      // frame.
+      frame_->PushElementAt(target.size() - 1);
+      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+    }
+    if (node->op() == Token::ASSIGN ||
+        node->op() == Token::INIT_VAR ||
+        node->op() == Token::INIT_CONST) {
+      Load(node->value());
+
+    } else {
+      Literal* literal = node->value()->AsLiteral();
+      bool overwrite_value =
+          (node->value()->AsBinaryOperation() != NULL &&
+           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+      // There are two easy-to-test cases where the target is not read by
+      // the right-hand side: the right-hand side is a literal, or it is a
+      // different variable.  TakeValue invalidates the target, with an
+      // implicit promise that it will be written to again before it is
+      // read.
+      if (literal != NULL || (right_var != NULL && right_var != var)) {
+        target.TakeValue(NOT_INSIDE_TYPEOF);
+      } else {
+        target.GetValue(NOT_INSIDE_TYPEOF);
+      }
+      Load(node->value());
+      GenericBinaryOperation(node->binary_op(),
+                             node->type(),
+                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    }
+
+    if (var != NULL &&
+        var->mode() == Variable::CONST &&
+        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+      // Assignment ignored - leave the value on the stack.
+    } else {
+      CodeForSourcePosition(node->position());
+      if (node->op() == Token::INIT_CONST) {
+        // Dynamic constant initializations must use the function context
+        // and initialize the actual constant declared. Dynamic variable
+        // initializations are simply assignments and use SetValue.
+        target.SetValue(CONST_INIT);
+      } else {
+        target.SetValue(NOT_CONST_INIT);
+      }
+      if (node->ends_initialization_block()) {
+        ASSERT(target.type() == Reference::NAMED ||
+               target.type() == Reference::KEYED);
+        // End of initialization block. Revert to fast case.  The
+        // argument to the runtime call is the receiver, which is the
+        // first value pushed as part of the reference, which is below
+        // the lhs value.
+        frame_->PushElementAt(target.size());
+        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+      }
+    }
+  }
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+  Comment cmnt(masm_, "[ Throw");
+  Load(node->exception());
+  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+  Comment cmnt(masm_, "[ Property");
+  Reference property(this, node);
+  property.GetValue(typeof_state());
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+  Comment cmnt(masm_, "[ Call");
+
+  Expression* function = node->expression();
+  ZoneList<Expression*>* args = node->arguments();
+
+  // Check if the function is a variable or a property.
+  Variable* var = function->AsVariableProxy()->AsVariable();
+  Property* property = function->AsProperty();
+
+  // ------------------------------------------------------------------------
+  // Fast-case: Use inline caching.
+  // ---
+  // According to ECMA-262, section 11.2.3, page 44, the function to call
+  // must be resolved after the arguments have been evaluated. The IC code
+  // automatically handles this by loading the arguments before the function
+  // is resolved in cache misses (this also holds for megamorphic calls).
+  // ------------------------------------------------------------------------
+
+  if (var != NULL && var->is_possibly_eval()) {
+    // ----------------------------------
+    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
+    // ----------------------------------
+
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+
+    // Prepare the stack for the call to the resolved function.
+    Load(function);
+
+    // Allocate a frame slot for the receiver.
+    frame_->Push(Factory::undefined_value());
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      Load(args->at(i));
+    }
+
+    // Prepare the stack for the call to ResolvePossiblyDirectEval.
+    frame_->PushElementAt(arg_count + 1);
+    if (arg_count > 0) {
+      frame_->PushElementAt(arg_count);
+    } else {
+      frame_->Push(Factory::undefined_value());
+    }
+
+    // Resolve the call.
+    Result result =
+        frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
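+    // The result is a fixed array; the loads below assume the resolved
+    // function at index 0 and the receiver at index 1.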
+
+    // Touch up the stack with the right values for the function and the
+    // receiver.  Use a scratch register to avoid destroying the result.
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
+    frame_->SetElementAt(arg_count + 1, &scratch);
+
+    // We can reuse the result register now.
+    frame_->Spill(result.reg());
+    __ mov(result.reg(),
+           FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
+    frame_->SetElementAt(arg_count, &result);
+
+    // Call the function.
+    CodeForSourcePosition(node->position());
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub call_function(arg_count, in_loop);
+    result = frame_->CallStub(&call_function, arg_count + 1);
+
+    // Restore the context and overwrite the function on the stack with
+    // the result.
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &result);
+
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+
+    // Push the name of the function and the receiver onto the stack.
+    frame_->Push(var->name());
+
+    // Pass the global object as the receiver and let the IC stub
+    // patch the stack to use the global proxy as 'this' in the
+    // invoked function.
+    LoadGlobal();
+
+    // Load the arguments.
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      Load(args->at(i));
+    }
+
+    // Call the IC initialization code.
+    CodeForSourcePosition(node->position());
+    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
+                                       arg_count,
+                                       loop_nesting());
+    frame_->RestoreContextRegister();
+    // Replace the function on the stack with the result.
+    frame_->SetElementAt(0, &result);
+
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    // ----------------------------------
+    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
+    // ----------------------------------
+
+    // Load the function from the context.  Sync the frame so we can
+    // push the arguments directly into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(esi);
+    frame_->EmitPush(Immediate(var->name()));
+    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+    // The runtime call returns a pair of values in eax and edx.  The
+    // looked-up function is in eax and the receiver is in edx.  These
+    // register references are not ref counted here.  We spill them
+    // eagerly since they are arguments to an inevitable call (and are
+    // not sharable by the arguments).
+    ASSERT(!allocator()->is_used(eax));
+    frame_->EmitPush(eax);
+
+    // Load the receiver.
+    ASSERT(!allocator()->is_used(edx));
+    frame_->EmitPush(edx);
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+
+  } else if (property != NULL) {
+    // Check if the key is a literal string.
+    Literal* literal = property->key()->AsLiteral();
+
+    if (literal != NULL && literal->handle()->IsSymbol()) {
+      // ------------------------------------------------------------------
+      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+      // ------------------------------------------------------------------
+
+      Handle<String> name = Handle<String>::cast(literal->handle());
+
+      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+          name->IsEqualTo(CStrVector("apply")) &&
+          args->length() == 2 &&
+          args->at(1)->AsVariableProxy() != NULL &&
+          args->at(1)->AsVariableProxy()->IsArguments()) {
+        // Use the optimized Function.prototype.apply that avoids
+        // allocating lazily allocated arguments objects.
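+        // (Illustrative: this matches the common pattern
+        // 'f.apply(obj, arguments)', letting the lazily allocated
+        // arguments object stay unmaterialized.)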
+        CallApplyLazy(property,
+                      args->at(0),
+                      args->at(1)->AsVariableProxy(),
+                      node->position());
+
+      } else {
+        // Push the name of the function and the receiver onto the stack.
+        frame_->Push(name);
+        Load(property->obj());
+
+        // Load the arguments.
+        int arg_count = args->length();
+        for (int i = 0; i < arg_count; i++) {
+          Load(args->at(i));
+        }
+
+        // Call the IC initialization code.
+        CodeForSourcePosition(node->position());
+        Result result =
+            frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
+                               loop_nesting());
+        frame_->RestoreContextRegister();
+        // Replace the function on the stack with the result.
+        frame_->SetElementAt(0, &result);
+      }
+
+    } else {
+      // -------------------------------------------
+      // JavaScript example: 'array[index](1, 2, 3)'
+      // -------------------------------------------
+
+      // Load the function to call from the property through a reference.
+      Reference ref(this, property);
+      ref.GetValue(NOT_INSIDE_TYPEOF);
+
+      // Pass receiver to called function.
+      if (property->is_synthetic()) {
+        // Use global object as receiver.
+        LoadGlobalReceiver();
+      } else {
+        // The receiver sits ref.size() elements below the loaded
+        // function value on the frame; push a copy of it.
+        frame_->PushElementAt(ref.size());
+      }
+
+      // Call the function.
+      CallWithArguments(args, node->position());
+    }
+
+  } else {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
+    // ----------------------------------
+
+    // Load the function.
+    Load(function);
+
+    // Pass the global proxy as the receiver.
+    LoadGlobalReceiver();
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+  }
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments. This is different from ordinary calls, where the
+  // actual function to call is resolved after the arguments have been
+  // evaluated.
+
+  // Compute function to call and use the global object as the
+  // receiver. There is no need to use the global proxy here because
+  // it will always be replaced with a newly allocated object.
+  Load(node->expression());
+  LoadGlobal();
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  CodeForSourcePosition(node->position());
+  Result result = frame_->CallConstructor(arg_count);
+  // Replace the function on the stack with the result.
+  frame_->SetElementAt(0, &result);
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  value.Unuse();
+  destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (ShouldGenerateLog(args->at(0))) {
+    Load(args->at(1));
+    Load(args->at(2));
+    frame_->CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
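+  // With kSmiTag == 0, a non-negative smi has both the tag bit and the
+  // sign bit clear, so one test against (kSmiTagMask | 0x80000000)
+  // covers both conditions at once.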
+  __ test(value.reg(), Immediate(kSmiTagMask | 0x80000000));
+  value.Unuse();
+  destination()->Split(zero);
+}
+
+
+// This generates code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It can handle flat and sliced strings, 8- and 16-bit characters, and
+// cons strings where the answer is found in the left-hand branch of the
+// cons.  The slow case will flatten the string, which will ensure that
+// the answer is in the left-hand side the next time around.
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
+  ASSERT(args->length() == 2);
+
+  Label slow_case;
+  Label end;
+  Label not_a_flat_string;
+  Label a_cons_string;
+  Label try_again_with_new_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  Load(args->at(0));
+  Load(args->at(1));
+  Result index = frame_->Pop();
+  Result object = frame_->Pop();
+
+  // Get register ecx to use as shift amount later.
+  Result shift_amount;
+  if (object.is_register() && object.reg().is(ecx)) {
+    Result fresh = allocator_->Allocate();
+    shift_amount = object;
+    object = fresh;
+    __ mov(object.reg(), ecx);
+  }
+  if (index.is_register() && index.reg().is(ecx)) {
+    Result fresh = allocator_->Allocate();
+    shift_amount = index;
+    index = fresh;
+    __ mov(index.reg(), ecx);
+  }
+  // There could be references to ecx in the frame.  Allocating ecx will
+  // spill them; otherwise spill ecx explicitly.
+  if (shift_amount.is_valid()) {
+    frame_->Spill(ecx);
+  } else {
+    shift_amount = allocator()->Allocate(ecx);
+  }
+  ASSERT(shift_amount.is_register());
+  ASSERT(shift_amount.reg().is(ecx));
+  ASSERT(allocator_->count(ecx) == 1);
+
+  // We will mutate the index register and possibly the object register.
+  // The case where they are somehow the same register is handled
+  // because we only mutate them in the case where the receiver is a
+  // heap object and the index is not.
+  object.ToRegister();
+  index.ToRegister();
+  frame_->Spill(object.reg());
+  frame_->Spill(index.reg());
+
+  // We need a single extra temporary register.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+
+  // There is no virtual frame effect from here up to the final result
+  // push.
+
+  // If the receiver is a smi trigger the slow case.
+  ASSERT(kSmiTag == 0);
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  __ j(zero, &slow_case);
+
+  // If the index is negative or non-smi trigger the slow case.
+  ASSERT(kSmiTag == 0);
+  __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
+  __ j(not_zero, &slow_case);
+  // Untag the index.
+  __ sar(index.reg(), kSmiTagSize);
+
+  __ bind(&try_again_with_new_string);
+  // Fetch the instance type of the receiver into ecx.
+  __ mov(ecx, FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the slow case.
+  __ test(ecx, Immediate(kIsNotStringMask));
+  __ j(not_zero, &slow_case);
+
+  // Here we make assumptions about the tag values and the shifts needed.
+  // See the comment in objects.h.
+  ASSERT(kLongStringTag == 0);
+  ASSERT(kMediumStringTag + String::kLongLengthShift ==
+         String::kMediumLengthShift);
+  ASSERT(kShortStringTag + String::kLongLengthShift ==
+         String::kShortLengthShift);
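+  // Given the assertions above, adding String::kLongLengthShift to the
+  // size tag yields the correct length shift for each size class, so a
+  // single variable shift extracts the length.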
+  __ and_(ecx, kStringSizeMask);
+  __ add(Operand(ecx), Immediate(String::kLongLengthShift));
+  // Fetch the length field into the temporary register.
+  __ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+  __ shr(temp.reg());  // The shift amount in ecx is an implicit operand.
+  // Check for index out of range.
+  __ cmp(index.reg(), Operand(temp.reg()));
+  __ j(greater_equal, &slow_case);
+  // Reload the instance type (into the temp register this time).
+  __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+
+  // We need special handling for non-flat strings.
+  ASSERT(kSeqStringTag == 0);
+  __ test(temp.reg(), Immediate(kStringRepresentationMask));
+  __ j(not_zero, &not_a_flat_string);
+  // Check for 1-byte or 2-byte string.
+  __ test(temp.reg(), Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string);
+
+  // 2-byte string.
+  // Load the 2-byte character code into the temp register.
+  __ movzx_w(temp.reg(), FieldOperand(object.reg(),
+                                      index.reg(),
+                                      times_2,
+                                      SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
+
+  // ASCII string.
+  __ bind(&ascii_string);
+  // Load the byte into the temp register.
+  __ movzx_b(temp.reg(), FieldOperand(object.reg(),
+                                      index.reg(),
+                                      times_1,
+                                      SeqAsciiString::kHeaderSize));
+  __ bind(&got_char_code);
+  ASSERT(kSmiTag == 0);
+  __ shl(temp.reg(), kSmiTagSize);
+  __ jmp(&end);
+
+  // Handle non-flat strings.
+  __ bind(&not_a_flat_string);
+  __ and_(temp.reg(), kStringRepresentationMask);
+  __ cmp(temp.reg(), kConsStringTag);
+  __ j(equal, &a_cons_string);
+  __ cmp(temp.reg(), kSlicedStringTag);
+  __ j(not_equal, &slow_case);
+
+  // SlicedString.
+  // Add the offset to the index and trigger the slow case on overflow.
+  __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
+  __ j(overflow, &slow_case);
+  // Getting the underlying string is done by running the cons string code.
+
+  // ConsString.
+  __ bind(&a_cons_string);
+  // Get the first of the two strings.  Both sliced and cons strings
+  // store their source string at the same offset.
+  ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+  __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
+  __ jmp(&try_again_with_new_string);
+
+  __ bind(&slow_case);
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ Set(temp.reg(), Immediate(Factory::undefined_value()));
+
+  __ bind(&end);
+  frame_->Push(&temp);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  destination()->false_target()->Branch(equal);
+  // It is a heap object - get map.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  // Check if the object is a JS array or not.
+  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
+  value.Unuse();
+  temp.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  // Get the frame pointer for the calling frame.
+  Result fp = allocator()->Allocate();
+  ASSERT(fp.is_valid());
+  __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &check_frame_marker);
+  __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  fp.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  // ArgumentsAccessStub takes the parameter count as an input argument
+  // in register eax.  Create a constant result for it.
+  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  // Call the shared stub to get to the arguments.length.
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
+  Result result = frame_->CallStub(&stub, &count);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  JumpTarget leave, null, function, non_function_constructor;
+  Load(args->at(0));  // Load the object.
+  Result obj = frame_->Pop();
+  obj.ToRegister();
+  frame_->Spill(obj.reg());
+
+  // If the object is a smi, we return null.
+  __ test(obj.reg(), Immediate(kSmiTagMask));
+  null.Branch(zero);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  { Result tmp = allocator()->Allocate();
+    __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+    __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
+    __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
+    null.Branch(less);
+
+    // As long as JS_FUNCTION_TYPE is the last instance type and it is
+    // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+    // LAST_JS_OBJECT_TYPE.
+    ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+    __ cmp(tmp.reg(), JS_FUNCTION_TYPE);
+    function.Branch(equal);
+  }
+
+  // Check if the constructor in the map is a function.
+  { Result tmp = allocator()->Allocate();
+    __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+    __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
+    non_function_constructor.Branch(not_equal);
+  }
+
+  // The map register now contains the constructor function. Grab the
+  // instance class name from there.
+  __ mov(obj.reg(),
+         FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+  __ mov(obj.reg(),
+         FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
+  frame_->Push(&obj);
+  leave.Jump();
+
+  // Functions have class 'Function'.
+  function.Bind();
+  frame_->Push(Factory::function_class_symbol());
+  leave.Jump();
+
+  // Objects with a non-function constructor have class 'Object'.
+  non_function_constructor.Bind();
+  frame_->Push(Factory::Object_symbol());
+  leave.Jump();
+
+  // Non-JS objects have class null.
+  null.Bind();
+  frame_->Push(Factory::null_value());
+
+  // All done.
+  leave.Bind();
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  JumpTarget leave;
+  Load(args->at(0));  // Load the object.
+  frame_->Dup();
+  Result object = frame_->Pop();
+  object.ToRegister();
+  ASSERT(object.is_valid());
+  // if (object->IsSmi()) return object.
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  leave.Branch(zero, taken);
+  // It is a heap object - get map.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  // if (!object->IsJSValue()) return object.
+  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+  leave.Branch(not_equal, not_taken);
+  __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+  object.Unuse();
+  frame_->SetElementAt(0, &temp);
+  leave.Bind();
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+  JumpTarget leave;
+  Load(args->at(0));  // Load the object.
+  Load(args->at(1));  // Load the value.
+  Result value = frame_->Pop();
+  Result object = frame_->Pop();
+  value.ToRegister();
+  object.ToRegister();
+
+  // if (object->IsSmi()) return value.
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  leave.Branch(zero, &value, taken);
+
+  // It is a heap object - get its map.
+  Result scratch = allocator_->Allocate();
+  ASSERT(scratch.is_valid());
+  // if (!object->IsJSValue()) return value.
+  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+  leave.Branch(not_equal, &value, not_taken);
+
+  // Store the value.
+  __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  Result duplicate_value = allocator_->Allocate();
+  ASSERT(duplicate_value.is_valid());
+  __ mov(duplicate_value.reg(), value.reg());
+  // The object register is also overwritten by the write barrier and
+  // possibly aliased in the frame.
+  frame_->Spill(object.reg());
+  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+                 scratch.reg());
+  object.Unuse();
+  scratch.Unuse();
+  duplicate_value.Unuse();
+
+  // Leave.
+  leave.Bind(&value);
+  frame_->Push(&value);
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in edx and the formal
+  // parameter count in eax.
+  Load(args->at(0));
+  Result key = frame_->Pop();
+  // Explicitly create a constant result.
+  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  // Call the shared stub to get to arguments[key].
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  Result result = frame_->CallStub(&stub, &key, &count);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  Load(args->at(0));
+  Load(args->at(1));
+  Result right = frame_->Pop();
+  Result left = frame_->Pop();
+  right.ToRegister();
+  left.ToRegister();
+  __ cmp(right.reg(), Operand(left.reg()));
+  right.Unuse();
+  left.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  ASSERT(kSmiTag == 0);  // The EBP value is aligned, so it looks like a smi.
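+  // (Sketch of why this is safe: ebp is at least 4-byte aligned, so its
+  // low bit is clear and the pushed value reads back as the smi ebp / 2
+  // under the 1-bit smi tag used throughout this file.)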
+  Result ebp_as_smi = allocator_->Allocate();
+  ASSERT(ebp_as_smi.is_valid());
+  __ mov(ebp_as_smi.reg(), Operand(ebp));
+  frame_->Push(&ebp_as_smi);
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  frame_->SpillAll();
+
+  // Make sure the frame is aligned like the OS expects.
+  static const int kFrameAlignment = OS::ActivationFrameAlignment();
+  if (kFrameAlignment > 0) {
+    ASSERT(IsPowerOf2(kFrameAlignment));
+    __ mov(edi, Operand(esp));  // Save in callee-saved register.
+    __ and_(esp, -kFrameAlignment);
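+    // -kFrameAlignment is a mask with ones above the alignment bits;
+    // e.g. with 16-byte alignment this is esp &= ~15, rounding esp down.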
+  }
+
+  // Call V8::RandomPositiveSmi().
+  __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+
+  // Restore stack pointer from callee-saved register edi.
+  if (kFrameAlignment > 0) {
+    __ mov(esp, Operand(edi));
+  }
+
+  Result result = allocator_->Allocate(eax);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+  JumpTarget done;
+  JumpTarget call_runtime;
+  ASSERT(args->length() == 1);
+
+  // Load number and duplicate it.
+  Load(args->at(0));
+  frame_->Dup();
+
+  // Get the number into an unaliased register and load it onto the
+  // floating point stack still leaving one copy on the frame.
+  Result number = frame_->Pop();
+  number.ToRegister();
+  frame_->Spill(number.reg());
+  FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
+  number.Unuse();
+
+  // Perform the operation on the number.
+  switch (op) {
+    case SIN:
+      __ fsin();
+      break;
+    case COS:
+      __ fcos();
+      break;
+  }
+
+  // Go to the slow case if the argument to the operation is out of range.
+  Result eax_reg = allocator_->Allocate(eax);
+  ASSERT(eax_reg.is_valid());
+  __ fnstsw_ax();
+  __ sahf();
+  eax_reg.Unuse();
+  call_runtime.Branch(parity_even, not_taken);
+
+  // Allocate heap number for result if possible.
+  Result scratch1 = allocator()->Allocate();
+  Result scratch2 = allocator()->Allocate();
+  Result heap_number = allocator()->Allocate();
+  FloatingPointHelper::AllocateHeapNumber(masm_,
+                                          call_runtime.entry_label(),
+                                          scratch1.reg(),
+                                          scratch2.reg(),
+                                          heap_number.reg());
+  scratch1.Unuse();
+  scratch2.Unuse();
+
+  // Store the result in the allocated heap number.
+  __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
+  // Replace the extra copy of the argument with the result.
+  frame_->SetElementAt(0, &heap_number);
+  done.Jump();
+
+  call_runtime.Bind();
+  // Free ST(0) which was not popped before calling into the runtime.
+  __ ffree(0);
+  Result answer;
+  switch (op) {
+    case SIN:
+      answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
+      break;
+    case COS:
+      answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
+      break;
+  }
+  frame_->Push(&answer);
+  done.Bind();
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+  if (CheckForInlineRuntimeCall(node)) {
+    return;
+  }
+
+  ZoneList<Expression*>* args = node->arguments();
+  Comment cmnt(masm_, "[ CallRuntime");
+  Runtime::Function* function = node->function();
+
+  if (function == NULL) {
+    // Prepare stack for calling JS runtime function.
+    frame_->Push(node->name());
+    // Push the builtins object found in the current global object.
+    Result temp = allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), GlobalObject());
+    __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+    frame_->Push(&temp);
+  }
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  if (function == NULL) {
+    // Call the JS runtime function.
+    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+                                       arg_count,
+                                       loop_nesting_);
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &answer);
+  } else {
+    // Call the C runtime function.
+    Result answer = frame_->CallRuntime(function, arg_count);
+    frame_->Push(&answer);
+  }
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+  // Note that because of the NOT operator and an optimization that
+  // compares a typeof expression to a literal string, this function can
+  // fail to leave a value on top of the frame or in the cc register.
+  Comment cmnt(masm_, "[ UnaryOperation");
+
+  Token::Value op = node->op();
+
+  if (op == Token::NOT) {
+    // Swap the true and false targets but keep the same actual label
+    // as the fall through.
+    destination()->Invert();
+    LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+    // Swap the labels back.
+    destination()->Invert();
+
+  } else if (op == Token::DELETE) {
+    Property* property = node->expression()->AsProperty();
+    if (property != NULL) {
+      Load(property->obj());
+      Load(property->key());
+      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+      frame_->Push(&answer);
+      return;
+    }
+
+    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+    if (variable != NULL) {
+      Slot* slot = variable->slot();
+      if (variable->is_global()) {
+        LoadGlobal();
+        frame_->Push(variable->name());
+        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                              CALL_FUNCTION, 2);
+        frame_->Push(&answer);
+        return;
+
+      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+        // Call the runtime to look up the context holding the named
+        // variable.  Sync the virtual frame eagerly so we can push the
+        // arguments directly into place.
+        frame_->SyncRange(0, frame_->element_count() - 1);
+        frame_->EmitPush(esi);
+        frame_->EmitPush(Immediate(variable->name()));
+        Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+        ASSERT(context.is_register());
+        frame_->EmitPush(context.reg());
+        context.Unuse();
+        frame_->EmitPush(Immediate(variable->name()));
+        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                              CALL_FUNCTION, 2);
+        frame_->Push(&answer);
+        return;
+      }
+
+      // Default: the result of deleting a non-global variable that was
+      // not dynamically introduced is false.
+      frame_->Push(Factory::false_value());
+
+    } else {
+      // Default: the result of deleting any other expression is true.
+      Load(node->expression());  // may have side-effects
+      frame_->SetElementAt(0, Factory::true_value());
+    }
+
+  } else if (op == Token::TYPEOF) {
+    // Special case for loading the typeof expression; see comment on
+    // LoadTypeofExpression().
+    LoadTypeofExpression(node->expression());
+    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+    frame_->Push(&answer);
+
+  } else if (op == Token::VOID) {
+    Expression* expression = node->expression();
+    if (expression && expression->AsLiteral() && (
+        expression->AsLiteral()->IsTrue() ||
+        expression->AsLiteral()->IsFalse() ||
+        expression->AsLiteral()->handle()->IsNumber() ||
+        expression->AsLiteral()->handle()->IsString() ||
+        expression->AsLiteral()->handle()->IsJSRegExp() ||
+        expression->AsLiteral()->IsNull())) {
+      // Omit evaluating the value of the primitive literal.
+      // It will be discarded anyway, and can have no side effect.
+      frame_->Push(Factory::undefined_value());
+    } else {
+      Load(node->expression());
+      frame_->SetElementAt(0, Factory::undefined_value());
+    }
+
+  } else {
+    Load(node->expression());
+    switch (op) {
+      case Token::SUB: {
+        bool overwrite =
+            (node->AsBinaryOperation() != NULL &&
+             node->AsBinaryOperation()->ResultOverwriteAllowed());
+        UnarySubStub stub(overwrite);
+        // TODO(1222589): remove the dependency on TOS being cached inside the stub.
+        Result operand = frame_->Pop();
+        Result answer = frame_->CallStub(&stub, &operand);
+        frame_->Push(&answer);
+        break;
+      }
+
+      case Token::BIT_NOT: {
+        // Smi check.
+        JumpTarget smi_label;
+        JumpTarget continue_label;
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        __ test(operand.reg(), Immediate(kSmiTagMask));
+        smi_label.Branch(zero, &operand, taken);
+
+        frame_->Push(&operand);  // undo popping of TOS
+        Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
+                                              CALL_FUNCTION, 1);
+
+        continue_label.Jump(&answer);
+        smi_label.Bind(&answer);
+        answer.ToRegister();
+        frame_->Spill(answer.reg());
+        __ not_(answer.reg());
+        __ and_(answer.reg(), ~kSmiTagMask);  // Remove inverted smi-tag.
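+        // For a smi n the tagged value is 2n; 'not' gives -2n - 1 and
+        // clearing the tag bit yields -2n - 2 == 2 * (~n), which is the
+        // tagged smi for ~n.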
+        continue_label.Bind(&answer);
+        frame_->Push(&answer);
+        break;
+      }
+
+      case Token::ADD: {
+        // Smi check.
+        JumpTarget continue_label;
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        __ test(operand.reg(), Immediate(kSmiTagMask));
+        continue_label.Branch(zero, &operand, taken);
+
+        frame_->Push(&operand);
+        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+                                              CALL_FUNCTION, 1);
+
+        continue_label.Bind(&answer);
+        frame_->Push(&answer);
+        break;
+      }
+
+      default:
+        // NOT, DELETE, TYPEOF, and VOID are handled outside the
+        // switch.
+        UNREACHABLE();
+    }
+  }
+}
+
+
+// The value in dst was optimistically incremented or decremented.  The
+// result overflowed or was not smi tagged.  Undo the operation, call
+// into the runtime to convert the argument to a number, and call the
+// specialized add or subtract stub.  The result is left in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
+ public:
+  DeferredPrefixCountOperation(Register dst, bool is_increment)
+      : dst_(dst), is_increment_(is_increment) {
+    set_comment("[ DeferredCountOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  bool is_increment_;
+};
+
+
+void DeferredPrefixCountOperation::Generate() {
+  // Undo the optimistic smi operation.
+  if (is_increment_) {
+    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
+  } else {
+    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
+  }
+  __ push(dst_);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(1)));
+  if (is_increment_) {
+    __ CallRuntime(Runtime::kNumberAdd, 2);
+  } else {
+    __ CallRuntime(Runtime::kNumberSub, 2);
+  }
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The value in dst was optimistically incremented or decremented.  The
+// result overflowed or was not smi tagged.  Undo the operation and call
+// into the runtime to convert the argument to a number.  Update the
+// original value in old.  Call the specialized add or subtract stub.
+// The result is left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+  DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
+      : dst_(dst), old_(old), is_increment_(is_increment) {
+    set_comment("[ DeferredCountOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Register old_;
+  bool is_increment_;
+};
+
+
+void DeferredPostfixCountOperation::Generate() {
+  // Undo the optimistic smi operation.
+  if (is_increment_) {
+    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
+  } else {
+    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
+  }
+  __ push(dst_);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+  // Save the result of ToNumber to use as the old value.
+  __ push(eax);
+
+  // Call the runtime for the addition or subtraction.
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(1)));
+  if (is_increment_) {
+    __ CallRuntime(Runtime::kNumberAdd, 2);
+  } else {
+    __ CallRuntime(Runtime::kNumberSub, 2);
+  }
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(old_);
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+  Comment cmnt(masm_, "[ CountOperation");
+
+  bool is_postfix = node->is_postfix();
+  bool is_increment = node->op() == Token::INC;
+
+  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+  bool is_const = (var != NULL && var->mode() == Variable::CONST);
+
+  // Postfix operations need a stack slot under the reference to hold
+  // the old value while the new value is being stored.  This is so that
+  // in the case that storing the new value requires a call, the old
+  // value will be in the frame to be spilled.
+  if (is_postfix) frame_->Push(Smi::FromInt(0));
+
+  { Reference target(this, node->expression());
+    if (target.is_illegal()) {
+      // Spoof the virtual frame to have the expected height (one higher
+      // than on entry).
+      if (!is_postfix) frame_->Push(Smi::FromInt(0));
+      return;
+    }
+    target.TakeValue(NOT_INSIDE_TYPEOF);
+
+    Result new_value = frame_->Pop();
+    new_value.ToRegister();
+
+    Result old_value;  // Only allocated in the postfix case.
+    if (is_postfix) {
+      // Allocate a temporary to preserve the old value.
+      old_value = allocator_->Allocate();
+      ASSERT(old_value.is_valid());
+      __ mov(old_value.reg(), new_value.reg());
+    }
+    // Ensure the new value is writable.
+    frame_->Spill(new_value.reg());
+
+    // In order to combine the overflow and the smi tag check, we need
+    // to be able to allocate a byte register.  We attempt to do so
+    // without spilling.  If we fail, we will generate separate overflow
+    // and smi tag checks.
+    //
+    // We allocate and clear the temporary byte register before
+    // performing the count operation since clearing the register using
+    // xor will clear the overflow flag.
+    Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
+    if (tmp.is_valid()) {
+      __ Set(tmp.reg(), Immediate(0));
+    }
+
+    DeferredCode* deferred = NULL;
+    if (is_postfix) {
+      deferred = new DeferredPostfixCountOperation(new_value.reg(),
+                                                   old_value.reg(),
+                                                   is_increment);
+    } else {
+      deferred = new DeferredPrefixCountOperation(new_value.reg(),
+                                                  is_increment);
+    }
+
+    if (is_increment) {
+      __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
+    } else {
+      __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
+    }
+
+    // If the count operation didn't overflow and the result is a valid
+    // smi, we're done. Otherwise, we jump to the deferred slow-case
+    // code.
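+    // (With kSmiTag == 0 and kSmiTagMask == 1, setcc leaves 1 in the
+    // byte register on overflow; or-ing in the result then makes bit 0
+    // nonzero exactly when we overflowed or the result is not a smi.)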
+    if (tmp.is_valid()) {
+      // We combine the overflow and the smi tag check if we could
+      // successfully allocate a temporary byte register.
+      __ setcc(overflow, tmp.reg());
+      __ or_(Operand(tmp.reg()), new_value.reg());
+      __ test(tmp.reg(), Immediate(kSmiTagMask));
+      tmp.Unuse();
+      deferred->Branch(not_zero);
+    } else {
+      // Otherwise we test separately for overflow and smi tag.
+      deferred->Branch(overflow);
+      __ test(new_value.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    }
+    deferred->BindExit();
+
+    // Postfix: store the old value in the allocated slot under the
+    // reference.
+    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
+
+    frame_->Push(&new_value);
+    // Non-constant: update the reference.
+    if (!is_const) target.SetValue(NOT_CONST_INIT);
+  }
+
+  // Postfix: drop the new value and use the old.
+  if (is_postfix) frame_->Drop();
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  // Note that due to an optimization in comparison operations (typeof
+  // compared to a string literal), we can evaluate a binary expression such
+  // as AND or OR and not leave a value on the frame or in the cc register.
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Token::Value op = node->op();
+
+  // According to ECMA-262 section 11.11, page 58, the binary logical
+  // operators must yield the result of one of the two expressions
+  // before any ToBoolean() conversions. This means that the value
+  // produced by a && or || operator is not necessarily a boolean.
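+  // For example, '0 && f()' evaluates to 0 and '"a" || b' evaluates
+  // to the string "a".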
+
+  // NOTE: If the left hand side produces a materialized value (not
+  // control flow), we force the right hand side to do the same. This
+  // is necessary because we assume that if we get control flow on the
+  // last path out of an expression we got it on all paths.
+  if (op == Token::AND) {
+    JumpTarget is_true;
+    ControlDestination dest(&is_true, destination()->false_target(), true);
+    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+    if (dest.false_was_fall_through()) {
+      // The current false target was used as the fall-through.  If
+      // there are no dangling jumps to is_true then the left
+      // subexpression was unconditionally false.  Otherwise we have
+      // paths where we do have to evaluate the right subexpression.
+      if (is_true.is_linked()) {
+        // We need to compile the right subexpression.  If the jump to
+        // the current false target was a forward jump then we have a
+        // valid frame, we have just bound the false target, and we
+        // have to jump around the code for the right subexpression.
+        if (has_valid_frame()) {
+          destination()->false_target()->Unuse();
+          destination()->false_target()->Jump();
+        }
+        is_true.Bind();
+        // The left subexpression compiled to control flow, so the
+        // right one is free to do so as well.
+        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      } else {
+        // We have actually just jumped to or bound the current false
+        // target but the current control destination is not marked as
+        // used.
+        destination()->Use(false);
+      }
+
+    } else if (dest.is_used()) {
+      // The left subexpression compiled to control flow (and is_true
+      // was just bound), so the right is free to do so as well.
+      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+    } else {
+      // We have a materialized value on the frame, so we exit with
+      // one on all paths.  There are possibly also jumps to is_true
+      // from nested subexpressions.
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
+
+      // Avoid popping the result if it converts to 'false' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      //
+      // Duplicate the TOS value. The duplicate will be popped by
+      // ToBoolean.
+      frame_->Dup();
+      ControlDestination dest(&pop_and_continue, &exit, true);
+      ToBoolean(&dest);
+
+      // Pop the result of evaluating the first part.
+      frame_->Drop();
+
+      // Compile right side expression.
+      is_true.Bind();
+      Load(node->right());
+
+      // Exit (always with a materialized value).
+      exit.Bind();
+    }
+
+  } else if (op == Token::OR) {
+    JumpTarget is_false;
+    ControlDestination dest(destination()->true_target(), &is_false, false);
+    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+    if (dest.true_was_fall_through()) {
+      // The current true target was used as the fall-through.  If
+      // there are no dangling jumps to is_false then the left
+      // subexpression was unconditionally true.  Otherwise we have
+      // paths where we do have to evaluate the right subexpression.
+      if (is_false.is_linked()) {
+        // We need to compile the right subexpression.  If the jump to
+        // the current true target was a forward jump then we have a
+        // valid frame, we have just bound the true target, and we
+        // have to jump around the code for the right subexpression.
+        if (has_valid_frame()) {
+          destination()->true_target()->Unuse();
+          destination()->true_target()->Jump();
+        }
+        is_false.Bind();
+        // The left subexpression compiled to control flow, so the
+        // right one is free to do so as well.
+        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      } else {
+        // We have just jumped to or bound the current true target but
+        // the current control destination is not marked as used.
+        destination()->Use(true);
+      }
+
+    } else if (dest.is_used()) {
+      // The left subexpression compiled to control flow (and is_false
+      // was just bound), so the right is free to do so as well.
+      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+    } else {
+      // We have a materialized value on the frame, so we exit with
+      // one on all paths.  There are possibly also jumps to is_false
+      // from nested subexpressions.
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
+
+      // Avoid popping the result if it converts to 'true' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      //
+      // Duplicate the TOS value. The duplicate will be popped by
+      // ToBoolean.
+      frame_->Dup();
+      ControlDestination dest(&exit, &pop_and_continue, false);
+      ToBoolean(&dest);
+
+      // Pop the result of evaluating the first part.
+      frame_->Drop();
+
+      // Compile right side expression.
+      is_false.Bind();
+      Load(node->right());
+
+      // Exit (always with a materialized value).
+      exit.Bind();
+    }
+
+  } else {
+    // NOTE: The code below assumes that the slow cases (calls to runtime)
+    // never return a constant/immutable object.
+    OverwriteMode overwrite_mode = NO_OVERWRITE;
+    if (node->left()->AsBinaryOperation() != NULL &&
+        node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+      overwrite_mode = OVERWRITE_LEFT;
+    } else if (node->right()->AsBinaryOperation() != NULL &&
+               node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+      overwrite_mode = OVERWRITE_RIGHT;
+    }
+
+    Load(node->left());
+    Load(node->right());
+    GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
+  }
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+  frame_->PushFunction();
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+  Comment cmnt(masm_, "[ CompareOperation");
+
+  // Get the expressions from the node.
+  Expression* left = node->left();
+  Expression* right = node->right();
+  Token::Value op = node->op();
+  // To make typeof testing for natives implemented in JavaScript really
+  // efficient, we generate special code for expressions of the form:
+  // 'typeof <expression> == <string>'.
+  UnaryOperation* operation = left->AsUnaryOperation();
+  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+      (operation != NULL && operation->op() == Token::TYPEOF) &&
+      (right->AsLiteral() != NULL &&
+       right->AsLiteral()->handle()->IsString())) {
+    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
+
+    // Load the operand and move it to a register.
+    LoadTypeofExpression(operation->expression());
+    Result answer = frame_->Pop();
+    answer.ToRegister();
+
+    if (check->Equals(Heap::number_symbol())) {
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->true_target()->Branch(zero);
+      frame_->Spill(answer.reg());
+      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ cmp(answer.reg(), Factory::heap_number_map());
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::string_symbol())) {
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+
+      // It can be an undetectable string object.
+      Result temp = allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
+      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+      destination()->false_target()->Branch(not_zero);
+      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(),
+                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+      __ cmp(temp.reg(), FIRST_NONSTRING_TYPE);
+      temp.Unuse();
+      answer.Unuse();
+      destination()->Split(less);
+
+    } else if (check->Equals(Heap::boolean_symbol())) {
+      __ cmp(answer.reg(), Factory::true_value());
+      destination()->true_target()->Branch(equal);
+      __ cmp(answer.reg(), Factory::false_value());
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::undefined_symbol())) {
+      __ cmp(answer.reg(), Factory::undefined_value());
+      destination()->true_target()->Branch(equal);
+
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+
+      // It can be an undetectable object.
+      frame_->Spill(answer.reg());
+      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(answer.reg(),
+                 FieldOperand(answer.reg(), Map::kBitFieldOffset));
+      __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
+      answer.Unuse();
+      destination()->Split(not_zero);
+
+    } else if (check->Equals(Heap::function_symbol())) {
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+      frame_->Spill(answer.reg());
+      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::object_symbol())) {
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+      __ cmp(answer.reg(), Factory::null_value());
+      destination()->true_target()->Branch(equal);
+
+      // It can be an undetectable object.
+      Result map = allocator()->Allocate();
+      ASSERT(map.is_valid());
+      __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
+      __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+      destination()->false_target()->Branch(not_zero);
+      __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
+      __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
+      destination()->false_target()->Branch(less);
+      __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+      answer.Unuse();
+      map.Unuse();
+      destination()->Split(less_equal);
+    } else {
+      // Uncommon case: typeof testing against a string literal that is
+      // never returned from the typeof operator.
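+      // For example, 'typeof x == "banana"' is always false.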
+      answer.Unuse();
+      destination()->Goto(false);
+    }
+    return;
+  }
+
+  Condition cc = no_condition;
+  bool strict = false;
+  switch (op) {
+    case Token::EQ_STRICT:
+      strict = true;
+      // Fall through
+    case Token::EQ:
+      cc = equal;
+      break;
+    case Token::LT:
+      cc = less;
+      break;
+    case Token::GT:
+      cc = greater;
+      break;
+    case Token::LTE:
+      cc = less_equal;
+      break;
+    case Token::GTE:
+      cc = greater_equal;
+      break;
+    case Token::IN: {
+      Load(left);
+      Load(right);
+      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+      frame_->Push(&answer);  // push the result
+      return;
+    }
+    case Token::INSTANCEOF: {
+      Load(left);
+      Load(right);
+      InstanceofStub stub;
+      Result answer = frame_->CallStub(&stub, 2);
+      answer.ToRegister();
+      __ test(answer.reg(), Operand(answer.reg()));
+      answer.Unuse();
+      destination()->Split(zero);
+      return;
+    }
+    default:
+      UNREACHABLE();
+  }
+  Load(left);
+  Load(right);
+  Comparison(cc, strict, destination());
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+  return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
+      && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
+      && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
+      && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
+      && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
+}
+#endif
+
+
+// Emit a LoadIC call to get the value from receiver and leave it in
+// dst.  The receiver register is restored after the call.
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+  DeferredReferenceGetNamedValue(Register dst,
+                                 Register receiver,
+                                 Handle<String> name)
+      : dst_(dst), receiver_(receiver), name_(name) {
+    set_comment("[ DeferredReferenceGetNamedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Handle<String> name_;
+};
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+  __ push(receiver_);
+  __ Set(ecx, Immediate(name_));
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // The call must be followed by a test eax instruction to indicate
+  // that the inobject property case was inlined.
+  //
+  // Store the delta to the map check instruction here in the test
+  // instruction.  Use masm_-> instead of the __ macro since the
+  // latter can't return a value.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(receiver_);
+}
+
+
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceGetKeyedValue(Register dst,
+                                 Register receiver,
+                                 Register key,
+                                 bool is_global)
+      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+    set_comment("[ DeferredReferenceGetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Register key_;
+  bool is_global_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+  __ push(receiver_);  // First IC argument.
+  __ push(key_);       // Second IC argument.
+
+  // Calculate the delta from the IC call instruction to the map check
+  // cmp instruction in the inlined version.  This delta is stored in
+  // a test(eax, delta) instruction after the call so that we can find
+  // it in the IC initialization code and patch the cmp instruction.
+  // This means that we cannot allow test instructions after calls to
+  // KeyedLoadIC stubs in other places.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  RelocInfo::Mode mode = is_global_
+                         ? RelocInfo::CODE_TARGET_CONTEXT
+                         : RelocInfo::CODE_TARGET;
+  __ call(ic, mode);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction.  We use masm_-> directly here instead of the __
+  // macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
+  Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+  // Push receiver and key arguments on the stack.
+  __ push(receiver_);
+  __ push(key_);
+  // Move value argument to eax as expected by the IC stub.
+  if (!value_.is(eax)) __ mov(eax, value_);
+  // Call the IC stub.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction.  We use masm_-> directly here instead of the
+  // __ macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
+  // Restore value (returned from store IC), key and receiver
+  // registers.
+  if (!value_.is(eax)) __ mov(value_, eax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+Handle<String> Reference::GetName() {
+  ASSERT(type_ == NAMED);
+  Property* property = expression_->AsProperty();
+  if (property == NULL) {
+    // Global variable reference treated as a named property reference.
+    VariableProxy* proxy = expression_->AsVariableProxy();
+    ASSERT(proxy->AsVariable() != NULL);
+    ASSERT(proxy->AsVariable()->is_global());
+    return proxy->name();
+  } else {
+    Literal* raw_name = property->key()->AsLiteral();
+    ASSERT(raw_name != NULL);
+    return Handle<String>(String::cast(*raw_name->handle()));
+  }
+}
+
+
+void Reference::GetValue(TypeofState typeof_state) {
+  ASSERT(!cgen_->in_spilled_code());
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  MacroAssembler* masm = cgen_->masm();
+
+  // Record the source position for the property load.
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Load from Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      ASSERT(slot != NULL);
+      cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+      break;
+    }
+
+    case NAMED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a
+      // typeof. If there is a chance that reference errors can be
+      // thrown below, we must distinguish between the two kinds of
+      // loads (typeof expression loads must not throw a reference
+      // error).
+      Variable* var = expression_->AsVariableProxy()->AsVariable();
+      bool is_global = var != NULL;
+      ASSERT(!is_global || var->is_global());
+
+      // Do not inline the inobject property case for loads from the global
+      // object.  Also do not inline for unoptimized code.  This saves time
+      // in the code generator.  Unoptimized code is toplevel code or code
+      // that is not in a loop.
+      if (is_global ||
+          cgen_->scope()->is_global_scope() ||
+          cgen_->loop_nesting() == 0) {
+        Comment cmnt(masm, "[ Load from named Property");
+        cgen_->frame()->Push(GetName());
+
+        RelocInfo::Mode mode = is_global
+                               ? RelocInfo::CODE_TARGET_CONTEXT
+                               : RelocInfo::CODE_TARGET;
+        Result answer = cgen_->frame()->CallLoadIC(mode);
+        // A test eax instruction following the call signals that the
+        // inobject property case was inlined.  Ensure that there is not
+        // a test eax instruction here.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      } else {
+        // Inline the inobject property case.
+        Comment cmnt(masm, "[ Inlined named property load");
+        Result receiver = cgen_->frame()->Pop();
+        receiver.ToRegister();
+
+        Result value = cgen_->allocator()->Allocate();
+        ASSERT(value.is_valid());
+        DeferredReferenceGetNamedValue* deferred =
+            new DeferredReferenceGetNamedValue(value.reg(),
+                                               receiver.reg(),
+                                               GetName());
+
+        // Check that the receiver is a heap object.
+        __ test(receiver.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(zero);
+
+        __ bind(deferred->patch_site());
+        // This is the map check instruction that will be patched (so we can't
+        // use the double underscore macro that may insert instructions).
+        // Initially use an invalid map to force a failure.
+        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                  Immediate(Factory::null_value()));
+        // This branch is always a forwards branch so it's always a fixed
+        // size which allows the assert below to succeed and patching to work.
+        deferred->Branch(not_equal);
+
+        // The delta from the patch label to the load offset must be
+        // statically known.
+        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+               LoadIC::kOffsetToLoadInstruction);
+        // The initial (invalid) offset has to be large enough to force
+        // a 32-bit instruction encoding to allow patching with an
+        // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
+        int offset = kMaxInt;
+        masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
+
+        __ IncrementCounter(&Counters::named_load_inline, 1);
+        deferred->BindExit();
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&value);
+      }
+      break;
+    }
+
+    case KEYED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a typeof.
+      Comment cmnt(masm, "[ Load from keyed Property");
+      Variable* var = expression_->AsVariableProxy()->AsVariable();
+      bool is_global = var != NULL;
+      ASSERT(!is_global || var->is_global());
+
+      // Inline array load code if inside of a loop.  We do not know
+      // the receiver map yet, so we initially generate the code with
+      // a check against an invalid map.  In the inline cache code, we
+      // patch the map check if appropriate.
+      if (cgen_->loop_nesting() > 0) {
+        Comment cmnt(masm, "[ Inlined load from keyed Property");
+
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        // Use a fresh temporary to load the elements without destroying
+        // the receiver which is needed for the deferred slow case.
+        Result elements = cgen_->allocator()->Allocate();
+        ASSERT(elements.is_valid());
+
+        // Use a fresh temporary for the index and later the loaded
+        // value.
+        Result index = cgen_->allocator()->Allocate();
+        ASSERT(index.is_valid());
+
+        DeferredReferenceGetKeyedValue* deferred =
+            new DeferredReferenceGetKeyedValue(index.reg(),
+                                               receiver.reg(),
+                                               key.reg(),
+                                               is_global);
+
+        // Check that the receiver is not a smi (only needed if this
+        // is not a load from the global context) and that it has the
+        // expected map.
+        if (!is_global) {
+          __ test(receiver.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(zero);
+        }
+
+        // Initially, use an invalid map. The map is patched in the IC
+        // initialization code.
+        __ bind(deferred->patch_site());
+        // Use masm-> here instead of the double underscore macro since extra
+        // coverage code can interfere with the patching.
+        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                  Immediate(Factory::null_value()));
+        deferred->Branch(not_equal);
+
+        // Check that the key is a smi.
+        __ test(key.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+
+        // Get the elements array from the receiver and check that it
+        // is not a dictionary.
+        __ mov(elements.reg(),
+               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+               Immediate(Factory::fixed_array_map()));
+        deferred->Branch(not_equal);
+
+        // Shift the key to get the actual index value and check that
+        // it is within bounds.
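+        // (A smi is the value shifted left by kSmiTagSize, so the
+        // arithmetic shift right recovers the untagged index.)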
+        __ mov(index.reg(), key.reg());
+        __ sar(index.reg(), kSmiTagSize);
+        __ cmp(index.reg(),
+               FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+        deferred->Branch(above_equal);
+
+        // Load and check that the result is not the hole.  We could
+        // reuse the index or elements register for the value.
+        //
+        // TODO(206): Consider whether it makes sense to try some
+        // heuristic about which register to reuse.  For example, if
+        // one is eax, then we can reuse it because the value coming
+        // from the deferred code will be in eax.
+        Result value = index;
+        __ mov(value.reg(), Operand(elements.reg(),
+                                    index.reg(),
+                                    times_4,
+                                    FixedArray::kHeaderSize - kHeapObjectTag));
+        elements.Unuse();
+        index.Unuse();
+        __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+        deferred->Branch(equal);
+        __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+        deferred->BindExit();
+        // Restore the receiver and key to the frame and push the
+        // result on top of it.
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+
+      } else {
+        Comment cmnt(masm, "[ Load from keyed Property");
+        RelocInfo::Mode mode = is_global
+                               ? RelocInfo::CODE_TARGET_CONTEXT
+                               : RelocInfo::CODE_TARGET;
+        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed load.  The explicit nop instruction is here because
+        // the push that follows might be peep-hole optimized away.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void Reference::TakeValue(TypeofState typeof_state) {
+  // For non-constant frame-allocated slots, we invalidate the value in the
+  // slot.  For all others, we fall back on GetValue.
+  ASSERT(!cgen_->in_spilled_code());
+  ASSERT(!is_illegal());
+  if (type_ != SLOT) {
+    GetValue(typeof_state);
+    return;
+  }
+
+  Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+  ASSERT(slot != NULL);
+  if (slot->type() == Slot::LOOKUP ||
+      slot->type() == Slot::CONTEXT ||
+      slot->var()->mode() == Variable::CONST ||
+      slot->is_arguments()) {
+    GetValue(typeof_state);
+    return;
+  }
+
+  // Only non-constant, frame-allocated parameters and locals can
+  // reach here. Be careful not to use the optimizations for arguments
+  // object access since it may not have been initialized yet.
+  ASSERT(!slot->is_arguments());
+  if (slot->type() == Slot::PARAMETER) {
+    cgen_->frame()->TakeParameterAt(slot->index());
+  } else {
+    ASSERT(slot->type() == Slot::LOCAL);
+    cgen_->frame()->TakeLocalAt(slot->index());
+  }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  MacroAssembler* masm = cgen_->masm();
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Store to Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      ASSERT(slot != NULL);
+      cgen_->StoreToSlot(slot, init_state);
+      break;
+    }
+
+    case NAMED: {
+      Comment cmnt(masm, "[ Store to named Property");
+      cgen_->frame()->Push(GetName());
+      Result answer = cgen_->frame()->CallStoreIC();
+      cgen_->frame()->Push(&answer);
+      break;
+    }
+
+    case KEYED: {
+      Comment cmnt(masm, "[ Store to keyed Property");
+
+      // Generate inlined version of the keyed store if the code is in
+      // a loop and the key is likely to be a smi.
+      Property* property = expression()->AsProperty();
+      ASSERT(property != NULL);
+      SmiAnalysis* key_smi_analysis = property->key()->type();
+
+      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+        Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+        // Get the receiver, key and value into registers.
+        Result value = cgen_->frame()->Pop();
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+
+        Result tmp = cgen_->allocator_->Allocate();
+        ASSERT(tmp.is_valid());
+
+        // Determine whether the value is a constant before putting it
+        // in a register.
+        bool value_is_constant = value.is_constant();
+
+        // Make sure that value, key and receiver are in registers.
+        value.ToRegister();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        DeferredReferenceSetKeyedValue* deferred =
+            new DeferredReferenceSetKeyedValue(value.reg(),
+                                               key.reg(),
+                                               receiver.reg());
+
+        // Check that the value is a smi if it is not a constant.  We
+        // can skip the write barrier for smis and constants.
+        if (!value_is_constant) {
+          __ test(value.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+        }
+
+        // Check that the key is a non-negative smi.
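+        // The mask kSmiTagMask | 0x80000000 rejects non-smis and
+        // negative smis in one test, since the sign bit of a smi is
+        // the sign of its value.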
+        __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
+        deferred->Branch(not_zero);
+
+        // Check that the receiver is not a smi.
+        __ test(receiver.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(zero);
+
+        // Check that the receiver is a JSArray.
+        __ mov(tmp.reg(),
+               FieldOperand(receiver.reg(), HeapObject::kMapOffset));
+        __ movzx_b(tmp.reg(),
+                   FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
+        __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+        deferred->Branch(not_equal);
+
+        // Check that the key is within bounds.  Both the key and the
+        // length of the JSArray are smis.
+        __ cmp(key.reg(),
+               FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+        deferred->Branch(greater_equal);
+
+        // Get the elements array from the receiver and check that it
+        // is not a dictionary.
+        __ mov(tmp.reg(),
+               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        // Bind the deferred code patch site to be able to locate the
+        // fixed array map comparison.  When debugging, we patch this
+        // comparison to always fail so that we will hit the IC call
+        // in the deferred code which will allow the debugger to
+        // break for fast case stores.
+        __ bind(deferred->patch_site());
+        __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+               Immediate(Factory::fixed_array_map()));
+        deferred->Branch(not_equal);
+
+        // Store the value.
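+        // The key is still a tagged smi (the index shifted left by
+        // one), so scaling by times_2 yields the pointer-size
+        // (times_4) scaling of the untagged index.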
+        __ mov(Operand(tmp.reg(),
+                       key.reg(),
+                       times_2,
+                       FixedArray::kHeaderSize - kHeapObjectTag),
+               value.reg());
+        __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+        deferred->BindExit();
+
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+      } else {
+        Result answer = cgen_->frame()->CallKeyedStoreIC();
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed store.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
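+// Per ECMA-262 section 9.2, ToBoolean yields false exactly for false,
+// +0, -0, NaN, the empty string, null and undefined; everything else,
+// including every JavaScript object, is true.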
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+  Label false_result, true_result, not_string;
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+  // 'null' => false.
+  __ cmp(eax, Factory::null_value());
+  __ j(equal, &false_result);
+
+  // Get the map and type of the heap object.
+  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+
+  // Undetectable => false.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ and_(ebx, 1 << Map::kIsUndetectable);
+  __ j(not_zero, &false_result);
+
+  // JavaScript object => true.
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(above_equal, &true_result);
+
+  // String value => false iff empty.
+  __ cmp(ecx, FIRST_NONSTRING_TYPE);
+  __ j(above_equal, &not_string);
+  __ and_(ecx, kStringSizeMask);
+  __ cmp(ecx, kShortStringTag);
+  __ j(not_equal, &true_result);  // Empty string is always short.
+  __ mov(edx, FieldOperand(eax, String::kLengthOffset));
+  __ shr(edx, String::kShortLengthShift);
+  __ j(zero, &false_result);
+  __ jmp(&true_result);
+
+  __ bind(&not_string);
+  // HeapNumber => false iff +0, -0, or NaN.
+  __ cmp(edx, Factory::heap_number_map());
+  __ j(not_equal, &true_result);
+  __ fldz();
+  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  __ fucompp();
+  __ push(eax);
+  __ fnstsw_ax();
+  __ sahf();
+  __ pop(eax);
+  __ j(zero, &false_result);
+  // Fall through to |true_result|.
+
+  // Return 1/0 for true/false in eax.
+  __ bind(&true_result);
+  __ mov(eax, 1);
+  __ ret(1 * kPointerSize);
+  __ bind(&false_result);
+  __ mov(eax, 0);
+  __ ret(1 * kPointerSize);
+}
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+  // Perform fast-case smi code for the operation (eax <op> ebx) and
+  // leave result in register eax.
+
+  // Prepare the smi check of both operands by or'ing them together
+  // before checking against the smi mask.
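+  // If either operand has its (low) tag bit set, the or'ed value in
+  // ecx does too, so one test of ecx checks both operands at once.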
+  __ mov(ecx, Operand(ebx));
+  __ or_(ecx, Operand(eax));
+
+  switch (op_) {
+    case Token::ADD:
+      __ add(eax, Operand(ebx));  // add optimistically
+      __ j(overflow, slow, not_taken);
+      break;
+
+    case Token::SUB:
+      __ sub(eax, Operand(ebx));  // subtract optimistically
+      __ j(overflow, slow, not_taken);
+      break;
+
+    case Token::DIV:
+    case Token::MOD:
+      // Sign extend eax into edx:eax.
+      __ cdq();
+      // Check for 0 divisor.
+      __ test(ebx, Operand(ebx));
+      __ j(zero, slow, not_taken);
+      break;
+
+    default:
+      // Fall-through to smi check.
+      break;
+  }
+
+  // Perform the actual smi check.
+  ASSERT(kSmiTag == 0);  // adjust zero check if not the case
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, slow, not_taken);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+      // Do nothing here.
+      break;
+
+    case Token::MUL:
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      ASSERT(kSmiTag == 0);  // adjust code below if not the case
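+      // With eax untagged, eax * ebx == x * (y << 1) == (x * y) << 1,
+      // which is already the correctly tagged product.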
+      // Remove tag from one of the operands (but keep sign).
+      __ sar(eax, kSmiTagSize);
+      // Do multiplication.
+      __ imul(eax, Operand(ebx));  // multiplication of smis; result in eax
+      // Go slow on overflows.
+      __ j(overflow, slow, not_taken);
+      // Check for negative zero result.
+      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
+      break;
+
+    case Token::DIV:
+      // Divide edx:eax by ebx.
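+      // Both operands are still tagged: (x << 1) / (y << 1) leaves
+      // the untagged quotient x / y in eax, and the remainder in edx
+      // is still an even (tagged) value.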
+      __ idiv(ebx);
+      // Check for the corner case of dividing the most negative smi
+      // by -1. We cannot use the overflow flag, since it is not set
+      // by the idiv instruction.
+      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(eax, 0x40000000);
+      __ j(equal, slow);
+      // Check for negative zero result.
+      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      __ j(not_zero, slow);
+      // Tag the result and store it in register eax.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
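+      // lea computes eax + eax == eax << 1, shifting in the zero tag
+      // bit without a separate shift instruction.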
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      break;
+
+    case Token::MOD:
+      // Divide edx:eax by ebx.
+      __ idiv(ebx);
+      // Check for negative zero result.
+      __ NegativeZeroTest(edx, ecx, slow);  // use ecx = x | y
+      // Move remainder to register eax.
+      __ mov(eax, Operand(edx));
+      break;
+
+    case Token::BIT_OR:
+      __ or_(eax, Operand(ebx));
+      break;
+
+    case Token::BIT_AND:
+      __ and_(eax, Operand(ebx));
+      break;
+
+    case Token::BIT_XOR:
+      __ xor_(eax, Operand(ebx));
+      break;
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Move the second operand into register ecx.
+      __ mov(ecx, Operand(ebx));
+      // Remove tags from operands (but keep sign).
+      __ sar(eax, kSmiTagSize);
+      __ sar(ecx, kSmiTagSize);
+      // Perform the operation.
+      switch (op_) {
+        case Token::SAR:
+          __ sar(eax);
+          // No checks of result necessary
+          break;
+        case Token::SHR:
+          __ shr(eax);
+          // Check that the *unsigned* result fits in a smi.
+          // Neither of the two high-order bits can be set:
+          // - 0x80000000: high bit would be lost when smi tagging.
+          // - 0x40000000: this number would convert to negative when
+          //   smi tagging.
+          // These two cases can only happen with shifts by 0 or 1
+          // when handed a valid smi.
+          __ test(eax, Immediate(0xc0000000));
+          __ j(not_zero, slow, not_taken);
+          break;
+        case Token::SHL:
+          __ shl(eax);
+          // Check that the *signed* result fits in a smi.
+          __ cmp(eax, 0xc0000000);
+          __ j(sign, slow, not_taken);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      // Tag the result and store it in register eax.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (flags_ == SMI_CODE_IN_STUB) {
+    // The fast case smi code wasn't inlined in the stub caller
+    // code. Generate it here to speed up common operations.
+    Label slow;
+    __ mov(ebx, Operand(esp, 1 * kPointerSize));  // get y
+    __ mov(eax, Operand(esp, 2 * kPointerSize));  // get x
+    GenerateSmiCode(masm, &slow);
+    __ ret(2 * kPointerSize);  // remove both operands
+
+    // Too bad. The fast case smi code didn't succeed.
+    __ bind(&slow);
+  }
+
+  // Setup registers.
+  __ mov(eax, Operand(esp, 1 * kPointerSize));  // get y
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // get x
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      // eax: y
+      // edx: x
+
+      if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+        CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+        FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+            __ mov(eax, Operand(edx));
+            // Fall through!
+          case OVERWRITE_RIGHT:
+            // If the argument in eax is already an object, we skip the
+            // allocation of a heap number.
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm,
+                                                    &call_runtime,
+                                                    ecx,
+                                                    edx,
+                                                    eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(2 * kPointerSize);
+
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+            __ mov(eax, Operand(edx));
+            // Fall through!
+          case OVERWRITE_RIGHT:
+            // If the argument in eax is already an object, we skip the
+            // allocation of a heap number.
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm,
+                                                    &call_runtime,
+                                                    ecx,
+                                                    edx,
+                                                    eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(2 * kPointerSize);
+      }
+    }
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+      FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+      Label skip_allocation, non_smi_result, operand_conversion_failure;
+
+      // Reserve space for converted numbers.
+      __ sub(Operand(esp), Immediate(2 * kPointerSize));
+
+      if (use_sse3_) {
+        // Truncate the operands to 32-bit integers and check for
+        // exceptions in doing so.
+        CpuFeatures::Scope scope(CpuFeatures::SSE3);
+        __ fisttp_s(Operand(esp, 0 * kPointerSize));
+        __ fisttp_s(Operand(esp, 1 * kPointerSize));
+        __ fnstsw_ax();
+        __ test(eax, Immediate(1));
+        __ j(not_zero, &operand_conversion_failure);
+      } else {
+        // Check if right operand is int32.
+        __ fist_s(Operand(esp, 0 * kPointerSize));
+        __ fild_s(Operand(esp, 0 * kPointerSize));
+        __ fucompp();
+        __ fnstsw_ax();
+        __ sahf();
+        __ j(not_zero, &operand_conversion_failure);
+        __ j(parity_even, &operand_conversion_failure);
+
+        // Check if left operand is int32.
+        __ fist_s(Operand(esp, 1 * kPointerSize));
+        __ fild_s(Operand(esp, 1 * kPointerSize));
+        __ fucompp();
+        __ fnstsw_ax();
+        __ sahf();
+        __ j(not_zero, &operand_conversion_failure);
+        __ j(parity_even, &operand_conversion_failure);
+      }
+
+      // Get int32 operands and perform bitop.
+      __ pop(ecx);
+      __ pop(eax);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar(eax); break;
+        case Token::SHL: __ shl(eax); break;
+        case Token::SHR: __ shr(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &non_smi_result);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      __ ret(2 * kPointerSize);
+
+      // All ops except SHR return a signed int32 that we load in a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
+                                                    ecx, edx, eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        __ mov(Operand(esp, 1 * kPointerSize), ebx);
+        __ fild_s(Operand(esp, 1 * kPointerSize));
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(2 * kPointerSize);
+      }
+
+      // Clear the FPU exception flag and reset the stack before calling
+      // the runtime system.
+      __ bind(&operand_conversion_failure);
+      __ add(Operand(esp), Immediate(2 * kPointerSize));
+      if (use_sse3_) {
+        // If we've used the SSE3 instructions for truncating the
+        // floating point values to integers and it failed, we have a
+        // pending #IA exception. Clear it.
+        __ fnclex();
+      } else {
+        // The non-SSE3 variant does early bailout if the right
+        // operand isn't a 32-bit integer, so we may have a single
+        // value on the FPU stack we need to get rid of.
+        __ ffree(0);
+      }
+
+      // SHR should return uint32 - go to runtime for non-smi/negative result.
+      if (op_ == Token::SHR) {
+        __ bind(&non_smi_result);
+      }
+      __ mov(eax, Operand(esp, 1 * kPointerSize));
+      __ mov(edx, Operand(esp, 2 * kPointerSize));
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If all else fails, use the runtime system to get the correct
+  // result.
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD: {
+      // Test for string arguments before calling runtime.
+      Label not_strings, not_string1, string1;
+      __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
+      __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(zero, &not_string1);
+      __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
+      __ j(above_equal, &not_string1);
+
+      // First argument is a string; test the second.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, &string1);
+      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+      __ j(above_equal, &string1);
+
+      // First and second argument are strings.
+      __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+
+      // Only first argument is a string.
+      __ bind(&string1);
+      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+      // First argument was not a string, test second.
+      __ bind(&not_string1);
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, &not_strings);
+      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+      __ j(above_equal, &not_strings);
+
+      // Only second argument is a string.
+      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+      __ bind(&not_strings);
+      // Neither argument is a string.
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    }
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
+                                             Label* need_gc,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Register result) {
+  // Allocate heap number in new space.
+  __ AllocateInNewSpace(HeapNumber::kSize,
+                        result,
+                        scratch1,
+                        scratch2,
+                        need_gc,
+                        TAG_OBJECT);
+
+  // Set the map.
+  __ mov(FieldOperand(result, HeapObject::kMapOffset),
+         Immediate(Factory::heap_number_map()));
+}
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+                                           Register number) {
+  Label load_smi, done;
+
+  __ test(number, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi, not_taken);
+  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi);
+  __ sar(number, kSmiTagSize);
+  __ push(number);
+  __ fild_s(Operand(esp, 0));
+  __ pop(number);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+                                           Label* not_numbers) {
+  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+  // Load operand in edx into xmm0, or branch to not_numbers.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(not_equal, not_numbers);  // Argument in edx is not a number.
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ bind(&load_eax);
+  // Load operand in eax into xmm1, or branch to not_numbers.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(equal, &load_float_eax);
+  __ jmp(not_numbers);  // Argument in eax is not a number.
+  __ bind(&load_smi_edx);
+  __ sar(edx, 1);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ shl(edx, 1);  // Retag smi for heap number overwriting test.
+  __ jmp(&load_eax);
+  __ bind(&load_smi_eax);
+  __ sar(eax, 1);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm1, Operand(eax));
+  __ shl(eax, 1);  // Retag smi for heap number overwriting test.
+  __ jmp(&done);
+  __ bind(&load_float_eax);
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+                                            Register scratch) {
+  Label load_smi_1, load_smi_2, done_load_1, done;
+  __ mov(scratch, Operand(esp, 2 * kPointerSize));
+  __ test(scratch, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_1, not_taken);
+  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+  __ bind(&done_load_1);
+
+  __ mov(scratch, Operand(esp, 1 * kPointerSize));
+  __ test(scratch, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_2, not_taken);
+  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_1);
+  __ sar(scratch, kSmiTagSize);
+  __ push(scratch);
+  __ fild_s(Operand(esp, 0));
+  __ pop(scratch);
+  __ jmp(&done_load_1);
+
+  __ bind(&load_smi_2);
+  __ sar(scratch, kSmiTagSize);
+  __ push(scratch);
+  __ fild_s(Operand(esp, 0));
+  __ pop(scratch);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+                                             Label* non_float,
+                                             Register scratch) {
+  Label test_other, done;
+  // Test whether both operands are numbers (smis or heap numbers),
+  // using scratch as a temporary; jump to non_float if either is not.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &test_other, not_taken);  // argument in edx is OK
+  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+  __ cmp(scratch, Factory::heap_number_map());
+  __ j(not_equal, non_float);  // argument in edx is not a number -> NaN
+
+  __ bind(&test_other);
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &done);  // argument in eax is OK
+  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(scratch, Factory::heap_number_map());
+  __ j(not_equal, non_float);  // argument in eax is not a number -> NaN
+
+  // Fall-through: Both operands are numbers.
+  __ bind(&done);
+}
+
+
+void UnarySubStub::Generate(MacroAssembler* masm) {
+  Label undo;
+  Label slow;
+  Label done;
+  Label try_float;
+
+  // Check whether the value is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &try_float, not_taken);
+
+  // Enter runtime system if the value of the expression is zero
+  // to make sure that we switch between 0 and -0.
+  __ test(eax, Operand(eax));
+  __ j(zero, &slow, not_taken);
+
+  // The value of the expression is a smi that is not zero.  Try
+  // optimistic subtraction '0 - value'.
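+  // This can only overflow for the most negative smi (0x80000000 when
+  // tagged), whose negation is not representable; in that case we
+  // undo the subtraction and take the slow path.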
+  __ mov(edx, Operand(eax));
+  __ Set(eax, Immediate(0));
+  __ sub(eax, Operand(edx));
+  __ j(overflow, &undo, not_taken);
+
+  // If result is a smi we are done.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &done, taken);
+
+  // Restore eax and enter runtime system.
+  __ bind(&undo);
+  __ mov(eax, Operand(edx));
+
+  // Enter runtime system.
+  __ bind(&slow);
+  __ pop(ecx);  // pop return address
+  __ push(eax);
+  __ push(ecx);  // push return address
+  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+
+  // Try floating point case.
+  __ bind(&try_float);
+  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(edx, Factory::heap_number_map());
+  __ j(not_equal, &slow);
+  if (overwrite_) {
+    __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+    __ xor_(edx, HeapNumber::kSignMask);  // Flip sign.
+    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+  } else {
+    __ mov(edx, Operand(eax));
+    // edx: operand
+    FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+    // eax: allocated 'empty' number
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+    __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
+    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+    __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+  }
+
+  __ bind(&done);
+
+  __ StubReturn(1);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor);
+
+  // Nothing to do: The formal number of parameters has already been
+  // passed in register eax by calling function. Just return it.
+  __ ret(0);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame and return it.
+  __ bind(&adaptor);
+  __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ret(0);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  // The key is in edx and the parameter count is in eax.
+
+  // The displacement is used for skipping the frame pointer on the
+  // stack. It is the offset of the last parameter (if any) relative
+  // to the frame pointer.
+  static const int kDisplacement = 1 * kPointerSize;
+
+  // Check that the key is a smi.
+  Label slow;
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow, not_taken);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor);
+
+  // Check index against formal parameters count limit passed in
+  // through register eax. Use unsigned comparison to get negative
+  // check for free.
+  __ cmp(edx, Operand(eax));
+  __ j(above_equal, &slow, not_taken);
+
+  // Read the argument from the stack and return it.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
+  __ lea(ebx, Operand(ebp, eax, times_2, 0));
+  __ neg(edx);
+  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
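+  // (eax holds the parameter count as a smi, i.e. already shifted left by
+  // one, so the times_2 scaling yields count * kPointerSize; the negated
+  // smi key scales the same way to index backwards from ebx.)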
+  __ ret(0);
+
+  // Arguments adaptor case: Check index against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ cmp(edx, Operand(ecx));
+  __ j(above_equal, &slow, not_taken);
+
+  // Read the argument from the stack and return it.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
+  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
+  __ neg(edx);
+  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+  __ ret(0);
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ pop(ebx);  // Return address.
+  __ push(edx);
+  __ push(ebx);
+  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // The displacement is used for skipping the return address and the
+  // frame pointer on the stack. It is the offset of the last
+  // parameter (if any) relative to the frame pointer.
+  static const int kDisplacement = 2 * kPointerSize;
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &runtime);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ mov(Operand(esp, 1 * kPointerSize), ecx);
+  __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+  __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+  Label call_builtin, done;
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  if (cc_ == equal) {  // Both strict and non-strict.
+    Label slow;  // Fallthrough label.
+    // Equality is almost reflexive (everything but NaN), so start by testing
+    // for "identity and not NaN".
+    {
+      Label not_identical;
+      __ cmp(eax, Operand(edx));
+      __ j(not_equal, &not_identical);
+      // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+      // so we do the second best thing - test it ourselves.
+
+      Label return_equal;
+      Label heap_number;
+      // If it's not a heap number, then return equal.
+      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+             Immediate(Factory::heap_number_map()));
+      __ j(equal, &heap_number);
+      __ bind(&return_equal);
+      __ Set(eax, Immediate(0));
+      __ ret(0);
+
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if it's
+      // not NaN.
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
+      // Read top bits of double representation (second word of value).
+      __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
+      // Test that exponent bits are all set.
+      __ not_(eax);
+      __ test(eax, Immediate(0x7ff00000));
+      __ j(not_zero, &return_equal);
+      __ not_(eax);
+
+      // Shift out flag and all exponent bits, retaining only mantissa.
+      __ shl(eax, 12);
+      // Or with all low-bits of mantissa.
+      __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
+      // Return zero (equal) if all bits in the mantissa are zero (it's an
+      // Infinity) and non-zero if not (it's a NaN).
+      __ ret(0);
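+      // (For example, +Infinity has high word 0x7FF00000 and a zero
+      // mantissa, so shifting out the sign and exponent bits and or'ing
+      // with the low mantissa word gives zero; any NaN has a mantissa bit
+      // set somewhere and produces a non-zero result.)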
+
+      __ bind(&not_identical);
+    }
+
+    // If we're doing a strict equality comparison, we don't have to do
+    // type conversion, so we generate code to do fast comparison for objects
+    // and oddballs. Non-smi numbers and strings still go through the usual
+    // slow-case code.
+    if (strict_) {
+      // If either is a Smi (we know that not both are), then they can only
+      // be equal if the other is a HeapNumber. If so, use the slow case.
+      {
+        Label not_smis;
+        ASSERT_EQ(0, kSmiTag);
+        ASSERT_EQ(0, Smi::FromInt(0));
+        __ mov(ecx, Immediate(kSmiTagMask));
+        __ and_(ecx, Operand(eax));
+        __ test(ecx, Operand(edx));
+        __ j(not_zero, &not_smis);
+        // One operand is a smi.
+
+        // Check whether the non-smi is a heap number.
+        ASSERT_EQ(1, kSmiTagMask);
+        // ecx still holds eax & kSmiTagMask, which is either zero or one.
+        __ sub(Operand(ecx), Immediate(0x01));
+        __ mov(ebx, edx);
+        __ xor_(ebx, Operand(eax));
+        __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
+        __ xor_(ebx, Operand(eax));
+        // if eax was smi, ebx is now edx, else eax.
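+        // (This is the branch-free select r = x ^ ((x ^ y) & mask): with
+        // mask = 0 it yields x, with mask = -1 it yields y.  Here mask is
+        // (eax & kSmiTagMask) - 1, which is -1 exactly when eax is a smi,
+        // so ebx ends up holding whichever operand is not a smi.)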
+
+        // Check if the non-smi operand is a heap number.
+        __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+               Immediate(Factory::heap_number_map()));
+        // If heap number, handle it in the slow case.
+        __ j(equal, &slow);
+        // Return non-equal (ebx is not zero)
+        __ mov(eax, ebx);
+        __ ret(0);
+
+        __ bind(&not_smis);
+      }
+
+      // If either operand is a JSObject or an oddball value, then they are
+      // not equal since their pointers are different.
+      // There is no test for undetectability in strict equality.
+
+      // Get the type of the first operand.
+      __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+
+      // If the first object is a JS object, we have done pointer comparison.
+      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      Label first_non_object;
+      __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+      __ j(less, &first_non_object);
+
+      // Return non-zero (eax is not zero)
+      Label return_not_equal;
+      ASSERT(kHeapObjectTag != 0);
+      __ bind(&return_not_equal);
+      __ ret(0);
+
+      __ bind(&first_non_object);
+      // Check for oddballs: true, false, null, undefined.
+      __ cmp(ecx, ODDBALL_TYPE);
+      __ j(equal, &return_not_equal);
+
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+
+      __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+      __ j(greater_equal, &return_not_equal);
+
+      // Check for oddballs: true, false, null, undefined.
+      __ cmp(ecx, ODDBALL_TYPE);
+      __ j(equal, &return_not_equal);
+
+      // Fall through to the general case.
+    }
+    __ bind(&slow);
+  }
+
+  // Push arguments below the return address.
+  __ pop(ecx);
+  __ push(eax);
+  __ push(edx);
+  __ push(ecx);
+
+  // Inlined floating point compare.
+  // Call builtin if operands are not floating point or smi.
+  Label check_for_symbols;
+  Label unordered;
+  if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+    CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+    CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+
+    FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+    __ comisd(xmm0, xmm1);
+
+    // Jump to builtin for NaN.
+    __ j(parity_even, &unordered, not_taken);
+    __ mov(eax, 0);  // equal
+    __ mov(ecx, Immediate(Smi::FromInt(1)));
+    __ cmov(above, eax, Operand(ecx));
+    __ mov(ecx, Immediate(Smi::FromInt(-1)));
+    __ cmov(below, eax, Operand(ecx));
+    __ ret(2 * kPointerSize);
+  } else {
+    FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
+    FloatingPointHelper::LoadFloatOperands(masm, ecx);
+    __ FCmp();
+
+    // Jump to builtin for NaN.
+    __ j(parity_even, &unordered, not_taken);
+
+    Label below_lbl, above_lbl;
+    // Return a result of -1, 0, or 1 to indicate the result of the
+    // comparison.
+    __ j(below, &below_lbl, not_taken);
+    __ j(above, &above_lbl, not_taken);
+
+    __ xor_(eax, Operand(eax));  // equal
+    // Both arguments were pushed in case a runtime call was needed.
+    __ ret(2 * kPointerSize);
+
+    __ bind(&below_lbl);
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+    __ ret(2 * kPointerSize);
+
+    __ bind(&above_lbl);
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+    __ ret(2 * kPointerSize);  // eax, edx were pushed
+  }
+  // If one of the numbers was NaN, then the result is always false.
+  // The cc is never not-equal.
+  __ bind(&unordered);
+  ASSERT(cc_ != not_equal);
+  if (cc_ == less || cc_ == less_equal) {
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+  } else {
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+  }
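+  // (Returning 1 (greater) when the condition is less or less_equal, and
+  // -1 (less) otherwise, guarantees that the final condition check fails,
+  // as required for any comparison involving NaN.)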
+  __ ret(2 * kPointerSize);  // eax, edx were pushed
+
+  // Fast negative check for symbol-to-symbol equality.
+  __ bind(&check_for_symbols);
+  if (cc_ == equal) {
+    BranchIfNonSymbol(masm, &call_builtin, eax, ecx);
+    BranchIfNonSymbol(masm, &call_builtin, edx, ecx);
+
+    // We've already checked for object identity, so if both operands
+    // are symbols they aren't equal. Register eax already holds a
+    // non-zero value, which indicates not equal, so just return.
+    __ ret(2 * kPointerSize);
+  }
+
+  __ bind(&call_builtin);
+  // Must swap the argument order before calling the builtin.
+  __ pop(ecx);
+  __ pop(edx);
+  __ pop(eax);
+  __ push(edx);
+  __ push(eax);
+
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript builtin;
+  if (cc_ == equal) {
+    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    builtin = Builtins::COMPARE;
+    int ncr;  // NaN compare result
+    if (cc_ == less || cc_ == less_equal) {
+      ncr = GREATER;
+    } else {
+      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
+      ncr = LESS;
+    }
+    __ push(Immediate(Smi::FromInt(ncr)));
+  }
+
+  // Restore return address on the stack.
+  __ push(ecx);
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+                                    Label* label,
+                                    Register object,
+                                    Register scratch) {
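+  // Jump to 'label' unless 'object' is a symbol.  A symbol is a string
+  // (kStringTag) whose symbol bit (kSymbolTag) is set, so a single masked
+  // compare of the instance type checks both properties at once.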
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(zero, label);
+  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+  __ cmp(scratch, kSymbolTag | kStringTag);
+  __ j(not_equal, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  // Because builtins always remove the receiver from the stack, we
+  // have to fake one to avoid underflowing the stack. The receiver
+  // must be inserted below the return address on the stack so we
+  // temporarily store that in a register.
+  __ pop(eax);
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(eax);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+  Label slow;
+
+  // Get the function to call from the stack.
+  // +2 ~ receiver, return address
+  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
+  // Check that the function really is a JavaScript function.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  // Go to the slow case if we do not have a function.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &slow, not_taken);
+
+  // Fast-case: Just invoke the function.
+  ParameterCount actual(argc_);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+  // Slow-case: Non-function called.
+  __ bind(&slow);
+  __ Set(eax, Immediate(argc_));
+  __ Set(ebx, Immediate(0));
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+  Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+  __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+int CEntryStub::MinorKey() {
+  ASSERT(result_size_ <= 2);
+  // Result returned in eax, or eax+edx if result_size_ is 2.
+  return 0;
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // eax holds the exception.
+
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(esp, Operand::StaticVariable(handler_address));
+
+  // Restore next handler and frame pointer, discard handler state.
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(Operand::StaticVariable(handler_address));
+  ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  __ pop(ebp);
+  __ pop(edx);  // Remove state.
+
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of
+  // a JS entry frame.
+  __ xor_(esi, Operand(esi));  // Tentatively set context pointer to NULL.
+  Label skip;
+  __ cmp(ebp, 0);
+  __ j(equal, &skip, not_taken);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ bind(&skip);
+
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  __ ret(0);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_termination_exception,
+                              Label* throw_out_of_memory_exception,
+                              StackFrame::Type frame_type,
+                              bool do_gc,
+                              bool always_allocate_scope) {
+  // eax: result parameter for PerformGC, if any
+  // ebx: pointer to C function  (C callee-saved)
+  // ebp: frame pointer  (restored after C call)
+  // esp: stack pointer  (restored after C call)
+  // edi: number of arguments including receiver  (C callee-saved)
+  // esi: pointer to the first argument (C callee-saved)
+
+  if (do_gc) {
+    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
+    __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth();
+  if (always_allocate_scope) {
+    __ inc(Operand::StaticVariable(scope_depth));
+  }
+
+  // Call C function.
+  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
+  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
+  __ call(Operand(ebx));
+  // Result is in eax or edx:eax - do not destroy these registers!
+
+  if (always_allocate_scope) {
+    __ dec(Operand::StaticVariable(scope_depth));
+  }
+
+  // Make sure we're not trying to return 'the hole' from the runtime
+  // call as this may lead to crashes in the IC code later.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ cmp(eax, Factory::the_hole_value());
+    __ j(not_equal, &okay);
+    __ int3();
+    __ bind(&okay);
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  __ lea(ecx, Operand(eax, 1));
+  // Lower 2 bits of ecx are 0 iff eax has failure tag.
+  __ test(ecx, Immediate(kFailureTagMask));
+  __ j(zero, &failure_returned, not_taken);
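+  // (Failure objects are tagged so that adding one clears the low tag
+  // bits -- see the ASSERT above -- which lets the tag be detected without
+  // clobbering the result in eax.)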
+
+  // Exit the JavaScript to C++ exit frame.
+  __ LeaveExitFrame(frame_type);
+  __ ret(0);
+
+  // Handling of failure.
+  __ bind(&failure_returned);
+
+  Label retry;
+  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
+  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+  __ j(zero, &retry, taken);
+
+  // Special handling of out of memory exceptions.
+  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+  __ j(equal, throw_out_of_memory_exception);
+
+  // Retrieve the pending exception and clear the variable.
+  ExternalReference pending_exception_address(Top::k_pending_exception_address);
+  __ mov(eax, Operand::StaticVariable(pending_exception_address));
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+  // Special handling of termination exceptions, which are uncatchable
+  // by JavaScript code.
+  __ cmp(eax, Factory::termination_exception());
+  __ j(equal, throw_termination_exception);
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  // Retry.
+  __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop sp to the top stack handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(esp, Operand::StaticVariable(handler_address));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  __ bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
+  __ j(equal, &done);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  __ mov(esp, Operand(esp, kNextOffset));
+  __ jmp(&loop);
+  __ bind(&done);
+
+  // Set the top handler address to the next handler past the current
+  // ENTRY handler.
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(Operand::StaticVariable(handler_address));
+
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(Top::k_external_caught_exception_address);
+    __ mov(eax, false);
+    __ mov(Operand::StaticVariable(external_caught), eax);
+
+    // Set pending exception and eax to out of memory exception.
+    ExternalReference pending_exception(Top::k_pending_exception_address);
+    __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+    __ mov(Operand::StaticVariable(pending_exception), eax);
+  }
+
+  // Clear the context pointer.
+  __ xor_(esi, Operand(esi));
+
+  // Restore fp from handler and discard handler state.
+  ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  __ pop(ebp);
+  __ pop(edx);  // State.
+
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  __ ret(0);
+}
+
+
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+  // eax: number of arguments including receiver
+  // ebx: pointer to C function  (C callee-saved)
+  // ebp: frame pointer  (restored after C call)
+  // esp: stack pointer  (restored after C call)
+  // esi: current context (C callee-saved)
+  // edi: JS function of the caller (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects instead
+  // of a proper result. The builtin entry handles this by performing
+  // a garbage collection and retrying the builtin (twice).
+
+  StackFrame::Type frame_type = is_debug_break ?
+      StackFrame::EXIT_DEBUG :
+      StackFrame::EXIT;
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(frame_type);
+
+  // eax: result parameter for PerformGC, if any (setup below)
+  // ebx: pointer to builtin function  (C callee-saved)
+  // ebp: frame pointer  (restored after C call)
+  // esp: stack pointer  (restored after C call)
+  // edi: number of arguments including receiver (C callee-saved)
+  // esi: argv pointer (C callee-saved)
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  Label not_outermost_js, not_outermost_js_2;
+#endif
+
+  // Setup frame.
+  __ push(ebp);
+  __ mov(ebp, Operand(esp));
+
+  // Push marker in two places.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ push(Immediate(Smi::FromInt(marker)));  // context slot
+  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  // Save callee-saved registers (C calling conventions).
+  __ push(edi);
+  __ push(esi);
+  __ push(ebx);
+
+  // Save copies of the top frame descriptor on the stack.
+  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+  __ push(Operand::StaticVariable(c_entry_fp));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If this is the outermost JS call, set js_entry_sp value.
+  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+  __ j(not_equal, &not_outermost_js);
+  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+  __ bind(&not_outermost_js);
+#endif
+
+  // Call a faked try-block that does the invoke.
+  __ call(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ mov(Operand::StaticVariable(pending_exception), eax);
+  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+  __ jmp(&exit);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+  // Clear any pending exceptions.
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ mov(Operand::StaticVariable(pending_exception), edx);
+
+  // Fake a receiver (NULL).
+  __ push(Immediate(0));  // receiver
+
+  // Invoke the function by calling through JS entry trampoline
+  // builtin and pop the faked function when we return. Notice that we
+  // cannot store a reference to the trampoline code directly in this
+  // stub, because the builtin stubs may not have been generated yet.
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ mov(edx, Immediate(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ mov(edx, Immediate(entry));
+  }
+  __ mov(edx, Operand(edx, 0));  // deref address
+  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+  __ call(Operand(edx));
+
+  // Unlink this frame from the handler chain.
+  __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  // Discard the rest of the handler state; the next-handler word was
+  // popped above.
+  __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the current EBP value is the same as the js_entry_sp value, the
+  // current function is the outermost, so js_entry_sp must be cleared.
+  __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+  __ j(not_equal, &not_outermost_js_2);
+  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+  __ bind(&not_outermost_js_2);
+#endif
+
+  // Restore the top frame descriptor from the stack.
+  __ bind(&exit);
+  __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+
+  // Restore callee-saved registers (C calling conventions).
+  __ pop(ebx);
+  __ pop(esi);
+  __ pop(edi);
+  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
+
+  // Restore frame pointer and return.
+  __ pop(ebp);
+  __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Get the object - go slow case if it's a smi.
+  Label slow;
+  __ mov(eax, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, function
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Check that the left-hand side is a JS object.
+  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));  // eax - object map
+  __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset));  // ecx - type
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &slow, not_taken);
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ j(greater, &slow, not_taken);
+
+  // Get the prototype of the function.
+  __ mov(edx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
+  __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+
+  // Check that the function prototype is a JS object.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &slow, not_taken);
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ j(greater, &slow, not_taken);
+
+  // Register mapping: eax is object map and ebx is function prototype.
+  __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
+
+  // Loop through the prototype chain looking for the function prototype.
+  Label loop, is_instance, is_not_instance;
+  __ bind(&loop);
+  __ cmp(ecx, Operand(ebx));
+  __ j(equal, &is_instance);
+  __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+  __ j(equal, &is_not_instance);
+  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+  __ jmp(&loop);
+
+  __ bind(&is_instance);
+  __ Set(eax, Immediate(0));
+  __ ret(2 * kPointerSize);
+
+  __ bind(&is_not_instance);
+  __ Set(eax, Immediate(Smi::FromInt(1)));
+  __ ret(2 * kPointerSize);
+
+  // Slow-case: Go through the JavaScript implementation.
+  __ bind(&slow);
+  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16 bit value.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
+  return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+}
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
new file mode 100644
index 0000000..142a5a1
--- /dev/null
+++ b/src/ia32/codegen-ia32.h
@@ -0,0 +1,664 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_CODEGEN_IA32_H_
+#define V8_IA32_CODEGEN_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
+class Reference BASE_EMBEDDED {
+ public:
+  // The numeric values of the types are important; see size().
+  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen, Expression* expression);
+  ~Reference();
+
+  Expression* expression() const { return expression_; }
+  Type type() const { return type_; }
+  void set_type(Type value) {
+    ASSERT(type_ == ILLEGAL);
+    type_ = value;
+  }
+
+  // The size the reference takes up on the stack.
+  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+
+  bool is_illegal() const { return type_ == ILLEGAL; }
+  bool is_slot() const { return type_ == SLOT; }
+  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+
+  // Return the name.  Only valid for named property references.
+  Handle<String> GetName();
+
+  // Generate code to push the value of the reference on top of the
+  // expression stack.  The reference is expected to be already on top of
+  // the expression stack, and it is left in place with its value above it.
+  void GetValue(TypeofState typeof_state);
+
+  // Like GetValue except that the slot is expected to be written to before
+  // being read from again.  The value of the reference may be invalidated,
+  // causing subsequent attempts to read it to fail.
+  void TakeValue(TypeofState typeof_state);
+
+  // Generate code to store the value on top of the expression stack in the
+  // reference.  The reference is expected to be immediately below the value
+  // on the expression stack.  The stored value is left in place (with the
+  // reference intact below it) to support chained assignments.
+  void SetValue(InitState init_state);
+
+ private:
+  CodeGenerator* cgen_;
+  Expression* expression_;
+  Type type_;
+};
+
+
+// -------------------------------------------------------------------------
+// Control destinations.
+
+// A control destination encapsulates a pair of jump targets and a
+// flag indicating which one is the preferred fall-through.  The
+// preferred fall-through must be unbound, the other may be already
+// bound (ie, a backward target).
+//
+// The true and false targets may be jumped to unconditionally or
+// control may split conditionally.  Unconditional jumping and
+// splitting should be emitted in tail position (as the last thing
+// when compiling an expression) because they can cause either label
+// to be bound or the non-fall through to be jumped to leaving an
+// invalid virtual frame.
+//
+// The labels in the control destination can be extracted and
+// manipulated normally without affecting the state of the
+// destination.
+
+class ControlDestination BASE_EMBEDDED {
+ public:
+  ControlDestination(JumpTarget* true_target,
+                     JumpTarget* false_target,
+                     bool true_is_fall_through)
+      : true_target_(true_target),
+        false_target_(false_target),
+        true_is_fall_through_(true_is_fall_through),
+        is_used_(false) {
+    ASSERT(true_is_fall_through ? !true_target->is_bound()
+                                : !false_target->is_bound());
+  }
+
+  // Accessors for the jump targets.  Directly jumping or branching to
+  // or binding the targets will not update the destination's state.
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+  // True if the destination has been jumped to unconditionally or
+  // control has been split to both targets.  This predicate does not
+  // test whether the targets have been extracted and manipulated as
+  // raw jump targets.
+  bool is_used() const { return is_used_; }
+
+  // True if the destination is used and the true target (respectively
+  // false target) was the fall through.  If the target is backward,
+  // "fall through" included jumping unconditionally to it.
+  bool true_was_fall_through() const {
+    return is_used_ && true_is_fall_through_;
+  }
+
+  bool false_was_fall_through() const {
+    return is_used_ && !true_is_fall_through_;
+  }
+
+  // Emit a branch to one of the true or false targets, and bind the
+  // other target.  Because this binds the fall-through target, it
+  // should be emitted in tail position (as the last thing when
+  // compiling an expression).
+  void Split(Condition cc) {
+    ASSERT(!is_used_);
+    if (true_is_fall_through_) {
+      false_target_->Branch(NegateCondition(cc));
+      true_target_->Bind();
+    } else {
+      true_target_->Branch(cc);
+      false_target_->Bind();
+    }
+    is_used_ = true;
+  }
+
+  // Emit an unconditional jump in tail position, to the true target
+  // (if the argument is true) or the false target.  The "jump" will
+  // actually bind the jump target if it is forward, jump to it if it
+  // is backward.
+  void Goto(bool where) {
+    ASSERT(!is_used_);
+    JumpTarget* target = where ? true_target_ : false_target_;
+    if (target->is_bound()) {
+      target->Jump();
+    } else {
+      target->Bind();
+    }
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Mark this jump target as used as if Goto had been called, but
+  // without generating a jump or binding a label (the control effect
+  // should have already happened).  This is used when the left
+  // subexpression of a short-circuit boolean operator is compiled.
+  void Use(bool where) {
+    ASSERT(!is_used_);
+    ASSERT((where ? true_target_ : false_target_)->is_bound());
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Swap the true and false targets but keep the same actual label as
+  // the fall through.  This is used when compiling negated
+  // expressions, where we want to swap the targets but preserve the
+  // state.
+  void Invert() {
+    JumpTarget* temp_target = true_target_;
+    true_target_ = false_target_;
+    false_target_ = temp_target;
+
+    true_is_fall_through_ = !true_is_fall_through_;
+  }
+
+ private:
+  // True and false jump targets.
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+
+  // Before using the destination: true if the true target is the
+  // preferred fall through, false if the false target is.  After
+  // using the destination: true if the true target was actually used
+  // as the fall through, false if the false target was.
+  bool true_is_fall_through_;
+
+  // True if the Split or Goto functions have been called.
+  bool is_used_;
+};
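+// A minimal illustrative use (a sketch; the jump target names are
+// hypothetical): split control on a condition with the true target
+// preferred as the fall-through:
+//
+//   JumpTarget if_true;
+//   JumpTarget if_false;
+//   ControlDestination dest(&if_true, &if_false, true);
+//   // ... emit a comparison that sets the condition flags ...
+//   dest.Split(equal);  // Branches to if_false on not_equal and binds
+//                       // if_true as the fall-through.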
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the jump target pair).  It is threaded through
+// the call stack.  Constructing a state implicitly pushes it on the owning
+// code generator's stack of states, and destroying one implicitly pops it.
+//
+// The code generator state is only used for expressions, so statements have
+// the initial state.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+  // Create an initial code generator state.  Destroying the initial state
+  // leaves the code generator with a NULL state.
+  explicit CodeGenState(CodeGenerator* owner);
+
+  // Create a code generator state based on a code generator's current
+  // state.  The new state may or may not be inside a typeof, and has its
+  // own control destination.
+  CodeGenState(CodeGenerator* owner,
+               TypeofState typeof_state,
+               ControlDestination* destination);
+
+  // Destroy a code generator state and restore the owning code generator's
+  // previous state.
+  ~CodeGenState();
+
+  // Accessors for the state.
+  TypeofState typeof_state() const { return typeof_state_; }
+  ControlDestination* destination() const { return destination_; }
+
+ private:
+  // The owning code generator.
+  CodeGenerator* owner_;
+
+  // A flag indicating whether we are compiling the immediate subexpression
+  // of a typeof expression.
+  TypeofState typeof_state_;
+
+  // A control destination in case the expression has a control-flow
+  // effect.
+  ControlDestination* destination_;
+
+  // The previous state of the owning code generator, restored when
+  // this state is destroyed.
+  CodeGenState* previous_;
+};
+
+
+// -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+  NO_ARGUMENTS_ALLOCATION,
+  EAGER_ARGUMENTS_ALLOCATION,
+  LAZY_ARGUMENTS_ALLOCATION
+};
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+  // Takes a function literal, generates code for it. This function should only
+  // be called by compiler.cc.
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static bool ShouldGenerateLog(Expression* type);
+#endif
+
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              FunctionLiteral* lit,
+                              bool is_toplevel,
+                              Handle<Script> script);
+
+  // Accessors
+  MacroAssembler* masm() { return masm_; }
+
+  VirtualFrame* frame() const { return frame_; }
+
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
+  CodeGenState* state() { return state_; }
+  void set_state(CodeGenState* state) { state_ = state; }
+
+  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+  bool in_spilled_code() const { return in_spilled_code_; }
+  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
+ private:
+  // Construction/Destruction
+  CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
+  virtual ~CodeGenerator() { delete masm_; }
+
+  // Accessors
+  Scope* scope() const { return scope_; }
+  bool is_eval() { return is_eval_; }
+
+  // Generating deferred code.
+  void ProcessDeferred();
+
+  // State
+  TypeofState typeof_state() const { return state_->typeof_state(); }
+  ControlDestination* destination() const { return state_->destination(); }
+
+  // Track loop nesting level.
+  int loop_nesting() const { return loop_nesting_; }
+  void IncrementLoopNesting() { loop_nesting_++; }
+  void DecrementLoopNesting() { loop_nesting_--; }
+
+  // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+  void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  void VisitAndSpill(Statement* statement);
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
+  // Main code generation function
+  void GenCode(FunctionLiteral* fun);
+
+  // Generate the return sequence code.  Should be called no more than
+  // once per compiled function, immediately after binding the return
+  // target (which can not be done more than once).
+  void GenerateReturnSequence(Result* return_value);
+
+  // Returns the arguments allocation mode.
+  ArgumentsAllocationMode ArgumentsMode() const;
+
+  // Store the arguments object and allocate it if necessary.
+  Result StoreArgumentsObject(bool initial);
+
+  // The following are used by class Reference.
+  void LoadReference(Reference* ref);
+  void UnloadReference(Reference* ref);
+
+  Operand ContextOperand(Register context, int index) const {
+    return Operand(context, Context::SlotOffset(index));
+  }
+
+  Operand SlotOperand(Slot* slot, Register tmp);
+
+  Operand ContextSlotOperandCheckExtensions(Slot* slot,
+                                            Result tmp,
+                                            JumpTarget* slow);
+
+  // Expressions
+  Operand GlobalObject() const {
+    return ContextOperand(esi, Context::GLOBAL_INDEX);
+  }
+
+  void LoadCondition(Expression* x,
+                     TypeofState typeof_state,
+                     ControlDestination* destination,
+                     bool force_control);
+  void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+  void LoadGlobal();
+  void LoadGlobalReceiver();
+
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  void LoadAndSpill(Expression* expression,
+                    TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+
+  // Read a value from a slot and leave it on top of the expression stack.
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
+  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                           TypeofState typeof_state,
+                                           JumpTarget* slow);
+
+  // Store the value on top of the expression stack into a slot, leaving the
+  // value in place.
+  void StoreToSlot(Slot* slot, InitState init_state);
+
+  // Special code for typeof expressions: Unfortunately, we must
+  // be careful when loading the operand of a 'typeof'
+  // expression. We are not allowed to throw reference errors for
+  // non-existing properties of the global object, so we must make it
+  // look like an explicit property access, instead of an access
+  // through the context chain.
+  void LoadTypeofExpression(Expression* x);
+
+  // Translate the value on top of the frame into control flow to the
+  // control destination.
+  void ToBoolean(ControlDestination* destination);
+
+  void GenericBinaryOperation(
+      Token::Value op,
+      SmiAnalysis* type,
+      OverwriteMode overwrite_mode);
+
+  // If possible, combine two constant smi values using op to produce
+  // a smi result, and push it on the virtual frame, all at compile time.
+  // Returns true if it succeeds.  Otherwise it has no effect.
+  bool FoldConstantSmis(Token::Value op, int left, int right);
+
+  // Emit code to perform a binary operation on a constant
+  // smi and a likely smi.  Consumes the Result *operand.
+  void ConstantSmiBinaryOperation(Token::Value op,
+                                  Result* operand,
+                                  Handle<Object> constant_operand,
+                                  SmiAnalysis* type,
+                                  bool reversed,
+                                  OverwriteMode overwrite_mode);
+
+  // Emit code to perform a binary operation on two likely smis.
+  // The code to handle smi arguments is produced inline.
+  // Consumes the Results *left and *right.
+  void LikelySmiBinaryOperation(Token::Value op,
+                                Result* left,
+                                Result* right,
+                                OverwriteMode overwrite_mode);
+
+  void Comparison(Condition cc,
+                  bool strict,
+                  ControlDestination* destination);
+
+  // To prevent long attacker-controlled byte sequences in the generated
+  // code, integer constants from the JavaScript source are loaded in two
+  // parts if they are larger than 16 bits.
+  static const int kMaxSmiInlinedBits = 16;
+  bool IsUnsafeSmi(Handle<Object> value);
+  // Load an integer constant x into a register target using
+  // at most 16 bits of user-controlled data per assembly operation.
+  void LoadUnsafeSmi(Register target, Handle<Object> value);
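+  // (One possible splitting, for illustration only: emit the constant in
+  // two 16-bit halves that are recombined with a shift and an or at run
+  // time; the scheme actually used is defined by the implementation of
+  // LoadUnsafeSmi.)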
+
+  void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+  // Use an optimized version of Function.prototype.apply that avoids
+  // allocating the arguments object and just copies the arguments
+  // from the stack.
+  void CallApplyLazy(Property* apply,
+                     Expression* receiver,
+                     VariableProxy* arguments,
+                     int position);
+
+  void CheckStack();
+
+  struct InlineRuntimeLUT {
+    void (CodeGenerator::*method)(ZoneList<Expression*>*);
+    const char* name;
+  };
+
+  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+  bool CheckForInlineRuntimeCall(CallRuntime* node);
+  static bool PatchInlineRuntimeEntry(Handle<String> name,
+                                      const InlineRuntimeLUT& new_entry,
+                                      InlineRuntimeLUT* old_entry);
+
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+
+  // Declare global variables and functions in the given array of
+  // name/value pairs.
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  // Instantiate the function boilerplate.
+  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+  // Support for type checks.
+  void GenerateIsSmi(ZoneList<Expression*>* args);
+  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+  void GenerateIsArray(ZoneList<Expression*>* args);
+
+  // Support for construct call checks.
+  void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
+  // Support for arguments.length and arguments[?].
+  void GenerateArgumentsLength(ZoneList<Expression*>* args);
+  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+  // Support for accessing the class and value fields of an object.
+  void GenerateClassOf(ZoneList<Expression*>* args);
+  void GenerateValueOf(ZoneList<Expression*>* args);
+  void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+  // Fast support for charCodeAt(n).
+  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+  // Fast support for object equality testing.
+  void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+  void GenerateLog(ZoneList<Expression*>* args);
+
+  void GenerateGetFramePointer(ZoneList<Expression*>* args);
+
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  enum MathOp { SIN, COS };
+  void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
+
+  // Methods used to indicate which source code the generated code
+  // corresponds to.  Source positions are collected by the assembler and
+  // emitted with the relocation information.
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForReturnPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Statement* stmt);
+  void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.  There should
+  // be no frame-external references to (non-reserved) registers.
+  bool HasValidEntryRegisters();
+#endif
+
+  bool is_eval_;  // Tells whether code is generated for eval.
+  Handle<Script> script_;
+  ZoneList<DeferredCode*> deferred_;
+
+  // Assembler
+  MacroAssembler* masm_;  // to generate code
+
+  // Code generation state
+  Scope* scope_;
+  VirtualFrame* frame_;
+  RegisterAllocator* allocator_;
+  CodeGenState* state_;
+  int loop_nesting_;
+
+  // Jump targets.
+  // The target of the return from the function.
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (ie, jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  // True when we are in code that expects the virtual frame to be fully
+  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
+  // called from spilled code, because they do not leave the virtual frame
+  // in a spilled state.
+  bool in_spilled_code_;
+
+  static InlineRuntimeLUT kInlineRuntimeLUT[];
+
+  friend class VirtualFrame;
+  friend class JumpTarget;
+  friend class Reference;
+  friend class Result;
+
+  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+// Flag that indicates whether the code that handles smi arguments should
+// be placed in the stub or inlined at the call site.
+enum GenericBinaryFlags {
+  SMI_CODE_IN_STUB,
+  SMI_CODE_INLINED
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      GenericBinaryFlags flags)
+      : op_(op), mode_(mode), flags_(flags) {
+    use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  GenericBinaryFlags flags_;
+  bool use_sse3_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+           Token::String(op_),
+           static_cast<int>(mode_),
+           static_cast<int>(flags_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 12> {};
+  class SSE3Bits: public BitField<bool, 14, 1> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
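+  // (BitField<T, shift, size>::encode(v) places v at bit position 'shift',
+  // so the fields above occupy bits 0-1 (mode), 2-13 (op), 14 (SSE3) and
+  // 15 (flags) without overlap, matching the FSOOOOOOOOOOOOMM diagram.)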
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | FlagBits::encode(flags_)
+           | SSE3Bits::encode(use_sse3_);
+  }
+  void Generate(MacroAssembler* masm);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
new file mode 100644
index 0000000..2107ad9
--- /dev/null
+++ b/src/ia32/cpu-ia32.cc
@@ -0,0 +1,79 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU-specific code for ia32 that is independent of the OS goes here.
+
+#ifdef __GNUC__
+#include "third_party/valgrind/valgrind.h"
+#endif
+
+#include "v8.h"
+
+#include "cpu.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+  CpuFeatures::Probe();
+}
+
+
+void CPU::FlushICache(void* start, size_t size) {
+  // No need to flush the instruction cache on Intel. On Intel, instruction
+  // cache flushing is only necessary when multiple cores are running the
+  // same code simultaneously. V8 (and JavaScript) is single-threaded, and
+  // when code is patched on an Intel CPU the core performing the patching
+  // will have its own instruction cache updated automatically.
+
+  // If flushing of the instruction cache becomes necessary Windows has the
+  // API function FlushInstructionCache.
+
+  // By default, valgrind only checks the stack for writes that might need to
+  // invalidate already cached translated code.  This leads to random
+  // instability because code patches or moves sometimes go unnoticed.  One
+  // solution is to run valgrind with --smc-check=all, but this comes at a big
+  // performance cost.  Instead we can notify valgrind to invalidate its
+  // cache directly.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+  VALGRIND_DISCARD_TRANSLATIONS(start, size);
+#endif
+}
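+
+// A hedged usage sketch (not part of the original source): after patching
+// generated code in place, callers are expected to invalidate the range so
+// that tools like valgrind discard stale translations.  The names below are
+// hypothetical:
+//
+//   byte* patch_start = ...;   // first patched byte (hypothetical)
+//   size_t patch_size = ...;   // number of patched bytes (hypothetical)
+//   CPU::FlushICache(patch_start, patch_size);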
+
+
+void CPU::DebugBreak() {
+#ifdef _MSC_VER
+  // To avoid depending on the Visual Studio runtime, the following code can
+  // be used instead:
+  // __asm { int 3 }
+  __debugbreak();
+#else
+  asm("int $3");
+#endif
+}
+
+} }  // namespace v8::internal
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
new file mode 100644
index 0000000..7e0dfd1
--- /dev/null
+++ b/src/ia32/debug-ia32.cc
@@ -0,0 +1,208 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+  return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+// Patch the JS frame exit code with a debug break call. See
+// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
+// for the precise return instruction sequence.
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+  ASSERT(Debug::kIa32JSReturnSequenceLength >=
+         Debug::kIa32CallInstructionLength);
+  rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
+      Debug::kIa32JSReturnSequenceLength - Debug::kIa32CallInstructionLength);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Debug::kIa32JSReturnSequenceLength);
+}
+
+
+// A debug break in the frame exit code is identified by the JS frame exit code
+// having been patched with a call instruction.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsCallInstruction();
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList pointer_regs,
+                                          bool convert_call_to_jmp) {
+  // Save the contents of all general purpose registers in memory. This copy
+  // is later pushed onto the JS expression stack of the fake JS frame that is
+  // generated, and also onto the C frame generated on top of it. In the JS
+  // frame ONLY the registers containing pointers will be pushed onto the
+  // expression stack. This causes the GC to update these pointers so that
+  // they will have the correct value when returning from the debugger.
+  __ SaveRegistersToMemory(kJSCallerSaved);
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Store the registers containing object pointers on the expression stack to
+  // make sure that these are correctly updated during GC.
+  __ PushRegistersFromMemory(pointer_regs);
+
+#ifdef DEBUG
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+  __ Set(eax, Immediate(0));  // no arguments
+  __ mov(ebx, Immediate(ExternalReference::debug_break()));
+
+  CEntryDebugBreakStub ceb;
+  __ CallStub(&ceb);
+
+  // Restore the register values containing object pointers from the
+  // expression stack, in the reverse order from which they were pushed.
+  __ PopRegistersToMemory(pointer_regs);
+
+  // Get rid of the internal frame.
+  __ LeaveInternalFrame();
+
+  // If this call did not replace a call but patched other code, an unwanted
+  // return address is left on the stack. Get rid of it here.
+  if (convert_call_to_jmp) {
+    __ pop(eax);
+  }
+
+  // Finally restore all registers.
+  __ RestoreRegistersFromMemory(kJSCallerSaved);
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+  ExternalReference after_break_target =
+      ExternalReference(Debug_Address::AfterBreakTarget());
+  __ jmp(Operand::StaticVariable(after_break_target));
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  // Register state for IC load call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, ecx.bit(), false);
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  // Register state for IC store call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  // Register state for keyed IC load call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  // Register state for keyed IC store call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  // -----------------------------------
+  // Register eax contains an object that needs to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, eax.bit(), false);
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+  // Register state for IC call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  // -----------------------------------
+  // The number of arguments in eax is not smi encoded.
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  // Register state for construct call (from codegen-ia32.cc).
+  // eax holds the actual number of arguments, not encoded as a smi; see the
+  // comment above for the IC call.
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  // -----------------------------------
+  // The number of arguments in eax is not smi encoded.
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // Register state just before return from JS function (from codegen-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax: return value
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, eax.bit(), true);
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+#undef __
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+} }  // namespace v8::internal
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
new file mode 100644
index 0000000..458844e
--- /dev/null
+++ b/src/ia32/disasm-ia32.cc
@@ -0,0 +1,1202 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "v8.h"
+#include "disasm.h"
+
+namespace disasm {
+
+enum OperandOrder {
+  UNSET_OP_ORDER = 0,
+  REG_OPER_OP_ORDER,
+  OPER_REG_OP_ORDER
+};
+
+
+//------------------------------------------------------------------
+// Tables
+//------------------------------------------------------------------
+struct ByteMnemonic {
+  int b;  // -1 terminates, otherwise must be in range (0..255)
+  const char* mnem;
+  OperandOrder op_order_;
+};
+
+
+static ByteMnemonic two_operands_instr[] = {
+  {0x03, "add", REG_OPER_OP_ORDER},
+  {0x21, "and", OPER_REG_OP_ORDER},
+  {0x23, "and", REG_OPER_OP_ORDER},
+  {0x3B, "cmp", REG_OPER_OP_ORDER},
+  {0x8D, "lea", REG_OPER_OP_ORDER},
+  {0x09, "or", OPER_REG_OP_ORDER},
+  {0x0B, "or", REG_OPER_OP_ORDER},
+  {0x1B, "sbb", REG_OPER_OP_ORDER},
+  {0x29, "sub", OPER_REG_OP_ORDER},
+  {0x2B, "sub", REG_OPER_OP_ORDER},
+  {0x85, "test", REG_OPER_OP_ORDER},
+  {0x31, "xor", OPER_REG_OP_ORDER},
+  {0x33, "xor", REG_OPER_OP_ORDER},
+  {0x87, "xchg", REG_OPER_OP_ORDER},
+  {0x8A, "mov_b", REG_OPER_OP_ORDER},
+  {0x8B, "mov", REG_OPER_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
+static ByteMnemonic zero_operands_instr[] = {
+  {0xC3, "ret", UNSET_OP_ORDER},
+  {0xC9, "leave", UNSET_OP_ORDER},
+  {0x90, "nop", UNSET_OP_ORDER},
+  {0xF4, "hlt", UNSET_OP_ORDER},
+  {0xCC, "int3", UNSET_OP_ORDER},
+  {0x60, "pushad", UNSET_OP_ORDER},
+  {0x61, "popad", UNSET_OP_ORDER},
+  {0x9C, "pushfd", UNSET_OP_ORDER},
+  {0x9D, "popfd", UNSET_OP_ORDER},
+  {0x9E, "sahf", UNSET_OP_ORDER},
+  {0x99, "cdq", UNSET_OP_ORDER},
+  {0x9B, "fwait", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
+static ByteMnemonic call_jump_instr[] = {
+  {0xE8, "call", UNSET_OP_ORDER},
+  {0xE9, "jmp", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
+static ByteMnemonic short_immediate_instr[] = {
+  {0x05, "add", UNSET_OP_ORDER},
+  {0x0D, "or", UNSET_OP_ORDER},
+  {0x15, "adc", UNSET_OP_ORDER},
+  {0x25, "and", UNSET_OP_ORDER},
+  {0x2D, "sub", UNSET_OP_ORDER},
+  {0x35, "xor", UNSET_OP_ORDER},
+  {0x3D, "cmp", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const char* jump_conditional_mnem[] = {
+  /*0*/ "jo", "jno", "jc", "jnc",
+  /*4*/ "jz", "jnz", "jna", "ja",
+  /*8*/ "js", "jns", "jpe", "jpo",
+  /*12*/ "jl", "jnl", "jng", "jg"
+};
+
+
+static const char* set_conditional_mnem[] = {
+  /*0*/ "seto", "setno", "setc", "setnc",
+  /*4*/ "setz", "setnz", "setna", "seta",
+  /*8*/ "sets", "setns", "setpe", "setpo",
+  /*12*/ "setl", "setnl", "setng", "setg"
+};
+
+
+enum InstructionType {
+  NO_INSTR,
+  ZERO_OPERANDS_INSTR,
+  TWO_OPERANDS_INSTR,
+  JUMP_CONDITIONAL_SHORT_INSTR,
+  REGISTER_INSTR,
+  MOVE_REG_INSTR,
+  CALL_JUMP_INSTR,
+  SHORT_IMMEDIATE_INSTR
+};
+
+
+struct InstructionDesc {
+  const char* mnem;
+  InstructionType type;
+  OperandOrder op_order_;
+};
+
+
+class InstructionTable {
+ public:
+  InstructionTable();
+  const InstructionDesc& Get(byte x) const { return instructions_[x]; }
+
+ private:
+  InstructionDesc instructions_[256];
+  void Clear();
+  void Init();
+  void CopyTable(ByteMnemonic bm[], InstructionType type);
+  void SetTableRange(InstructionType type,
+                     byte start,
+                     byte end,
+                     const char* mnem);
+  void AddJumpConditionalShort();
+};
+
+
+InstructionTable::InstructionTable() {
+  Clear();
+  Init();
+}
+
+
+void InstructionTable::Clear() {
+  for (int i = 0; i < 256; i++) {
+    instructions_[i].mnem = "";
+    instructions_[i].type = NO_INSTR;
+    instructions_[i].op_order_ = UNSET_OP_ORDER;
+  }
+}
+
+
+void InstructionTable::Init() {
+  CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
+  CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
+  CopyTable(call_jump_instr, CALL_JUMP_INSTR);
+  CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+  AddJumpConditionalShort();
+  SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
+  SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
+  SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
+  SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
+  SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,");  // 0x90 is nop.
+  SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
+}
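+
+// For example (added commentary), after Init() the table entry for byte
+// 0x53 is a REGISTER_INSTR with mnemonic "push", so the decoder below
+// renders it as "push ebx" (0x53 & 0x07 == 3, the ebx register code).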
+
+
+void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
+  for (int i = 0; bm[i].b >= 0; i++) {
+    InstructionDesc* id = &instructions_[bm[i].b];
+    id->mnem = bm[i].mnem;
+    id->op_order_ = bm[i].op_order_;
+    assert(id->type == NO_INSTR);  // Make sure the entry is not already set.
+    id->type = type;
+  }
+}
+
+
+void InstructionTable::SetTableRange(InstructionType type,
+                                     byte start,
+                                     byte end,
+                                     const char* mnem) {
+  for (byte b = start; b <= end; b++) {
+    InstructionDesc* id = &instructions_[b];
+    assert(id->type == NO_INSTR);  // Make sure the entry is not already set.
+    id->mnem = mnem;
+    id->type = type;
+  }
+}
+
+
+void InstructionTable::AddJumpConditionalShort() {
+  for (byte b = 0x70; b <= 0x7F; b++) {
+    InstructionDesc* id = &instructions_[b];
+    assert(id->type == NO_INSTR);  // Make sure the entry is not already set.
+    id->mnem = jump_conditional_mnem[b & 0x0F];
+    id->type = JUMP_CONDITIONAL_SHORT_INSTR;
+  }
+}
+
+
+static InstructionTable instruction_table;
+
+
+// The IA32 disassembler implementation.
+class DisassemblerIA32 {
+ public:
+  DisassemblerIA32(const NameConverter& converter,
+                   bool abort_on_unimplemented = true)
+      : converter_(converter),
+        tmp_buffer_pos_(0),
+        abort_on_unimplemented_(abort_on_unimplemented) {
+    tmp_buffer_[0] = '\0';
+  }
+
+  virtual ~DisassemblerIA32() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+
+ private:
+  const NameConverter& converter_;
+  v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
+  unsigned int tmp_buffer_pos_;
+  bool abort_on_unimplemented_;
+
+
+  enum {
+    eax = 0,
+    ecx = 1,
+    edx = 2,
+    ebx = 3,
+    esp = 4,
+    ebp = 5,
+    esi = 6,
+    edi = 7
+  };
+
+
+  const char* NameOfCPURegister(int reg) const {
+    return converter_.NameOfCPURegister(reg);
+  }
+
+
+  const char* NameOfByteCPURegister(int reg) const {
+    return converter_.NameOfByteCPURegister(reg);
+  }
+
+
+  const char* NameOfXMMRegister(int reg) const {
+    return converter_.NameOfXMMRegister(reg);
+  }
+
+
+  const char* NameOfAddress(byte* addr) const {
+    return converter_.NameOfAddress(addr);
+  }
+
+
+  // Disassembler helper functions.
+  static void get_modrm(byte data, int* mod, int* regop, int* rm) {
+    *mod = (data >> 6) & 3;
+    *regop = (data & 0x38) >> 3;
+    *rm = data & 7;
+  }
+
+
+  static void get_sib(byte data, int* scale, int* index, int* base) {
+    *scale = (data >> 6) & 3;
+    *index = (data >> 3) & 7;
+    *base = data & 7;
+  }
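+
+  // A worked example (not part of the original source): for the byte
+  // sequence 8B 45 08 ("mov eax,[ebp+0x8]"), get_modrm(0x45, ...) yields
+  // mod == 1 (8-bit displacement), regop == 0 (eax) and rm == 5 (ebp);
+  // PrintRightOperandHelper then prints "[ebp+0x8]" and returns 2 (ModRM
+  // byte plus 8-bit displacement).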
+
+  typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
+
+  int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
+  int PrintRightOperand(byte* modrmp);
+  int PrintRightByteOperand(byte* modrmp);
+  int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
+  int PrintImmediateOp(byte* data);
+  int F7Instruction(byte* data);
+  int D1D3C1Instruction(byte* data);
+  int JumpShort(byte* data);
+  int JumpConditional(byte* data, const char* comment);
+  int JumpConditionalShort(byte* data, const char* comment);
+  int SetCC(byte* data);
+  int FPUInstruction(byte* data);
+  void AppendToBuffer(const char* format, ...);
+
+
+  void UnimplementedInstruction() {
+    if (abort_on_unimplemented_) {
+      UNIMPLEMENTED();
+    } else {
+      AppendToBuffer("'Unimplemented Instruction'");
+    }
+  }
+};
+
+
+void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
+  v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
+  va_list args;
+  va_start(args, format);
+  int result = v8::internal::OS::VSNPrintF(buf, format, args);
+  va_end(args);
+  tmp_buffer_pos_ += result;
+}
+
+int DisassemblerIA32::PrintRightOperandHelper(
+    byte* modrmp,
+    RegisterNameMapping register_name) {
+  int mod, regop, rm;
+  get_modrm(*modrmp, &mod, &regop, &rm);
+  switch (mod) {
+    case 0:
+      if (rm == ebp) {
+        int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
+        AppendToBuffer("[0x%x]", disp);
+        return 5;
+      } else if (rm == esp) {
+        byte sib = *(modrmp + 1);
+        int scale, index, base;
+        get_sib(sib, &scale, &index, &base);
+        if (index == esp && base == esp && scale == 0 /*times_1*/) {
+          AppendToBuffer("[%s]", (this->*register_name)(rm));
+          return 2;
+        } else if (base == ebp) {
+          int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+          AppendToBuffer("[%s*%d+0x%x]",
+                         (this->*register_name)(index),
+                         1 << scale,
+                         disp);
+          return 6;
+        } else if (index != esp && base != ebp) {
+          // [base+index*scale]
+          AppendToBuffer("[%s+%s*%d]",
+                         (this->*register_name)(base),
+                         (this->*register_name)(index),
+                         1 << scale);
+          return 2;
+        } else {
+          UnimplementedInstruction();
+          return 1;
+        }
+      } else {
+        AppendToBuffer("[%s]", (this->*register_name)(rm));
+        return 1;
+      }
+      break;
+    case 1:  // fall through
+    case 2:
+      if (rm == esp) {
+        byte sib = *(modrmp + 1);
+        int scale, index, base;
+        get_sib(sib, &scale, &index, &base);
+        int disp =
+            mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
+        if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
+          AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+        } else {
+          AppendToBuffer("[%s+%s*%d+0x%x]",
+                         (this->*register_name)(base),
+                         (this->*register_name)(index),
+                         1 << scale,
+                         disp);
+        }
+        return mod == 2 ? 6 : 3;
+      } else {
+        // No sib.
+        int disp =
+            mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
+        AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+        return mod == 2 ? 5 : 2;
+      }
+      break;
+    case 3:
+      AppendToBuffer("%s", (this->*register_name)(rm));
+      return 1;
+    default:
+      UnimplementedInstruction();
+      return 1;
+  }
+  UNREACHABLE();
+}
+
+
+int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
+}
+
+
+int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp,
+                                 &DisassemblerIA32::NameOfByteCPURegister);
+}
+
+
+// Returns number of bytes used including the current *data.
+// Writes the instruction's mnemonic and its left and right operands to
+// 'tmp_buffer_'.
+int DisassemblerIA32::PrintOperands(const char* mnem,
+                                    OperandOrder op_order,
+                                    byte* data) {
+  byte modrm = *data;
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  int advance = 0;
+  switch (op_order) {
+    case REG_OPER_OP_ORDER: {
+      AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+      advance = PrintRightOperand(data);
+      break;
+    }
+    case OPER_REG_OP_ORDER: {
+      AppendToBuffer("%s ", mnem);
+      advance = PrintRightOperand(data);
+      AppendToBuffer(",%s", NameOfCPURegister(regop));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return advance;
+}
+
+
+// Returns number of bytes used by machine instruction, including *data byte.
+// Writes immediate instructions to 'tmp_buffer_'.
+int DisassemblerIA32::PrintImmediateOp(byte* data) {
+  bool sign_extension_bit = (*data & 0x02) != 0;
+  byte modrm = *(data+1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  const char* mnem = "Imm???";
+  switch (regop) {
+    case 0: mnem = "add"; break;
+    case 1: mnem = "or"; break;
+    case 2: mnem = "adc"; break;
+    case 4: mnem = "and"; break;
+    case 5: mnem = "sub"; break;
+    case 6: mnem = "xor"; break;
+    case 7: mnem = "cmp"; break;
+    default: UnimplementedInstruction();
+  }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(data+1);
+  if (sign_extension_bit) {
+    AppendToBuffer(",0x%x", *(data + 1 + count));
+    return 1 + count + 1 /*int8*/;
+  } else {
+    AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
+    return 1 + count + 4 /*int32_t*/;
+  }
+}
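+
+// A worked example (not part of the original source): the byte sequence
+// 83 C0 01 has the sign extension bit set (0x83 & 0x02), regop == 0
+// ("add") and a register-direct ModRM byte (0xC0, rm == eax), so
+// PrintImmediateOp emits "add eax,0x1" and returns 3 (opcode + ModRM +
+// imm8).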
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::F7Instruction(byte* data) {
+  assert(*data == 0xF7);
+  byte modrm = *(data+1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  if (mod == 3 && regop != 0) {
+    const char* mnem = NULL;
+    switch (regop) {
+      case 2: mnem = "not"; break;
+      case 3: mnem = "neg"; break;
+      case 4: mnem = "mul"; break;
+      case 7: mnem = "idiv"; break;
+      default: UnimplementedInstruction();
+    }
+    AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
+    return 2;
+  } else if (mod == 3 && regop == eax) {
+    int32_t imm = *reinterpret_cast<int32_t*>(data+2);
+    AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
+    return 6;
+  } else if (regop == eax) {
+    AppendToBuffer("test ");
+    int count = PrintRightOperand(data+1);
+    int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
+    AppendToBuffer(",0x%x", imm);
+    return 1+count+4 /*int32_t*/;
+  } else {
+    UnimplementedInstruction();
+    return 2;
+  }
+}
+
+int DisassemblerIA32::D1D3C1Instruction(byte* data) {
+  byte op = *data;
+  assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
+  byte modrm = *(data+1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  int imm8 = -1;
+  int num_bytes = 2;
+  if (mod == 3) {
+    const char* mnem = NULL;
+    if (op == 0xD1) {
+      imm8 = 1;
+      switch (regop) {
+        case edx: mnem = "rcl"; break;
+        case edi: mnem = "sar"; break;
+        case esp: mnem = "shl"; break;
+        default: UnimplementedInstruction();
+      }
+    } else if (op == 0xC1) {
+      imm8 = *(data+2);
+      num_bytes = 3;
+      switch (regop) {
+        case edx: mnem = "rcl"; break;
+        case esp: mnem = "shl"; break;
+        case ebp: mnem = "shr"; break;
+        case edi: mnem = "sar"; break;
+        default: UnimplementedInstruction();
+      }
+    } else if (op == 0xD3) {
+      switch (regop) {
+        case esp: mnem = "shl"; break;
+        case ebp: mnem = "shr"; break;
+        case edi: mnem = "sar"; break;
+        default: UnimplementedInstruction();
+      }
+    }
+    assert(mnem != NULL);
+    AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
+    if (imm8 > 0) {
+      AppendToBuffer("%d", imm8);
+    } else {
+      AppendToBuffer("cl");
+    }
+  } else {
+    UnimplementedInstruction();
+  }
+  return num_bytes;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpShort(byte* data) {
+  assert(*data == 0xEB);
+  byte b = *(data+1);
+  byte* dest = data + static_cast<int8_t>(b) + 2;
+  AppendToBuffer("jmp %s", NameOfAddress(dest));
+  return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
+  assert(*data == 0x0F);
+  byte cond = *(data+1) & 0x0F;
+  byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
+  const char* mnem = jump_conditional_mnem[cond];
+  AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+  if (comment != NULL) {
+    AppendToBuffer(", %s", comment);
+  }
+  return 6;  // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
+  byte cond = *data & 0x0F;
+  byte b = *(data+1);
+  byte* dest = data + static_cast<int8_t>(b) + 2;
+  const char* mnem = jump_conditional_mnem[cond];
+  AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+  if (comment != NULL) {
+    AppendToBuffer(", %s", comment);
+  }
+  return 2;
+}
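+
+// A worked example (not part of the original source): for the bytes 74 05,
+// the condition nibble is 4 ("jz") and the destination is data + 0x05 + 2,
+// i.e. seven bytes past the start of the instruction, so the output is
+// "jz <address>" for that target.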
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::SetCC(byte* data) {
+  assert(*data == 0x0F);
+  byte cond = *(data+1) & 0x0F;
+  const char* mnem = set_conditional_mnem[cond];
+  AppendToBuffer("%s ", mnem);
+  PrintRightByteOperand(data+2);
+  return 3;  // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::FPUInstruction(byte* data) {
+  byte b1 = *data;
+  byte b2 = *(data + 1);
+  if (b1 == 0xD9) {
+    const char* mnem = NULL;
+    switch (b2) {
+      case 0xE8: mnem = "fld1"; break;
+      case 0xEE: mnem = "fldz"; break;
+      case 0xE1: mnem = "fabs"; break;
+      case 0xE0: mnem = "fchs"; break;
+      case 0xF8: mnem = "fprem"; break;
+      case 0xF5: mnem = "fprem1"; break;
+      case 0xF7: mnem = "fincstp"; break;
+      case 0xE4: mnem = "ftst"; break;
+    }
+    if (mnem != NULL) {
+      AppendToBuffer("%s", mnem);
+      return 2;
+    } else if ((b2 & 0xF8) == 0xC8) {
+      AppendToBuffer("fxch st%d", b2 & 0x7);
+      return 2;
+    } else {
+      int mod, regop, rm;
+      get_modrm(*(data+1), &mod, &regop, &rm);
+      const char* mnem = "?";
+      switch (regop) {
+        case eax: mnem = "fld_s"; break;
+        case ebx: mnem = "fstp_s"; break;
+        default: UnimplementedInstruction();
+      }
+      AppendToBuffer("%s ", mnem);
+      int count = PrintRightOperand(data + 1);
+      return count + 1;
+    }
+  } else if (b1 == 0xDD) {
+    if ((b2 & 0xF8) == 0xC0) {
+      AppendToBuffer("ffree st%d", b2 & 0x7);
+      return 2;
+    } else {
+      int mod, regop, rm;
+      get_modrm(*(data+1), &mod, &regop, &rm);
+      const char* mnem = "?";
+      switch (regop) {
+        case eax: mnem = "fld_d"; break;
+        case ebx: mnem = "fstp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      AppendToBuffer("%s ", mnem);
+      int count = PrintRightOperand(data + 1);
+      return count + 1;
+    }
+  } else if (b1 == 0xDB) {
+    int mod, regop, rm;
+    get_modrm(*(data+1), &mod, &regop, &rm);
+    const char* mnem = "?";
+    switch (regop) {
+      case eax: mnem = "fild_s"; break;
+      case edx: mnem = "fist_s"; break;
+      case ebx: mnem = "fistp_s"; break;
+      default: UnimplementedInstruction();
+    }
+    AppendToBuffer("%s ", mnem);
+    int count = PrintRightOperand(data + 1);
+    return count + 1;
+  } else if (b1 == 0xDF) {
+    if (b2 == 0xE0) {
+      AppendToBuffer("fnstsw_ax");
+      return 2;
+    }
+    int mod, regop, rm;
+    get_modrm(*(data+1), &mod, &regop, &rm);
+    const char* mnem = "?";
+    switch (regop) {
+      case ebp: mnem = "fild_d"; break;
+      case edi: mnem = "fistp_d"; break;
+      default: UnimplementedInstruction();
+    }
+    AppendToBuffer("%s ", mnem);
+    int count = PrintRightOperand(data + 1);
+    return count + 1;
+  } else if (b1 == 0xDC || b1 == 0xDE) {
+    bool is_pop = (b1 == 0xDE);
+    if (is_pop && b2 == 0xD9) {
+      AppendToBuffer("fcompp");
+      return 2;
+    }
+    const char* mnem = "FP0xDC";
+    switch (b2 & 0xF8) {
+      case 0xC0: mnem = "fadd"; break;
+      case 0xE8: mnem = "fsub"; break;
+      case 0xC8: mnem = "fmul"; break;
+      case 0xF8: mnem = "fdiv"; break;
+      default: UnimplementedInstruction();
+    }
+    AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
+    return 2;
+  } else if (b1 == 0xDA && b2 == 0xE9) {
+    const char* mnem = "fucompp";
+    AppendToBuffer("%s", mnem);
+    return 2;
+  }
+  AppendToBuffer("Unknown FP instruction");
+  return 2;
+}
+
+
+// Mnemonics for the second byte of two-byte instructions starting with 0x0F.
+// Returns NULL if the instruction is not handled here.
+static const char* F0Mnem(byte f0byte) {
+  switch (f0byte) {
+    case 0xA2: return "cpuid";
+    case 0x31: return "rdtsc";
+    case 0xBE: return "movsx_b";
+    case 0xBF: return "movsx_w";
+    case 0xB6: return "movzx_b";
+    case 0xB7: return "movzx_w";
+    case 0xAF: return "imul";
+    case 0xA5: return "shld";
+    case 0xAD: return "shrd";
+    case 0xAB: return "bts";
+    default: return NULL;
+  }
+}
+
+
+// Disassembles the instruction at '*instr' and writes it into 'out_buffer'.
+int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
+                                        byte* instr) {
+  tmp_buffer_pos_ = 0;  // Start writing at position 0.
+  byte* data = instr;
+  // Check for hints.
+  const char* branch_hint = NULL;
+  // These two prefixes are used only as branch prediction hints.
+  if (*data == 0x3E /*ds*/) {
+    branch_hint = "predicted taken";
+    data++;
+  } else if (*data == 0x2E /*cs*/) {
+    branch_hint = "predicted not taken";
+    data++;
+  }
+  bool processed = true;  // Will be set to false if the current instruction
+                          // is not in the instruction table.
+  const InstructionDesc& idesc = instruction_table.Get(*data);
+  switch (idesc.type) {
+    case ZERO_OPERANDS_INSTR:
+      AppendToBuffer(idesc.mnem);
+      data++;
+      break;
+
+    case TWO_OPERANDS_INSTR:
+      data++;
+      data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+      break;
+
+    case JUMP_CONDITIONAL_SHORT_INSTR:
+      data += JumpConditionalShort(data, branch_hint);
+      break;
+
+    case REGISTER_INSTR:
+      AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
+      data++;
+      break;
+
+    case MOVE_REG_INSTR: {
+      byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+      AppendToBuffer("mov %s,%s",
+                     NameOfCPURegister(*data & 0x07),
+                     NameOfAddress(addr));
+      data += 5;
+      break;
+    }
+
+    case CALL_JUMP_INSTR: {
+      byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
+      AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+      data += 5;
+      break;
+    }
+
+    case SHORT_IMMEDIATE_INSTR: {
+      byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+      AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
+      data += 5;
+      break;
+    }
+
+    case NO_INSTR:
+      processed = false;
+      break;
+
+    default:
+      UNIMPLEMENTED();  // This type is not implemented.
+  }
+  //----------------------------
+  if (!processed) {
+    switch (*data) {
+      case 0xC2:
+        AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
+        data += 3;
+        break;
+
+      case 0x69:  // fall through
+      case 0x6B:
+        { int mod, regop, rm;
+          get_modrm(*(data+1), &mod, &regop, &rm);
+          int32_t imm =
+              *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
+          AppendToBuffer("imul %s,%s,0x%x",
+                         NameOfCPURegister(regop),
+                         NameOfCPURegister(rm),
+                         imm);
+          data += 2 + (*data == 0x6B ? 1 : 4);
+        }
+        break;
+
+      case 0xF6:
+        { int mod, regop, rm;
+          get_modrm(*(data+1), &mod, &regop, &rm);
+          if (mod == 3 && regop == eax) {
+            AppendToBuffer("test_b %s,%d", NameOfCPURegister(rm), *(data+2));
+          } else {
+            UnimplementedInstruction();
+          }
+          data += 3;
+        }
+        break;
+
+      case 0x81:  // fall through
+      case 0x83:  // 0x81 with sign extension bit set
+        data += PrintImmediateOp(data);
+        break;
+
+      case 0x0F:
+        { byte f0byte = *(data+1);
+          const char* f0mnem = F0Mnem(f0byte);
+          if (f0byte == 0xA2 || f0byte == 0x31) {
+            AppendToBuffer("%s", f0mnem);
+            data += 2;
+          } else if ((f0byte & 0xF0) == 0x80) {
+            data += JumpConditional(data, branch_hint);
+          } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
+                     f0byte == 0xB7 || f0byte == 0xAF) {
+            data += 2;
+            data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+          } else if ((f0byte & 0xF0) == 0x90) {
+            data += SetCC(data);
+          } else {
+            data += 2;
+            if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+              // shrd, shld, bts
+              AppendToBuffer("%s ", f0mnem);
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              data += PrintRightOperand(data);
+              if (f0byte == 0xAB) {
+                AppendToBuffer(",%s", NameOfCPURegister(regop));
+              } else {
+                AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+              }
+            } else {
+              UnimplementedInstruction();
+            }
+          }
+        }
+        break;
+
+      case 0x8F:
+        { data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          if (regop == eax) {
+            AppendToBuffer("pop ");
+            data += PrintRightOperand(data);
+          }
+        }
+        break;
+
+      case 0xFF:
+        { data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          const char* mnem = NULL;
+          switch (regop) {
+            case esi: mnem = "push"; break;
+            case eax: mnem = "inc"; break;
+            case ecx: mnem = "dec"; break;
+            case edx: mnem = "call"; break;
+            case esp: mnem = "jmp"; break;
+            default: mnem = "???";
+          }
+          AppendToBuffer("%s ", mnem);
+          data += PrintRightOperand(data);
+        }
+        break;
+
+      case 0xC7:  // imm32, fall through
+      case 0xC6:  // imm8
+        { bool is_byte = *data == 0xC6;
+          data++;
+          AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
+          data += PrintRightOperand(data);
+          int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
+          AppendToBuffer(",0x%x", imm);
+          data += is_byte ? 1 : 4;
+        }
+        break;
+
+      case 0x80:
+        { data++;
+          AppendToBuffer("%s ", "cmpb");
+          data += PrintRightOperand(data);
+          int32_t imm = *data;
+          AppendToBuffer(",0x%x", imm);
+          data++;
+        }
+        break;
+
+      case 0x88:  // 8bit, fall through
+      case 0x89:  // 32bit
+        { bool is_byte = *data == 0x88;
+          int mod, regop, rm;
+          data++;
+          get_modrm(*data, &mod, &regop, &rm);
+          AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
+          data += PrintRightOperand(data);
+          AppendToBuffer(",%s", NameOfCPURegister(regop));
+        }
+        break;
+
+      case 0x66:  // prefix
+        data++;
+        if (*data == 0x8B) {
+          data++;
+          data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+        } else if (*data == 0x89) {
+          data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          AppendToBuffer("mov_w ");
+          data += PrintRightOperand(data);
+          AppendToBuffer(",%s", NameOfCPURegister(regop));
+        } else {
+          UnimplementedInstruction();
+        }
+        break;
+
+      case 0xFE:
+        { data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          if (mod == 3 && regop == ecx) {
+            AppendToBuffer("dec_b %s", NameOfCPURegister(rm));
+          } else {
+            UnimplementedInstruction();
+          }
+          data++;
+        }
+        break;
+
+      case 0x68:
+        AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
+        data += 5;
+        break;
+
+      case 0x6A:
+        AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+        data += 2;
+        break;
+
+      case 0xA8:
+        AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+        data += 2;
+        break;
+
+      case 0xA9:
+        AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
+        data += 5;
+        break;
+
+      case 0xD1:  // fall through
+      case 0xD3:  // fall through
+      case 0xC1:
+        data += D1D3C1Instruction(data);
+        break;
+
+      case 0xD9:  // fall through
+      case 0xDA:  // fall through
+      case 0xDB:  // fall through
+      case 0xDC:  // fall through
+      case 0xDD:  // fall through
+      case 0xDE:  // fall through
+      case 0xDF:
+        data += FPUInstruction(data);
+        break;
+
+      case 0xEB:
+        data += JumpShort(data);
+        break;
+
+      case 0xF2:
+        if (*(data+1) == 0x0F) {
+          byte b2 = *(data+2);
+          if (b2 == 0x11) {
+            AppendToBuffer("movsd ");
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            data += PrintRightOperand(data);
+            AppendToBuffer(",%s", NameOfXMMRegister(regop));
+          } else if (b2 == 0x10) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
+            data += PrintRightOperand(data);
+          } else {
+            const char* mnem = "?";
+            switch (b2) {
+              case 0x2A: mnem = "cvtsi2sd"; break;
+              case 0x58: mnem = "addsd"; break;
+              case 0x59: mnem = "mulsd"; break;
+              case 0x5C: mnem = "subsd"; break;
+              case 0x5E: mnem = "divsd"; break;
+            }
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            if (b2 == 0x2A) {
+              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+              data += PrintRightOperand(data);
+            } else {
+              AppendToBuffer("%s %s,%s",
+                             mnem,
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm));
+              data++;
+            }
+          }
+        } else {
+          UnimplementedInstruction();
+        }
+        break;
+
+      case 0xF3:
+        if (*(data+1) == 0x0F && *(data+2) == 0x2C) {
+          data += 3;
+          data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+        } else {
+          UnimplementedInstruction();
+        }
+        break;
+
+      case 0xF7:
+        data += F7Instruction(data);
+        break;
+
+      default:
+        UnimplementedInstruction();
+    }
+  }
+
+  if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
+    tmp_buffer_[tmp_buffer_pos_] = '\0';
+  }
+
+  int instr_len = data - instr;
+  ASSERT(instr_len > 0);  // Ensure progress.
+
+  int outp = 0;
+  // Instruction bytes.
+  for (byte* bp = instr; bp < data; bp++) {
+    outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+                                       "%02x",
+                                       *bp);
+  }
+  for (int i = 6 - instr_len; i >= 0; i--) {
+    outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+                                       "  ");
+  }
+
+  outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+                                     " %s",
+                                     tmp_buffer_.start());
+  return instr_len;
+}
+
+
+//------------------------------------------------------------------------------
+
+
+static const char* cpu_regs[8] = {
+  "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
+};
+
+
+static const char* byte_cpu_regs[8] = {
+  "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
+};
+
+
+static const char* xmm_regs[8] = {
+  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+};
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+  return tmp_buffer.start();
+}
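+
+// Note (added commentary): NameOfAddress formats into a single static
+// buffer, so the returned string is only valid until the next call and the
+// converter is not thread safe.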
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  if (0 <= reg && reg < 8) return cpu_regs[reg];
+  return "noreg";
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
+  return "noreg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  if (0 <= reg && reg < 8) return xmm_regs[reg];
+  return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // IA32 does not embed debug strings at the moment.
+  UNREACHABLE();
+  return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte* instruction) {
+  DisassemblerIA32 d(converter_, false /*do not crash if unimplemented*/);
+  return d.InstructionDecode(buffer, instruction);
+}
+
+
+// The IA-32 assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+
+/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    fprintf(f, "%p", prev_pc);
+    fprintf(f, "    ");
+
+    for (byte* bp = prev_pc; bp < pc; bp++) {
+      fprintf(f, "%02x",  *bp);
+    }
+    for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+      fprintf(f, "  ");
+    }
+    fprintf(f, "  %s\n", buffer.start());
+  }
+}
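+
+// A hedged usage sketch (not part of the original source): dumping a region
+// of generated code to stdout, assuming 'begin' and 'end' are hypothetical
+// pointers delimiting valid IA-32 instructions.
+//
+//   disasm::Disassembler::Disassemble(stdout, begin, end);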
+
+
+}  // namespace disasm
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
new file mode 100644
index 0000000..dea439f
--- /dev/null
+++ b/src/ia32/frames-ia32.cc
@@ -0,0 +1,116 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+  ASSERT(state->fp != NULL);
+  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+    return ARGUMENTS_ADAPTOR;
+  }
+  // The marker and function offsets overlap. If the marker isn't a
+  // smi then the frame is a JavaScript frame -- and the marker is
+  // really the function.
+  const int offset = StandardFrameConstants::kMarkerOffset;
+  Object* marker = Memory::Object_at(state->fp + offset);
+  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+  if (fp == 0) return NONE;
+  // Compute the stack pointer.
+  Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+  // Fill in the state.
+  state->fp = fp;
+  state->sp = sp;
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+  // Determine frame type.
+  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+    return EXIT_DEBUG;
+  } else {
+    return EXIT;
+  }
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+  // Exit frames on IA-32 do not contain any pointers. The arguments
+  // are traversed as part of the expression stack of the calling
+  // frame.
+}
+
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+  return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+  int arguments;
+  if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
+    // The arguments for cooked frames are traversed as if they were
+    // expression stack elements of the calling frame. The reason for
+    // this rather strange decision is that we cannot access the
+    // function during mark-compact GCs when the stack is cooked.
+    // In fact, accessing heap objects (like function->shared() below)
+    // at all during GC is problematic.
+    arguments = 0;
+  } else {
+    // Compute the number of arguments by getting the number of formal
+    // parameters of the function. We must remember to take the
+    // receiver into account (+1).
+    JSFunction* function = JSFunction::cast(this->function());
+    arguments = function->shared()->formal_parameter_count() + 1;
+  }
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments * kPointerSize);
+}
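+
+// A worked example (not part of the original source): outside of GC, for a
+// function with two formal parameters the count above is 2 + 1 == 3
+// (receiver included), so the caller's stack pointer is
+// fp() + kCallerSPOffset + 3 * kPointerSize.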
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  const int arguments = Smi::cast(GetExpression(0))->value();
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+  // Internal frames have no arguments. The stack pointer of the
+  // caller is at a fixed offset from the frame pointer.
+  return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
new file mode 100644
index 0000000..3a7c86b
--- /dev/null
+++ b/src/ia32/frames-ia32.h
@@ -0,0 +1,135 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_FRAMES_IA32_H_
+#define V8_IA32_FRAMES_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Register lists.
+// Note that the bit values must match those used in the actual instruction
+// encoding.
+static const int kNumRegs = 8;
+
+
+// Caller-saved registers
+static const RegList kJSCallerSaved =
+  1 << 0 |  // eax
+  1 << 1 |  // ecx
+  1 << 2 |  // edx
+  1 << 3 |  // ebx - used as a caller-saved register in JavaScript code
+  1 << 7;   // edi - callee function
+
+static const int kNumJSCallerSaved = 5;
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
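+
+// A minimal sketch (not part of the original source) of how the RegList bit
+// mask above is queried; register codes match the instruction encoding
+// (eax == 0, ..., edi == 7).  The helper name is hypothetical:
+//
+//   bool IsJSCallerSaved(int code) {
+//     return (kJSCallerSaved & (1 << code)) != 0;  // e.g. true for eax
+//   }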
+
+// ----------------------------------------------------
+
+
+class StackHandlerConstants : public AllStatic {
+ public:
+  static const int kNextOffset  = 0 * kPointerSize;
+  static const int kFPOffset    = 1 * kPointerSize;
+  static const int kStateOffset = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
+
+  static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset      = -6 * kPointerSize;
+
+  static const int kFunctionArgOffset   = +3 * kPointerSize;
+  static const int kReceiverArgOffset   = +4 * kPointerSize;
+  static const int kArgcOffset          = +5 * kPointerSize;
+  static const int kArgvOffset          = +6 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+  static const int kDebugMarkOffset = -2 * kPointerSize;
+  static const int kSPOffset        = -1 * kPointerSize;
+
+  static const int kCallerFPOffset =  0 * kPointerSize;
+  static const int kCallerPCOffset = +1 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
+};
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+  static const int kExpressionsOffset = -3 * kPointerSize;
+  static const int kMarkerOffset      = -2 * kPointerSize;
+  static const int kContextOffset     = -1 * kPointerSize;
+  static const int kCallerFPOffset    =  0 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kPointerSize;
+  static const int kCallerSPOffset    = +2 * kPointerSize;
+};
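+
+// Sketch (added commentary) of the standard frame layout implied by the
+// constants above; offsets are relative to the frame pointer, higher
+// addresses first:
+//
+//   fp + 2 * kPointerSize : caller's stack pointer  (kCallerSPOffset)
+//   fp + 1 * kPointerSize : return address          (kCallerPCOffset)
+//   fp + 0 * kPointerSize : caller's frame pointer  (kCallerFPOffset)
+//   fp - 1 * kPointerSize : context                 (kContextOffset)
+//   fp - 2 * kPointerSize : marker/function         (kMarkerOffset)
+//   fp - 3 * kPointerSize : first expression slot   (kExpressionsOffset)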
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset   = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_FRAMES_IA32_H_
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
new file mode 100644
index 0000000..f7369a8
--- /dev/null
+++ b/src/ia32/ic-ia32.cc
@@ -0,0 +1,1042 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so the code at miss_label
+// must always perform a complete backup property load.
+// It is safe to call this function even if the receiver has fast
+// properties or if name is not a symbol; in either case it jumps to
+// the miss_label.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+                                   Register r0, Register r1, Register r2,
+                                   Register name) {
+  // Register use:
+  //
+  // r0   - used to hold the property dictionary.
+  //
+  // r1   - initially the receiver
+  //      - used for the index into the property dictionary
+  //      - holds the result on exit.
+  //
+  // r2   - used to hold the capacity of the property dictionary.
+  //
+  // name - holds the name of the property and is unchanged.
+
+  Label done;
+
+  // Check for the absence of an interceptor.
+  // Load the map into r0.
+  __ mov(r0, FieldOperand(r1, JSObject::kMapOffset));
+  // Test the has_named_interceptor bit in the map.
+  __ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
+          Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+
+  // Jump to miss if the interceptor bit is set.
+  __ j(not_zero, miss_label, not_taken);
+
+  // Bail out if we have a JS global proxy object.
+  __ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
+  __ cmp(r0, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, miss_label, not_taken);
+
+  // Possible work-around for http://crbug.com/16276.
+  __ cmp(r0, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, miss_label, not_taken);
+  __ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
+  __ j(equal, miss_label, not_taken);
+
+  // Check that the properties array is a dictionary.
+  __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+  __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
+         Immediate(Factory::hash_table_map()));
+  __ j(not_equal, miss_label);
+
+  // Compute the capacity mask.
+  const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+  __ mov(r2, FieldOperand(r0, kCapacityOffset));
+  __ shr(r2, kSmiTagSize);  // convert smi to int
+  __ dec(r2);
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  static const int kProbes = 4;
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  for (int i = 0; i < kProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ mov(r1, FieldOperand(name, String::kLengthOffset));
+    __ shr(r1, String::kHashShift);
+    if (i > 0) {
+      __ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r1, Operand(r2));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
+
+    // Check if the key is identical to the name.
+    __ cmp(name,
+           Operand(r0, r1, times_4, kElementsStartOffset - kHeapObjectTag));
+    if (i != kProbes - 1) {
+      __ j(equal, &done, taken);
+    } else {
+      __ j(not_equal, miss_label, not_taken);
+    }
+  }
+
+  // Check that the value is a normal property.
+  __ bind(&done);
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ test(Operand(r0, r1, times_4, kDetailsOffset - kHeapObjectTag),
+          Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ j(not_zero, miss_label, not_taken);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ mov(r1, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
+}
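+
+// A rough C++ sketch of the probe sequence generated above (key_at and
+// value_at are stand-ins for indexing the dictionary's elements; the
+// real lookup lives in the runtime dictionary code):
+//
+//   uint32_t hash = length_field >> String::kHashShift;
+//   uint32_t mask = capacity - 1;
+//   for (int i = 0; i < kProbes; i++) {
+//     uint32_t index = hash;
+//     if (i > 0) index += StringDictionary::GetProbeOffset(i);
+//     index &= mask;
+//     if (key_at(index) == name) return value_at(index);
+//   }
+//   // otherwise: miss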
+
+
+// Helper function used to check that a value is either not a JS object
+// or, if it is one, has already been lazily loaded (objects that still
+// need loading have the Map::kNeedsLoading bit set).
+static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
+                                           Register value, Register scratch) {
+  Label done;
+  // Check if the value is a Smi.
+  __ test(value, Immediate(kSmiTagMask));
+  __ j(zero, &done, not_taken);
+  // Check if the object has been loaded.
+  __ mov(scratch, FieldOperand(value, JSFunction::kMapOffset));
+  __ mov(scratch, FieldOperand(scratch, Map::kBitField2Offset));
+  __ test(scratch, Immediate(1 << Map::kNeedsLoading));
+  __ j(not_zero, miss, not_taken);
+  __ bind(&done);
+}
+
+
+// The offset from the inlined patch site to the start of the
+// inlined load instruction.  It is 7 bytes (the operand-immediate cmp
+// of the map check) plus 6 bytes (jne slow_label).
+const int LoadIC::kOffsetToLoadInstruction = 13;
+
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  StubCompiler::GenerateLoadStringLength(masm, eax, edx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+#ifdef DEBUG
+// For use in assert below.
+static int TenToThe(int exponent) {
+  ASSERT(exponent <= 9);
+  ASSERT(exponent >= 1);
+  int answer = 10;
+  for (int i = 1; i < exponent; i++) answer *= 10;
+  return answer;
+}
+#endif
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label slow, check_string, index_int, index_string, check_pixel_array;
+
+  // Load name and receiver.
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Get the map of the receiver.
+  __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow, not_taken);
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing
+  // into string objects works as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  __ cmp(edx, JS_OBJECT_TYPE);
+  __ j(less, &slow, not_taken);
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &check_string, not_taken);
+  __ sar(eax, kSmiTagSize);
+  // Get the elements array of the object.
+  __ bind(&index_int);
+  __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &check_pixel_array);
+  // Check that the key (index) is within bounds.
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  // Fast case: Do the load.
+  __ mov(eax,
+         Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, &slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+  __ ret(0);
+
+  // Check whether the elements array is a pixel array.
+  // eax: untagged index
+  // ecx: elements array
+  __ bind(&check_pixel_array);
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::pixel_array_map()));
+  __ j(not_equal, &slow);
+  __ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+  __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
+  __ shl(eax, kSmiTagSize);
+  __ ret(0);
+
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+
+  __ bind(&check_string);
+  // The key is not a smi.
+  // Is it a string?
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+  __ j(above_equal, &slow);
+  // Is the string an array index, with cached numeric value?
+  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+  __ test(ebx, Immediate(String::kIsArrayIndexMask));
+  __ j(not_zero, &index_string, not_taken);
+
+  // If the string is a symbol, do a quick inline probe of the receiver's
+  // dictionary, if it exists.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  __ test(ebx, Immediate(kIsSymbolMask));
+  __ j(zero, &slow, not_taken);
+  // Probe the dictionary leaving result in ecx.
+  GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
+  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
+  __ mov(eax, Operand(ecx));
+  __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+  __ ret(0);
+  // Array index string: If the string is short enough, use the cached
+  // array index in the length/hash field (ebx).
+  // We assert that there are enough bits in an int32_t after the hash shift
+  // bits have been subtracted to allow space for the length and the cached
+  // array index.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << (String::kShortLengthShift - String::kHashShift)));
+  __ bind(&index_string);
+  const int kLengthFieldLimit =
+      (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
+  __ cmp(ebx, kLengthFieldLimit);
+  __ j(above_equal, &slow);
+  __ mov(eax, Operand(ebx));
+  __ and_(eax, (1 << String::kShortLengthShift) - 1);
+  __ shr(eax, String::kLongLengthShift);
+  __ jmp(&index_int);
+}
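+
+// A sketch of the decoding performed above, in terms of the same
+// String constants:
+//
+//   if (length_field >=
+//       (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift)
+//     goto slow;
+//   index = (length_field & ((1 << String::kShortLengthShift) - 1))
+//               >> String::kLongLengthShift;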
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label slow, fast, array, extra, check_pixel_array;
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, key
+  // Check that the object isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  // Get the map from the receiver.
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow, not_taken);
+  // Get the key from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
+  // Check that the key is a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow, not_taken);
+  // Get the instance type from the map of the receiver.
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  // Check if the object is a JS array or not.
+  __ cmp(ecx, JS_ARRAY_TYPE);
+  __ j(equal, &array);
+  // Check that the object is some kind of JS object.
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &slow, not_taken);
+
+  // Object case: Check key against length in the elements array.
+  // eax: value
+  // edx: JSObject
+  // ebx: index (as a smi)
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &check_pixel_array, not_taken);
+  // Untag the key (for checking against untagged length in the fixed array).
+  __ mov(edx, Operand(ebx));
+  __ sar(edx, kSmiTagSize);  // untag the index and use it for the comparison
+  __ cmp(edx, FieldOperand(ecx, Array::kLengthOffset));
+  // eax: value
+  // ecx: FixedArray
+  // ebx: index (as a smi)
+  __ j(below, &fast, taken);
+
+  // Slow case: Push extra copies of the arguments (3).
+  __ bind(&slow);
+  __ pop(ecx);
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(eax);
+  __ push(ecx);
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+
+  // Check whether the elements array is a pixel array.
+  // eax: value
+  // ecx: elements array
+  // ebx: index (as a smi)
+  __ bind(&check_pixel_array);
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::pixel_array_map()));
+  __ j(not_equal, &slow);
+  // Check that the value is a smi. If a conversion is needed call into the
+  // runtime to convert and clamp.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow);
+  __ sar(ebx, kSmiTagSize);  // Untag the index.
+  __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ mov(edx, eax);  // Save the value.
+  __ sar(eax, kSmiTagSize);  // Untag the value.
+  {  // Clamp the value to [0..255].
+    Label done, is_negative;
+    __ test(eax, Immediate(0xFFFFFF00));
+    __ j(zero, &done);
+    __ j(negative, &is_negative);
+    __ mov(eax, Immediate(255));
+    __ jmp(&done);
+    __ bind(&is_negative);
+    __ xor_(eax, Operand(eax));  // Clear eax.
+    __ bind(&done);
+  }
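+  // The block above is the assembly form of (a sketch):
+  //   value = (value & ~0xFF) == 0 ? value : (value < 0 ? 0 : 255);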
+  __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+  __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+  __ mov(eax, edx);  // Return the original value.
+  __ ret(0);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // eax: value
+  // edx: JSArray
+  // ecx: FixedArray
+  // ebx: index (as a smi)
+  // flags: compare (ebx, edx.length())
+  __ j(not_equal, &slow, not_taken);  // do not leave holes in the array
+  __ sar(ebx, kSmiTagSize);  // untag
+  __ cmp(ebx, FieldOperand(ecx, Array::kLengthOffset));
+  __ j(above_equal, &slow, not_taken);
+  // Restore the smi tag and increment the index by one:
+  // ebx = ebx * 2 + (1 << kSmiTagSize).
+  __ lea(ebx, Operand(ebx, times_2, 1 << kSmiTagSize));
+  __ mov(FieldOperand(edx, JSArray::kLengthOffset), ebx);
+  __ sub(Operand(ebx), Immediate(1 << kSmiTagSize));  // decrement ebx again
+  __ jmp(&fast);
+
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode; if it is, the
+  // length is always a smi.
+  __ bind(&array);
+  // eax: value
+  // edx: JSArray
+  // ebx: index (as a smi)
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &check_pixel_array);
+
+  // Check the key against the length in the array, compute the
+  // address to store into and fall through to fast case.
+  __ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
+  __ j(above_equal, &extra, not_taken);
+
+  // Fast case: Do the store.
+  __ bind(&fast);
+  // eax: value
+  // ecx: FixedArray
+  // ebx: index (as a smi)
+  __ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
+         eax);
+  // Update write barrier for the elements array address.
+  __ mov(edx, Operand(eax));
+  __ RecordWrite(ecx, 0, edx, ebx);
+  __ ret(0);
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label number, non_number, non_string, boolean, probe, miss;
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+  // Get the name of the function from the stack; 2 ~ return address, receiver
+  __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+  StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
+
+  // If the stub cache probing failed, the receiver might be a
+  // primitive value. For such values the cache is keyed on the map of
+  // the prototype object of the corresponding JSValue wrapper, so load
+  // that prototype before probing again.
+  //
+  // Check for number.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &number, not_taken);
+  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
+  __ j(not_equal, &non_number, taken);
+  __ bind(&number);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::NUMBER_FUNCTION_INDEX, edx);
+  __ jmp(&probe);
+
+  // Check for string.
+  __ bind(&non_number);
+  __ cmp(ebx, FIRST_NONSTRING_TYPE);
+  __ j(above_equal, &non_string, taken);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::STRING_FUNCTION_INDEX, edx);
+  __ jmp(&probe);
+
+  // Check for boolean.
+  __ bind(&non_string);
+  __ cmp(edx, Factory::true_value());
+  __ j(equal, &boolean, not_taken);
+  __ cmp(edx, Factory::false_value());
+  __ j(not_equal, &miss, taken);
+  __ bind(&boolean);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
+
+  // Probe the stub cache for the value object.
+  __ bind(&probe);
+  StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+static void GenerateNormalHelper(MacroAssembler* masm,
+                                 int argc,
+                                 bool is_global_object,
+                                 Label* miss) {
+  // Search dictionary - put result in register edx.
+  GenerateDictionaryLoad(masm, miss, eax, edx, ebx, ecx);
+
+  // Move the result to register edi and check that it isn't a smi.
+  __ mov(edi, Operand(edx));
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the value is a JavaScript function.
+  __ CmpObjectType(edx, JS_FUNCTION_TYPE, edx);
+  __ j(not_equal, miss, not_taken);
+
+  // Check that the function has been loaded.
+  __ mov(edx, FieldOperand(edi, JSFunction::kMapOffset));
+  __ mov(edx, FieldOperand(edx, Map::kBitField2Offset));
+  __ test(edx, Immediate(1 << Map::kNeedsLoading));
+  __ j(not_zero, miss, not_taken);
+
+  // Patch the receiver with the global proxy if necessary.
+  if (is_global_object) {
+    __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+
+  Label miss, global_object, non_global_object;
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+  // Get the name of the function from the stack; 2 ~ return address, receiver.
+  __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the receiver is a valid JS object.
+  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ cmp(eax, FIRST_JS_OBJECT_TYPE);
+  __ j(below, &miss, not_taken);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  // Check for access to global object.
+  __ cmp(eax, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, &global_object);
+  __ cmp(eax, JS_BUILTINS_OBJECT_TYPE);
+  __ j(not_equal, &non_global_object);
+
+  // Accessing global object: Load and invoke.
+  __ bind(&global_object);
+  // Check that the global object does not require access checks.
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_equal, &miss, not_taken);
+  GenerateNormalHelper(masm, argc, true, &miss);
+
+  // Accessing non-global object: Check for access to global proxy.
+  Label global_proxy, invoke;
+  __ bind(&non_global_object);
+  __ cmp(eax, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, &global_proxy, not_taken);
+  // Check that the non-global, non-global-proxy object does not
+  // require access checks.
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_equal, &miss, not_taken);
+  __ bind(&invoke);
+  GenerateNormalHelper(masm, argc, false, &miss);
+
+  // Global object proxy access: Check access rights.
+  __ bind(&global_proxy);
+  __ CheckAccessGlobalProxy(edx, eax, &miss);
+  __ jmp(&invoke);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+void CallIC::Generate(MacroAssembler* masm,
+                      int argc,
+                      const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+  // Get the name of the function to call from the stack.
+  // 2 ~ receiver, return address.
+  __ mov(ebx, Operand(esp, (argc + 2) * kPointerSize));
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ push(edx);
+  __ push(ebx);
+
+  // Call the entry.
+  CEntryStub stub(1);
+  __ mov(eax, Immediate(2));
+  __ mov(ebx, Immediate(f));
+  __ CallStub(&stub);
+
+  // Move result to edi and exit the internal frame.
+  __ mov(edi, eax);
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  Label invoke, global;
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));  // receiver
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &invoke, not_taken);
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, &global);
+  __ cmp(ecx, JS_BUILTINS_OBJECT_TYPE);
+  __ j(not_equal, &invoke);
+
+  // Patch the receiver on the stack.
+  __ bind(&global);
+  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+  __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ bind(&invoke);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+}
+
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Label miss, probe, global;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the receiver is a valid JS object.
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ cmp(edx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &miss, not_taken);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  // Check for access to global object (unlikely).
+  __ cmp(edx, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, &global, not_taken);
+
+  // Check for non-global object that requires access check.
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &miss, not_taken);
+
+  // Search the dictionary placing the result in eax.
+  __ bind(&probe);
+  GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
+  GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
+  __ ret(0);
+
+  // Global object access: Check access rights.
+  __ bind(&global);
+  __ CheckAccessGlobalProxy(eax, edx, &miss);
+  __ jmp(&probe);
+
+  // Cache miss: Restore receiver from stack and jump to runtime.
+  __ bind(&miss);
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ pop(ebx);
+  __ push(eax);  // receiver
+  __ push(ecx);  // name
+  __ push(ebx);  // return address
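+
+  // Stack after the pushes, from the top: return address, name,
+  // receiver (the two runtime-call arguments); the caller's original
+  // receiver slot remains untouched below them.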
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 2, 1);
+}
+
+
+// One-byte opcode for test eax, 0xXXXXXXXX.
+static const byte kTestEaxByte = 0xA9;
+
+
+void LoadIC::ClearInlinedVersion(Address address) {
+  // Reset the map check of the inlined inobject property load (if
+  // present) to guarantee failure by holding an invalid map (the null
+  // value).  The offset can be patched to anything.
+  PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
+}
+
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+  // Insert null as the map to check for to make sure the map check fails
+  // sending control flow to the IC instead of the inlined version.
+  PatchInlinedLoad(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+  // Insert null as the elements map to check for.  This will make
+  // sure that the elements fast-case map check fails so that control
+  // flows to the IC instead of the inlined version.
+  PatchInlinedStore(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+  // Restore the fast-case elements map check so that the inlined
+  // version can be used again.
+  PatchInlinedStore(address, Heap::fixed_array_map());
+}
+
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+  // If the instruction following the call is not a test eax, nothing
+  // was inlined.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the map check instruction.
+  int delta = *reinterpret_cast<int*>(delta_address);
+
+  // The map address is the last 4 bytes of the 7-byte
+  // operand-immediate compare instruction, so we add 3 to get the
+  // offset to the last 4 bytes.
+  Address map_address = test_instruction_address + delta + 3;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // The offset is in the last 4 bytes of a six-byte
+  // memory-to-register move instruction, so we add 2 to get the
+  // offset to the last 4 bytes.
+  Address offset_address =
+      test_instruction_address + delta + kOffsetToLoadInstruction + 2;
+  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+  return true;
+}
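+
+// For reference, the instruction layout assumed by the patching above,
+// with sizes as quoted in the comments (a sketch; mnemonics are
+// schematic):
+//
+//   call <ic>                  ; IC call site
+//   test eax, <delta>          ; imm32 holds the delta to the map check
+//   ...
+//   cmp <operand>, <map>       ; 7 bytes; last 4 bytes are the map
+//   jne <slow_case>            ; 6 bytes
+//   mov <reg>, <field>         ; 6 bytes; last 4 bytes are the offset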
+
+
+static bool PatchInlinedMapCheck(Address address, Object* map) {
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+  // The keyed load has a fast inlined case if the IC call instruction
+  // is immediately followed by a test instruction.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  // Fetch the offset from the test instruction to the map cmp
+  // instruction.  This offset is stored in the last 4 bytes of the
+  // 5-byte test instruction.
+  Address delta_address = test_instruction_address + 1;
+  int delta = *reinterpret_cast<int*>(delta_address);
+  // Compute the map address.  The map address is in the last 4 bytes
+  // of the 7-byte operand-immediate compare instruction, so we add 3
+  // to the offset to get the map address.
+  Address map_address = test_instruction_address + delta + 3;
+  // Patch the map check.
+  *(reinterpret_cast<Object**>(map_address)) = map;
+  return true;
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+// Defined in ic.cc.
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ pop(ebx);
+  __ push(ecx);  // receiver
+  __ push(eax);  // name
+  __ push(ebx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  // Get the receiver from the stack and probe the stub cache.
+  __ mov(edx, Operand(esp, kPointerSize));
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : transition map
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  __ pop(ebx);
+  __ push(Operand(esp, 0));  // receiver
+  __ push(ecx);  // transition map
+  __ push(eax);  // value
+  __ push(ebx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(
+      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+}
+
+
+void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  // Move the return address below the arguments.
+  __ pop(ebx);
+  __ push(Operand(esp, 0));
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 3, 1);
+}
+
+
+// Defined in ic.cc.
+Object* KeyedStoreIC_Miss(Arguments args);
+
+void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+
+  // Move the return address below the arguments.
+  __ pop(ecx);
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(eax);
+  __ push(ecx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(f, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : transition map
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+
+  // Move the return address below the arguments.
+  __ pop(ebx);
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(
+      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
new file mode 100644
index 0000000..c3f2bc1
--- /dev/null
+++ b/src/ia32/jump-target-ia32.cc
@@ -0,0 +1,432 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+  ASSERT(cgen()->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen()->HasValidEntryRegisters());
+
+  if (is_bound()) {
+    // Backward jump.  There is an expected frame to merge to.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+  } else if (entry_frame_ != NULL) {
+    // Forward jump with a preconfigured entry frame.  Assert the
+    // current frame matches the expected one and jump to the block.
+    ASSERT(cgen()->frame()->Equals(entry_frame_));
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+  } else {
+    // Forward jump.  Remember the current frame and emit a jump to
+    // its merge code.
+    AddReachingFrame(cgen()->frame());
+    RegisterFile empty;
+    cgen()->SetFrame(NULL, &empty);
+    __ jmp(&merge_labels_.last());
+  }
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint hint) {
+  ASSERT(cgen() != NULL);
+  ASSERT(cgen()->has_valid_frame());
+
+  if (is_bound()) {
+    ASSERT(direction_ == BIDIRECTIONAL);
+    // Backward branch.  We have an expected frame to merge to on the
+    // backward edge.
+
+    // Swap the current frame for a copy of it to use for the branch;
+    // swapping keeps the off-frame registers off the fall-through
+    // frame.
+    VirtualFrame* fall_through_frame = cgen()->frame();
+    VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
+    RegisterFile non_frame_registers;
+    cgen()->SetFrame(branch_frame, &non_frame_registers);
+
+    // Check if we can avoid merge code.
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    if (cgen()->frame()->Equals(entry_frame_)) {
+      // Branch right in to the block.
+      cgen()->DeleteFrame();
+      __ j(cc, &entry_label_, hint);
+      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+      return;
+    }
+
+    // Check if we can reuse existing merge code.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      if (reaching_frames_[i] != NULL &&
+          cgen()->frame()->Equals(reaching_frames_[i])) {
+        // Branch to the merge code.
+        cgen()->DeleteFrame();
+        __ j(cc, &merge_labels_[i], hint);
+        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+        return;
+      }
+    }
+
+    // To emit the merge code here, we negate the condition and branch
+    // around the merge code on the fall through path.
+    Label original_fall_through;
+    __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+    __ bind(&original_fall_through);
+
+  } else if (entry_frame_ != NULL) {
+    // Forward branch with a preconfigured entry frame.  Assert the
+    // current frame matches the expected one and branch to the block.
+    ASSERT(cgen()->frame()->Equals(entry_frame_));
+    // Explicitly use the macro assembler instead of __ as forward
+    // branches are expected to be a fixed size (no inserted
+    // coverage-checking instructions please).  This is used in
+    // Reference::GetValue.
+    cgen()->masm()->j(cc, &entry_label_, hint);
+
+  } else {
+    // Forward branch.  A copy of the current frame is remembered and
+    // a branch to the merge code is emitted.  Explicitly use the
+    // macro assembler instead of __ as forward branches are expected
+    // to be a fixed size (no inserted coverage-checking instructions
+    // please).  This is used in Reference::GetValue.
+    AddReachingFrame(new VirtualFrame(cgen()->frame()));
+    cgen()->masm()->j(cc, &merge_labels_.last(), hint);
+  }
+}
+
+
+void JumpTarget::Call() {
+  // Call is used to push the address of the catch block on the stack as
+  // a return address when compiling try/catch and try/finally.  We
+  // fully spill the frame before making the call.  The expected frame
+  // at the label (which should be the only one) is the spilled current
+  // frame plus an in-memory return address.  The "fall-through" frame
+  // at the return site is the spilled current frame.
+  ASSERT(cgen() != NULL);
+  ASSERT(cgen()->has_valid_frame());
+  // There are no non-frame references across the call.
+  ASSERT(cgen()->HasValidEntryRegisters());
+  ASSERT(!is_linked());
+
+  cgen()->frame()->SpillAll();
+  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
+  target_frame->Adjust(1);
+  // We do not expect a call with a preconfigured entry frame.
+  ASSERT(entry_frame_ == NULL);
+  AddReachingFrame(target_frame);
+  __ call(&merge_labels_.last());
+}
+
+
+void JumpTarget::DoBind() {
+  ASSERT(cgen() != NULL);
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+  // Fast case: the jump target was manually configured with an entry
+  // frame to use.
+  if (entry_frame_ != NULL) {
+    // Assert no reaching frames to deal with.
+    ASSERT(reaching_frames_.is_empty());
+    ASSERT(!cgen()->has_valid_frame());
+
+    RegisterFile empty;
+    if (direction_ == BIDIRECTIONAL) {
+      // Copy the entry frame so the original can be used for a
+      // possible backward jump.
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+    } else {
+      // Take ownership of the entry frame.
+      cgen()->SetFrame(entry_frame_, &empty);
+      entry_frame_ = NULL;
+    }
+    __ bind(&entry_label_);
+    return;
+  }
+
+  if (!is_linked()) {
+    ASSERT(cgen()->has_valid_frame());
+    if (direction_ == FORWARD_ONLY) {
+      // Fast case: no forward jumps and no possible backward jumps.
+      // The stack pointer can be floating above the top of the
+      // virtual frame before the bind.  Afterward, it should not.
+      VirtualFrame* frame = cgen()->frame();
+      int difference = frame->stack_pointer_ - (frame->element_count() - 1);
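+      // E.g. if two elements were popped from the virtual frame
+      // without emitting code, difference is 2 and the add below
+      // discards 2 * kPointerSize bytes.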
+      if (difference > 0) {
+        frame->stack_pointer_ -= difference;
+        __ add(Operand(esp), Immediate(difference * kPointerSize));
+      }
+    } else {
+      ASSERT(direction_ == BIDIRECTIONAL);
+      // Fast case: no forward jumps, possible backward ones.  Remove
+      // constants and copies above the watermark on the fall-through
+      // frame and use it as the entry frame.
+      cgen()->frame()->MakeMergable();
+      entry_frame_ = new VirtualFrame(cgen()->frame());
+    }
+    __ bind(&entry_label_);
+    return;
+  }
+
+  if (direction_ == FORWARD_ONLY &&
+      !cgen()->has_valid_frame() &&
+      reaching_frames_.length() == 1) {
+    // Fast case: no fall-through, a single forward jump, and no
+    // possible backward jumps.  Pick up the only reaching frame, take
+    // ownership of it, and use it for the block about to be emitted.
+    VirtualFrame* frame = reaching_frames_[0];
+    RegisterFile empty;
+    cgen()->SetFrame(frame, &empty);
+    reaching_frames_[0] = NULL;
+    __ bind(&merge_labels_[0]);
+
+    // The stack pointer can be floating above the top of the
+    // virtual frame before the bind.  Afterward, it should not.
+    int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+    if (difference > 0) {
+      frame->stack_pointer_ -= difference;
+      __ add(Operand(esp), Immediate(difference * kPointerSize));
+    }
+
+    __ bind(&entry_label_);
+    return;
+  }
+
+  // If there is a current frame, record it as the fall-through.  It
+  // is owned by the reaching frames for now.
+  bool had_fall_through = false;
+  if (cgen()->has_valid_frame()) {
+    had_fall_through = true;
+    AddReachingFrame(cgen()->frame());  // Return value ignored.
+    RegisterFile empty;
+    cgen()->SetFrame(NULL, &empty);
+  }
+
+  // Compute the frame to use for entry to the block.
+  ComputeEntryFrame();
+
+  // Some moves required to merge to an expected frame require purely
+  // frame state changes, and do not require any code generation.
+  // Perform those first to increase the possibility of finding equal
+  // frames below.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    if (reaching_frames_[i] != NULL) {
+      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+    }
+  }
+
+  if (is_linked()) {
+    // There were forward jumps.  Handle merging the reaching frames
+    // to the entry frame.
+
+    // Loop over the (non-null) reaching frames and process any that
+    // need merge code.  Iterate backwards through the list to handle
+    // the fall-through frame first.  Set frames that will be
+    // processed after 'i' to NULL if we want to avoid processing
+    // them.
+    for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
+      VirtualFrame* frame = reaching_frames_[i];
+
+      if (frame != NULL) {
+        // Does the frame (probably) need merge code?
+        if (!frame->Equals(entry_frame_)) {
+          // We could have a valid frame as the fall through to the
+          // binding site or as the fall through from a previous merge
+          // code block.  Jump around the code we are about to
+          // generate.
+          if (cgen()->has_valid_frame()) {
+            cgen()->DeleteFrame();
+            __ jmp(&entry_label_);
+          }
+          // Pick up the frame for this block.  Assume ownership if
+          // there cannot be backward jumps.
+          RegisterFile empty;
+          if (direction_ == BIDIRECTIONAL) {
+            cgen()->SetFrame(new VirtualFrame(frame), &empty);
+          } else {
+            cgen()->SetFrame(frame, &empty);
+            reaching_frames_[i] = NULL;
+          }
+          __ bind(&merge_labels_[i]);
+
+          // Loop over the remaining (non-null) reaching frames,
+          // looking for any that can share merge code with this one.
+          for (int j = 0; j < i; j++) {
+            VirtualFrame* other = reaching_frames_[j];
+            if (other != NULL && other->Equals(cgen()->frame())) {
+              // Set the reaching frame element to null to avoid
+              // processing it later, and then bind its entry label.
+              reaching_frames_[j] = NULL;
+              __ bind(&merge_labels_[j]);
+            }
+          }
+
+          // Emit the merge code.
+          cgen()->frame()->MergeTo(entry_frame_);
+        } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
+          // If this is the fall through frame, and it didn't need
+          // merge code, we need to pick up the frame so we can jump
+          // around subsequent merge blocks if necessary.
+          RegisterFile empty;
+          cgen()->SetFrame(frame, &empty);
+          reaching_frames_[i] = NULL;
+        }
+      }
+    }
+
+    // The code generator may not have a current frame if there was no
+    // fall through and none of the reaching frames needed merging.
+    // In that case, clone the entry frame as the current frame.
+    if (!cgen()->has_valid_frame()) {
+      RegisterFile empty;
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+    }
+
+    // There may be unprocessed reaching frames that did not need
+    // merge code.  They will have unbound merge labels.  Bind their
+    // merge labels to be the same as the entry label and deallocate
+    // them.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      if (!merge_labels_[i].is_bound()) {
+        reaching_frames_[i] = NULL;
+        __ bind(&merge_labels_[i]);
+      }
+    }
+
+    // There are non-NULL reaching frames with bound labels for each
+    // merge block, but only on backward targets.
+  } else {
+    // There were no forward jumps.  There must be a current frame and
+    // this must be a bidirectional target.
+    ASSERT(reaching_frames_.length() == 1);
+    ASSERT(reaching_frames_[0] != NULL);
+    ASSERT(direction_ == BIDIRECTIONAL);
+
+    // Use a copy of the reaching frame so the original can be saved
+    // for possible reuse as a backward merge block.
+    RegisterFile empty;
+    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
+    __ bind(&merge_labels_[0]);
+    cgen()->frame()->MergeTo(entry_frame_);
+  }
+
+  __ bind(&entry_label_);
+}
+
+
+void BreakTarget::Jump() {
+  // Drop leftover statement state from the frame before merging, without
+  // emitting code.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->ForgetElements(count);
+  DoJump();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+  // Drop leftover statement state from the frame before merging, without
+  // emitting code.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->ForgetElements(count);
+  cgen()->frame()->Push(arg);
+  DoJump();
+}
+
+
+void BreakTarget::Bind() {
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even on
+  // the fall through.  This is so we can bind the return target with state
+  // on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    cgen()->frame()->ForgetElements(count);
+  }
+  DoBind();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_ + 1);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even on
+  // the fall through.  This is so we can bind the return target with state
+  // on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    cgen()->frame()->ForgetElements(count);
+    cgen()->frame()->Push(arg);
+  }
+  DoBind();
+  *arg = cgen()->frame()->Pop();
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
new file mode 100644
index 0000000..e83bb92
--- /dev/null
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -0,0 +1,1192 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// MacroAssembler implementation.
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+    : Assembler(buffer, size),
+      unresolved_(0),
+      generating_stub_(false),
+      allow_stub_calls_(true),
+      code_object_(Heap::undefined_value()) {
+}
+
+
+static void RecordWriteHelper(MacroAssembler* masm,
+                              Register object,
+                              Register addr,
+                              Register scratch) {
+  Label fast;
+
+  // Compute the page start address from the heap object pointer, and reuse
+  // the 'object' register for it.
+  masm->and_(object, ~Page::kPageAlignmentMask);
+  Register page_start = object;
+
+  // Compute the pointer's bit index within the page's remembered set,
+  // and reuse the 'addr' register for it as pointer_offset.
+  masm->sub(addr, Operand(page_start));
+  masm->shr(addr, kObjectAlignmentBits);
+  Register pointer_offset = addr;
+
+  // If the bit offset lies beyond the normal remembered set range, it is in
+  // the extra remembered set area of a large object.
+  masm->cmp(pointer_offset, Page::kPageSize / kPointerSize);
+  masm->j(less, &fast);
+
+  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+  // extra remembered set after the large object.
+
+  // Find the length of the large object (FixedArray).
+  masm->mov(scratch, Operand(page_start, Page::kObjectStartOffset
+                                         + FixedArray::kLengthOffset));
+  Register array_length = scratch;
+
+  // Extra remembered set starts right after the large object (a FixedArray), at
+  //   page_start + kObjectStartOffset + objectSize
+  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+  // Add the delta between the end of the normal RSet and the start of the
+  // extra RSet to 'page_start', so that addressing the bit using
+  // 'pointer_offset' hits the extra RSet words.
+  masm->lea(page_start,
+            Operand(page_start, array_length, times_pointer_size,
+                    Page::kObjectStartOffset + FixedArray::kHeaderSize
+                        - Page::kRSetEndOffset));
+
+  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+  // to limit code size. We should probably evaluate this decision by
+  // measuring the performance of an equivalent implementation using
+  // "simpler" instructions
+  masm->bind(&fast);
+  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+}
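+
+// To make the bit arithmetic above concrete, a worked example (comment
+// only; assumes the 8KB pages and 4-byte pointers used on ia32): for a
+// slot at page_start + 0x100, pointer_offset is 0x100 >> 2 = 64. Since
+// 64 < Page::kPageSize / kPointerSize (= 2048), the fast path is taken
+// and bts sets bit 64 of the bitmap at page_start + Page::kRSetOffset.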
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register addr_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits of three registers (object, address and
+  // scratch) OOOOAAAASSSS.
+  class ScratchBits: public BitField<uint32_t, 0, 4> {};
+  class AddressBits: public BitField<uint32_t, 4, 4> {};
+  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+    return ObjectBits::encode(object_.code()) |
+           AddressBits::encode(addr_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
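+
+// For illustration, a hypothetical stub with object = eax (register code
+// 0), addr = ecx (code 1) and scratch = edx (code 2) gets the minor key
+// (0 << 8) | (1 << 4) | 2 = 0x012, matching the OOOOAAAASSSS layout
+// documented above.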
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  RecordWriteHelper(masm, object_, addr_, scratch_);
+  masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object, int offset,
+                                 Register value, Register scratch) {
+  // First, check if a remembered set write is even needed. The tests below
+  // catch stores of Smis and stores into the young generation (which does
+  // not have space for the remembered set bits).
+  Label done;
+
+  // Skip barrier if writing a smi.
+  ASSERT_EQ(0, kSmiTag);
+  test(value, Immediate(kSmiTagMask));
+  j(zero, &done);
+
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    mov(value, Operand(object));
+    and_(value, Heap::NewSpaceMask());
+    cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
+    j(equal, &done);
+  } else {
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start().address());
+    lea(value, Operand(object, -new_space_start));
+    and_(value, Heap::NewSpaceMask());
+    j(equal, &done);
+  }
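+  // In both branches above, j(equal, &done) tests the zero flag left by
+  // the preceding cmp/and_: a zero result means 'object' lies in the new
+  // space, which has no remembered set, so the barrier can be skipped.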
+
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+    // Compute the bit offset in the remembered set, leave it in 'value'.
+    lea(value, Operand(object, offset));
+    and_(value, Page::kPageAlignmentMask);
+    shr(value, kPointerSizeLog2);
+
+    // Compute the page address from the heap object pointer, leave it in
+    // 'object'.
+    and_(object, ~Page::kPageAlignmentMask);
+
+    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+    // to limit code size. We should probably evaluate this decision by
+    // measuring the performance of an equivalent implementation using
+    // "simpler" instructions
+    bts(Operand(object, Page::kRSetOffset), value);
+  } else {
+    Register dst = scratch;
+    if (offset != 0) {
+      lea(dst, Operand(object, offset));
+    } else {
+      // Array access: calculate the destination address in the same manner
+      // as KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an
+      // offset into an array of words.
+      ASSERT_EQ(1, kSmiTagSize);
+      ASSERT_EQ(0, kSmiTag);
+      lea(dst, Operand(object, dst, times_half_pointer_size,
+                       FixedArray::kHeaderSize - kHeapObjectTag));
+    }
+    // If we are already generating a shared stub, not inlining the
+    // record write code isn't going to save us any memory.
+    if (generating_stub()) {
+      RecordWriteHelper(this, object, dst, value);
+    } else {
+      RecordWriteStub stub(object, dst, value);
+      CallStub(&stub);
+    }
+  }
+
+  bind(&done);
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of registers to memory location.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      mov(Operand::StaticVariable(reg_addr), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of memory location to registers.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      mov(reg, Operand::StaticVariable(reg_addr));
+    }
+  }
+}
+
+
+void MacroAssembler::PushRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Push the content of the memory location to the stack.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      push(Operand::StaticVariable(reg_addr));
+    }
+  }
+}
+
+
+void MacroAssembler::PopRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Pop the content from the stack to the memory location.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      pop(Operand::StaticVariable(reg_addr));
+    }
+  }
+}
+
+
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+                                                    Register scratch,
+                                                    RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of the stack to the memory location and adjust base.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      mov(scratch, Operand(base, 0));
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      mov(Operand::StaticVariable(reg_addr), scratch);
+      lea(base, Operand(base, kPointerSize));
+    }
+  }
+}
+#endif
+
+void MacroAssembler::Set(Register dst, const Immediate& x) {
+  if (x.is_zero()) {
+    xor_(dst, Operand(dst));  // shorter than mov
+  } else {
+    mov(dst, x);
+  }
+}
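+
+// The xor above is a size optimization: "xor r32, r32" encodes in 2 bytes
+// versus 5 for "mov r32, imm32". Note that unlike mov, xor also clobbers
+// the CPU flags.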
+
+
+void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
+  mov(dst, x);
+}
+
+
+void MacroAssembler::CmpObjectType(Register heap_object,
+                                   InstanceType type,
+                                   Register map) {
+  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+  CmpInstanceType(map, type);
+}
+
+
+void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
+  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
+       static_cast<int8_t>(type));
+}
+
+
+void MacroAssembler::FCmp() {
+  fucompp();
+  push(eax);
+  fnstsw_ax();
+  sahf();
+  pop(eax);
+}
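+
+// FCmp pops both operands off the FPU stack (fucompp), copies the FPU
+// status word into ax (fnstsw_ax) and transfers it to the CPU flags
+// (sahf); eax is saved and restored around the sequence, so only the
+// flags are clobbered.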
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  push(ebp);
+  mov(ebp, Operand(esp));
+  push(esi);
+  push(Immediate(Smi::FromInt(type)));
+  push(Immediate(CodeObject()));
+  if (FLAG_debug_code) {
+    cmp(Operand(esp, 0), Immediate(Factory::undefined_value()));
+    Check(not_equal, "code object not properly patched");
+  }
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  if (FLAG_debug_code) {
+    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+        Immediate(Smi::FromInt(type)));
+    Check(equal, "stack frame types must match");
+  }
+  leave();
+}
+
+
+void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
+  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+  // Set up the frame structure on the stack.
+  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  push(ebp);
+  mov(ebp, Operand(esp));
+
+  // Reserve room for entry stack pointer and push the debug marker.
+  ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  push(Immediate(0));  // saved entry sp, patched before call
+  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+
+  // Save the frame pointer and the context in top.
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  ExternalReference context_address(Top::k_context_address);
+  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+  mov(Operand::StaticVariable(context_address), esi);
+
+  // Set up argc and argv in callee-saved registers.
+  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  mov(edi, Operand(eax));
+  lea(esi, Operand(ebp, eax, times_4, offset));
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Save the state of all registers to the stack from the memory
+  // location. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // TODO(1243899): This should be symmetric to
+    // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
+    // correct here, but computed for the other call. Very error
+    // prone! FIX THIS.  Actually there are deeper problems with
+    // register saving than this asymmetry (see the bug report
+    // associated with this issue).
+    PushRegistersFromMemory(kJSCallerSaved);
+  }
+#endif
+
+  // Reserve space for two arguments: argc and argv.
+  sub(Operand(esp), Immediate(2 * kPointerSize));
+
+  // Get the required frame alignment for the OS.
+  static const int kFrameAlignment = OS::ActivationFrameAlignment();
+  if (kFrameAlignment > 0) {
+    ASSERT(IsPowerOf2(kFrameAlignment));
+    and_(esp, -kFrameAlignment);
+  }
+
+  // Patch the saved entry sp.
+  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
+}
+
+
+void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Restore the memory copy of the registers by digging them out from
+  // the stack. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // It's okay to clobber register ebx below because we don't need
+    // the function pointer after this.
+    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    lea(ebx, Operand(ebp, kOffset));
+    CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
+  }
+#endif
+
+  // Get the return address from the stack and restore the frame pointer.
+  mov(ecx, Operand(ebp, 1 * kPointerSize));
+  mov(ebp, Operand(ebp, 0 * kPointerSize));
+
+  // Pop the arguments and the receiver from the caller stack.
+  lea(esp, Operand(esi, 1 * kPointerSize));
+
+  // Restore current context from top and clear it in debug mode.
+  ExternalReference context_address(Top::k_context_address);
+  mov(esi, Operand::StaticVariable(context_address));
+#ifdef DEBUG
+  mov(Operand::StaticVariable(context_address), Immediate(0));
+#endif
+
+  // Push the return address to get ready to return.
+  push(ecx);
+
+  // Clear the top frame.
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
+}
+
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  // The pc (return address) is already on TOS.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      push(Immediate(StackHandler::TRY_CATCH));
+    } else {
+      push(Immediate(StackHandler::TRY_FINALLY));
+    }
+    push(ebp);
+  } else {
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for ebp. We expect the code throwing an exception to check ebp
+    // before dereferencing it to restore the context.
+    push(Immediate(StackHandler::ENTRY));
+    push(Immediate(0));  // NULL frame pointer.
+  }
+  // Save the current handler as the next handler.
+  push(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  // Link this handler as the new current one.
+  mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp);
+}
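+
+// After this sequence the handler occupies four words on the stack, from
+// esp upwards: [next handler] [ebp or NULL] [state] [return address],
+// which is the StackHandlerConstants::kSize == 4 * kPointerSize layout
+// asserted above.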
+
+
+Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
+                                   JSObject* holder, Register holder_reg,
+                                   Register scratch,
+                                   Label* miss) {
+  // Make sure there's no overlap between scratch and the other
+  // registers.
+  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 1;
+
+  // Check the maps in the prototype chain.
+  // Traverse the prototype chain from the object and do map checks.
+  while (object != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+    JSObject* prototype = JSObject::cast(object->GetPrototype());
+    if (Heap::InNewSpace(prototype)) {
+      // Get the map of the current object.
+      mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      cmp(Operand(scratch), Immediate(Handle<Map>(object->map())));
+      // Branch on the result of the map check.
+      j(not_equal, miss, not_taken);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalProxy()) {
+        CheckAccessGlobalProxy(reg, scratch, miss);
+
+        // Restore scratch register to be the map of the object.
+        // We load the prototype from the map in the scratch register.
+        mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      reg = holder_reg;  // from now on the object is in holder_reg
+      mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+
+    } else {
+      // Check the map of the current object.
+      cmp(FieldOperand(reg, HeapObject::kMapOffset),
+          Immediate(Handle<Map>(object->map())));
+      // Branch on the result of the map check.
+      j(not_equal, miss, not_taken);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalProxy()) {
+        CheckAccessGlobalProxy(reg, scratch, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // from now on the object is in holder_reg
+      mov(reg, Handle<JSObject>(prototype));
+    }
+
+    // Go to the next object in the prototype chain.
+    object = prototype;
+  }
+
+  // Check the holder map.
+  cmp(FieldOperand(reg, HeapObject::kMapOffset),
+      Immediate(Handle<Map>(holder->map())));
+  j(not_equal, miss, not_taken);
+
+  // Log the check depth.
+  LOG(IntEvent("check-maps-depth", depth));
+
+  // Perform security check for access to the global object and return
+  // the holder register.
+  ASSERT(object == holder);
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+  if (object->IsJSGlobalProxy()) {
+    CheckAccessGlobalProxy(reg, scratch, miss);
+  }
+  return reg;
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch,
+                                            Label* miss) {
+  Label same_contexts;
+
+  ASSERT(!holder_reg.is(scratch));
+
+  // Load current lexical context from the stack frame.
+  mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // When generating debug code, make sure the lexical context is set.
+  if (FLAG_debug_code) {
+    cmp(Operand(scratch), Immediate(0));
+    Check(not_equal, "we should not have an empty lexical context");
+  }
+  // Load the global context of the current context.
+  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  mov(scratch, FieldOperand(scratch, offset));
+  mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    push(scratch);
+    // Read the first word and compare to global_context_map.
+    mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+    cmp(scratch, Factory::global_context_map());
+    Check(equal, "JSGlobalObject::global_context should be a global context.");
+    pop(scratch);
+  }
+
+  // Check if both contexts are the same.
+  cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  j(equal, &same_contexts, taken);
+
+  // Compare security tokens, save holder_reg on the stack so we can use it
+  // as a temporary register.
+  //
+  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
+  push(holder_reg);
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  mov(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    cmp(holder_reg, Factory::null_value());
+    Check(not_equal, "JSGlobalProxy::context() should not be null.");
+
+    push(holder_reg);
+    // Read the first word and compare to global_context_map(),
+    mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+    cmp(holder_reg, Factory::global_context_map());
+    Check(equal, "JSGlobalObject::global_context should be a global context.");
+    pop(holder_reg);
+  }
+
+  int token_offset = Context::kHeaderSize +
+                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+  mov(scratch, FieldOperand(scratch, token_offset));
+  cmp(scratch, FieldOperand(holder_reg, token_offset));
+  pop(holder_reg);
+  j(not_equal, miss, not_taken);
+
+  bind(&same_contexts);
+}
+
+
+void MacroAssembler::LoadAllocationTopHelper(Register result,
+                                             Register result_end,
+                                             Register scratch,
+                                             AllocationFlags flags) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Just return if allocation top is already known.
+  if ((flags & RESULT_CONTAINS_TOP) != 0) {
+    // No use of scratch if allocation top is provided.
+    ASSERT(scratch.is(no_reg));
+#ifdef DEBUG
+    // Assert that result actually contains top on entry.
+    cmp(result, Operand::StaticVariable(new_space_allocation_top));
+    Check(equal, "Unexpected allocation top");
+#endif
+    return;
+  }
+
+  // Move address of new object to result. Use scratch register if available.
+  if (scratch.is(no_reg)) {
+    mov(result, Operand::StaticVariable(new_space_allocation_top));
+  } else {
+    ASSERT(!scratch.is(result_end));
+    mov(Operand(scratch), Immediate(new_space_allocation_top));
+    mov(result, Operand(scratch, 0));
+  }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+                                               Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Update new top. Use scratch if available.
+  if (scratch.is(no_reg)) {
+    mov(Operand::StaticVariable(new_space_allocation_top), result_end);
+  } else {
+    mov(Operand(scratch, 0), result_end);
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(result_end));
+
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  lea(result_end, Operand(result, object_size));
+  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+  j(above, gc_required, not_taken);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    or_(Operand(result), Immediate(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int header_size,
+                                        ScaleFactor element_size,
+                                        Register element_count,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(result_end));
+
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  lea(result_end, Operand(result, element_count, element_size, header_size));
+  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+  j(above, gc_required);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    or_(Operand(result), Immediate(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(result_end));
+
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  if (!object_size.is(result_end)) {
+    mov(result_end, object_size);
+  }
+  add(result_end, Operand(result));
+  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+  j(above, gc_required, not_taken);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    or_(Operand(result), Immediate(kHeapObjectTag));
+  }
+}
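+
+// A usage sketch (illustrative only, not generated here): a caller
+// allocating a fixed 8-byte object might write
+//
+//   Label gc_required;
+//   AllocateInNewSpace(8, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
+//
+// after which eax holds the tagged pointer and ebx the new allocation top.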
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Make sure the object has no tag before resetting top.
+  and_(Operand(object), Immediate(~kHeapObjectTagMask));
+#ifdef DEBUG
+  cmp(object, Operand::StaticVariable(new_space_allocation_top));
+  Check(below, "Undo allocation of non allocated memory");
+#endif
+  mov(Operand::StaticVariable(new_space_allocation_top), object);
+}
+
+
+void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
+                                      Register result,
+                                      Register op,
+                                      JumpTarget* then_target) {
+  JumpTarget ok;
+  test(result, Operand(result));
+  ok.Branch(not_zero, taken);
+  test(op, Operand(op));
+  then_target->Branch(sign, not_taken);
+  ok.Bind();
+}
+
+
+void MacroAssembler::NegativeZeroTest(Register result,
+                                      Register op,
+                                      Label* then_label) {
+  Label ok;
+  test(result, Operand(result));
+  j(not_zero, &ok, taken);
+  test(op, Operand(op));
+  j(sign, then_label, not_taken);
+  bind(&ok);
+}
+
+
+void MacroAssembler::NegativeZeroTest(Register result,
+                                      Register op1,
+                                      Register op2,
+                                      Register scratch,
+                                      Label* then_label) {
+  Label ok;
+  test(result, Operand(result));
+  j(not_zero, &ok, taken);
+  mov(scratch, Operand(op1));
+  or_(scratch, Operand(op2));
+  j(sign, then_label, not_taken);
+  bind(&ok);
+}
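+
+// The NegativeZeroTest variants above guard the JavaScript distinction
+// between +0 and -0: an integer multiply such as -1 * 0 produces the bit
+// pattern 0 where the spec requires -0, so generated code branches to
+// then_label/then_target (typically a slow path that materializes the
+// heap number -0) whenever the result is zero and an operand is negative.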
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Register scratch,
+                                             Label* miss) {
+  // Check that the receiver isn't a smi.
+  test(function, Immediate(kSmiTagMask));
+  j(zero, miss, not_taken);
+
+  // Check that the function really is a function.
+  CmpObjectType(function, JS_FUNCTION_TYPE, result);
+  j(not_equal, miss, not_taken);
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
+  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
+  j(not_zero, &non_instance, not_taken);
+
+  // Get the prototype or initial map from the function.
+  mov(result,
+      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  cmp(Operand(result), Immediate(Factory::the_hole_value()));
+  j(equal, miss, not_taken);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  CmpObjectType(result, MAP_TYPE, scratch);
+  j(not_equal, &done);
+
+  // Get the prototype from the initial map.
+  mov(result, FieldOperand(result, Map::kPrototypeOffset));
+  jmp(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  bind(&non_instance);
+  mov(result, FieldOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  bind(&done);
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  call(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+  ASSERT(argc >= 1 && generating_stub());
+  ret((argc - 1) * kPointerSize);
+}
+
+
+void MacroAssembler::IllegalOperation(int num_arguments) {
+  if (num_arguments > 0) {
+    add(Operand(esp), Immediate(num_arguments * kPointerSize));
+  }
+  mov(eax, Immediate(Factory::undefined_value()));
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
+  CallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments match the
+  // expectation.
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    return;
+  }
+
+  Runtime::FunctionId function_id =
+      static_cast<Runtime::FunctionId>(f->stub_id);
+  RuntimeStub stub(function_id, num_arguments);
+  CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+                                     int num_arguments,
+                                     int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  Set(eax, Immediate(num_arguments));
+  JumpToRuntime(ext);
+}
+
+
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
+  // Set the entry point and jump to the C entry runtime stub.
+  mov(ebx, Immediate(ext));
+  CEntryStub ces(1);
+  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    const Operand& code_operand,
+                                    Label* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  Label invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      mov(eax, actual.immediate());
+      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+      if (expected.immediate() == sentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip the adaptation code by making it
+        // look like we have a match between expected and actual number
+        // of arguments.
+        definitely_matches = true;
+      } else {
+        mov(ebx, expected.immediate());
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmp(expected.reg(), actual.immediate());
+      j(equal, &invoke);
+      ASSERT(expected.reg().is(ebx));
+      mov(eax, actual.immediate());
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmp(expected.reg(), Operand(actual.reg()));
+      j(equal, &invoke);
+      ASSERT(actual.reg().is(eax));
+      ASSERT(expected.reg().is(ebx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor =
+        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+    if (!code_constant.is_null()) {
+      mov(edx, Immediate(code_constant));
+      add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_operand.is_reg(edx)) {
+      mov(edx, code_operand);
+    }
+
+    if (flag == CALL_FUNCTION) {
+      call(adaptor, RelocInfo::CODE_TARGET);
+      jmp(done);
+    } else {
+      jmp(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&invoke);
+  }
+}
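+
+// When the argument counts may differ, the prologue above calls (or
+// tail-jumps to) the ArgumentsAdaptorTrampoline with eax = actual and
+// ebx = expected counts; the trampoline builds an adaptor frame before
+// entering the real code. A proven match branches straight to 'invoke'.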
+
+
+void MacroAssembler::InvokeCode(const Operand& code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag) {
+  Label done;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(code);
+  }
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                RelocInfo::Mode rmode,
+                                InvokeFlag flag) {
+  Label done;
+  Operand dummy(eax);
+  InvokePrologue(expected, actual, code, dummy, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    call(code, rmode);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(code, rmode);
+  }
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(fun.is(edi));
+  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  lea(edx, FieldOperand(edx, Code::kHeaderSize));
+
+  ParameterCount expected(ebx);
+  InvokeCode(Operand(edx), expected, actual, flag);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  // Calls are not allowed in some stubs.
+  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+
+  // Rely on the assertion to check that the number of provided
+  // arguments match the expected number of arguments. Fake a
+  // parameter count to avoid emitting code to do the check.
+  ParameterCount expected(0);
+  InvokeCode(Handle<Code>(code), expected, expected,
+             RelocInfo::CODE_TARGET, flag);
+
+  const char* name = Builtins::GetName(id);
+  int argc = Builtins::GetArgumentsCount(id);
+
+  if (!resolved) {
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
+    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
+    unresolved_.Add(entry);
+  }
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  const char* name = Builtins::GetName(id);
+  int argc = Builtins::GetArgumentsCount(id);
+
+  mov(Operand(target), Immediate(code));
+  if (!resolved) {
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
+        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
+    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
+    unresolved_.Add(entry);
+  }
+  add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+                                            bool* resolved) {
+  // Move the builtin function into the temporary function slot by
+  // reading it from the builtins object. NOTE: We should be able to
+  // reduce this to two instructions by putting the function table in
+  // the global object instead of the "builtins" object and by using a
+  // real register for the function.
+  mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  mov(edx, FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+  int builtins_offset =
+      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+  mov(edi, FieldOperand(edx, builtins_offset));
+
+  return Builtins::GetCode(id, resolved);
+}
+
+
+void MacroAssembler::Ret() {
+  ret(0);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
+  }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Operand operand = Operand::StaticVariable(ExternalReference(counter));
+    if (value == 1) {
+      inc(operand);
+    } else {
+      add(operand, Immediate(value));
+    }
+  }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Operand operand = Operand::StaticVariable(ExternalReference(counter));
+    if (value == 1) {
+      dec(operand);
+    } else {
+      sub(operand, Immediate(value));
+    }
+  }
+}
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+  if (FLAG_debug_code) Check(cc, msg);
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg) {
+  Label L;
+  j(cc, &L, taken);
+  Abort(msg);
+  // will not return here
+  bind(&L);
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+  // We want to pass the msg string like a smi to avoid GC
+  // problems, however msg is not guaranteed to be aligned
+  // properly. Instead, we pass an aligned pointer that is
+  // a proper v8 smi, but also pass the alignment difference
+  // from the real pointer as a smi.
+  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+#endif
+  push(eax);
+  push(Immediate(p0));
+  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
+  CallRuntime(Runtime::kAbort, 2);
+  // will not return here
+}
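+
+// A worked example of the alignment trick above (constants as on ia32,
+// where kSmiTag == 0 and kSmiTagMask == 1): if msg happens to live at
+// 0x080483c7, then p0 = (0x080483c7 & ~1) + 0 = 0x080483c6, a valid smi
+// bit pattern, and the second argument Smi::FromInt(p1 - p0) ==
+// Smi::FromInt(1) lets the runtime recover msg as p0 + 1.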
+
+
+CodePatcher::CodePatcher(byte* address, int size)
+    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to
+  // patch. The size is adjusted with kGap in order for the assembler to
+  // generate size bytes of instructions without failing with buffer size
+  // constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
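+
+// A minimal usage sketch (hypothetical, for illustration only):
+//
+//   CodePatcher patcher(pc, 2);   // patch exactly 2 bytes at pc
+//   patcher.masm()->int3();       // emit the replacement bytes; the
+//   patcher.masm()->int3();       // destructor flushes the icache
+//
+// The destructor also asserts that exactly 'size' bytes were emitted.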
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
new file mode 100644
index 0000000..ed72c96
--- /dev/null
+++ b/src/ia32/macro-assembler-ia32.h
@@ -0,0 +1,411 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
+#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
+
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class JumpTarget;
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+  MacroAssembler(void* buffer, int size);
+
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  // Set the remembered set bit for [object+offset].
+  // object is the object being stored into, value is the object being stored.
+  // If offset is zero, then the scratch register contains the array index into
+  // the elements array represented as a Smi.
+  // All registers are clobbered by the operation.
+  void RecordWrite(Register object,
+                   int offset,
+                   Register value,
+                   Register scratch);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // ---------------------------------------------------------------------------
+  // Debugger Support
+
+  void SaveRegistersToMemory(RegList regs);
+  void RestoreRegistersFromMemory(RegList regs);
+  void PushRegistersFromMemory(RegList regs);
+  void PopRegistersToMemory(RegList regs);
+  void CopyRegistersFromStackToMemory(Register base,
+                                      Register scratch,
+                                      RegList regs);
+#endif
+
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
+  // Enter specific kind of exit frame; either EXIT or
+  // EXIT_DEBUG. Expects the number of arguments in register eax and
+  // sets up the number of arguments in register edi and the pointer
+  // to the first argument in register esi.
+  void EnterExitFrame(StackFrame::Type type);
+
+  // Leave the current exit frame. Expects the return value in
+  // register eax:edx (untouched) and the pointer to the first
+  // argument in register esi.
+  void LeaveExitFrame(StackFrame::Type type);
+
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(const Operand& code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag);
+
+  void InvokeCode(Handle<Code> code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  RelocInfo::Mode rmode,
+                  InvokeFlag flag);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+
+  // Store the code object for the given builtin in the target register.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+  // Expression support
+  void Set(Register dst, const Immediate& x);
+  void Set(const Operand& dst, const Immediate& x);
+
+  // Compare object type for heap object.
+  // Incoming register is heap_object and outgoing register is map.
+  void CmpObjectType(Register heap_object, InstanceType type, Register map);
+
+  // Compare instance type for map.
+  void CmpInstanceType(Register map, InstanceType type);
+
+  // FCmp is similar to integer cmp, but requires unsigned
+  // jcc instructions (je, ja, jae, jb, jbe, and jz).
+  void FCmp();
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link into try handler chain.  The return
+  // address must be pushed before calling this helper.
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generates code that verifies that the maps of objects in the
+  // prototype chain of object haven't changed since the code was
+  // generated and branches to the miss label if any map has. If
+  // necessary the function also generates code for a security check
+  // in case of global object holders. The scratch and holder
+  // registers are always clobbered, but the object register is only
+  // clobbered if it is the same as the holder register. The function
+  // returns a register containing the holder - either object_reg or
+  // holder_reg.
+  Register CheckMaps(JSObject* object, Register object_reg,
+                     JSObject* holder, Register holder_reg,
+                     Register scratch, Label* miss);
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, but the scratch register is clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg,
+                              Register scratch,
+                              Label* miss);
+
+
+  // ---------------------------------------------------------------------------
+  // Allocation support
+
+  // Allocate an object in new space. If the new space is exhausted control
+  // continues at the gc_required label. The allocated object is returned in
+  // result and end of the new object is returned in result_end. The register
+  // scratch can be passed as no_reg in which case an additional object
+  // reference will be added to the reloc info. The returned pointers in result
+  // and result_end have not yet been tagged as heap objects. If
+  // result_contains_top_on_entry is true the content of result is known to be
+  // the allocation top on entry (could be result_end from a previous call to
+  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
+  // should be no_reg as it is never used.
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  void AllocateInNewSpace(int header_size,
+                          ScaleFactor element_size,
+                          Register element_count,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  // Undo allocation in new space. The object passed and objects allocated
+  // after it will no longer be allocated. Make sure that no pointers are
+  // left to the object(s) no longer allocated as they would be invalid when
+  // allocation is undone.
+  void UndoAllocationInNewSpace(Register object);
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  // Check if result is zero and op is negative.
+  void NegativeZeroTest(Register result, Register op, Label* then_label);
+
+  // Check if result is zero and op is negative in code using jump targets.
+  void NegativeZeroTest(CodeGenerator* cgen,
+                        Register result,
+                        Register op,
+                        JumpTarget* then_target);
+
+  // Check if result is zero and any of op1 and op2 are negative.
+  // Register scratch is destroyed, and it must be different from op2.
+  void NegativeZeroTest(Register result, Register op1, Register op2,
+                        Register scratch, Label* then_label);
+
+  // Tries to get the function prototype of a function and puts the value
+  // in the result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other registers may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function,
+                               Register result,
+                               Register scratch,
+                               Label* miss);
+
+  // Generates code for reporting that an illegal operation has
+  // occurred.
+  void IllegalOperation(int num_arguments);
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub);
+
+  // Return from a code stub after popping its arguments.
+  void StubReturn(int argc);
+
+  // Call a runtime routine.
+  // Eventually this should be used for all C calls.
+  void CallRuntime(Runtime::Function* f, int num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id, int num_arguments);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToRuntime, but also takes care of passing the number
+  // of arguments.
+  void TailCallRuntime(const ExternalReference& ext,
+                       int num_arguments,
+                       int result_size);
+
+  // Jump to a runtime routine.
+  void JumpToRuntime(const ExternalReference& ext);
+
+
+  // ---------------------------------------------------------------------------
+  // Utilities
+
+  void Ret();
+
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+  List<Unresolved>* unresolved() { return &unresolved_; }
+
+  Handle<Object> CodeObject() { return code_object_; }
+
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value);
+  void IncrementCounter(StatsCounter* counter, int value);
+  void DecrementCounter(StatsCounter* counter, int value);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, const char* msg);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, const char* msg);
+
+  // Print a message to stdout and abort execution.
+  void Abort(const char* msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+  bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+  List<Unresolved> unresolved_;
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  Handle<Object> code_object_;  // This handle will be patched with the
+                                // code object on installation.
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      const Operand& code_operand,
+                      Label* done,
+                      InvokeFlag flag);
+
+  // Prepares for a call or jump to a builtin by doing two things:
+  // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in the register edi.
+  // 2. Fetches the builtin's code object, and returns it in a handle, at
+  //    compile time, so that later code can emit instructions to jump or call
+  //    the builtin directly.  If the code object has not yet been created, it
+  //    returns the builtin code object for IllegalFunction, and sets the
+  //    output parameter "resolved" to false.  Code that uses the return value
+  //    should then add the address and the builtin name to the list of fixups
+  //    called unresolved_, which is fixed up by the bootstrapper.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Allocation support helpers.
+  void LoadAllocationTopHelper(Register result,
+                               Register result_end,
+                               Register scratch,
+                               AllocationFlags flags);
+  void UpdateAllocationTopHelper(Register result_end, Register scratch);
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to
+// emit relocation information. If any of these constraints are violated an
+// assertion fails.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int size);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate an Operand for loading a field from an object.
+static inline Operand FieldOperand(Register object, int offset) {
+  return Operand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate an Operand for loading an indexed field from an object.
+static inline Operand FieldOperand(Register object,
+                                   Register index,
+                                   ScaleFactor scale,
+                                   int offset) {
+  return Operand(object, index, scale, offset - kHeapObjectTag);
+}
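+
+// For instance (sketch only), loading an object's map and then an indexed
+// element through these helpers:
+//
+//   masm->mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
+//   masm->mov(eax, FieldOperand(ebx, edx, times_pointer_size,
+//                               FixedArray::kHeaderSize));
+//
+// Both forms subtract kHeapObjectTag so a tagged pointer can be used
+// directly as the base register.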
+
+
+#ifdef GENERATED_CODE_COVERAGE
+extern void LogGeneratedCodeCoverage(const char* file_line);
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) {                                               \
+    byte* ia32_coverage_function =                                        \
+        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
+    masm->pushfd();                                                       \
+    masm->pushad();                                                       \
+    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
+    masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY);         \
+    masm->pop(eax);                                                       \
+    masm->popad();                                                        \
+    masm->popfd();                                                        \
+  }                                                                       \
+  masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
new file mode 100644
index 0000000..7af4e89
--- /dev/null
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -0,0 +1,1174 @@
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "unicode.h"
+#include "log.h"
+#include "ast.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "ia32/macro-assembler-ia32.h"
+#include "ia32/regexp-macro-assembler-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_NATIVE_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - edx : current character. Must be loaded using LoadCurrentCharacter
+ *         before using any of the dispatch methods.
+ * - edi : current position in input, as negative offset from end of string.
+ *         Please notice that this is the byte offset, not the character offset!
+ * - esi : end of input (points to byte after last character in input).
+ * - ebp : frame pointer. Used to access arguments, local variables and
+ *         RegExp registers.
+ * - esp : points to tip of C stack.
+ * - ecx : points to tip of backtrack stack
+ *
+ * The registers eax and ebx are free to use for computations.
+ *
+ * Each call to a public method should retain this convention.
+ * The stack will have the following structure:
+ *       - stack_area_base     (High end of the memory area to use as
+ *                             backtracking stack)
+ *       - at_start           (1 if matching from the string start, else 0)
+ *       - int* capture_array (int[num_saved_registers_], for output).
+ *       - end of input       (Address of end of string)
+ *       - start of input     (Address of first character in string)
+ *       - void* input_string (location of a handle containing the string)
+ *       --- frame alignment (if applicable) ---
+ *       - return address
+ * ebp-> - old ebp
+ *       - backup of caller esi
+ *       - backup of caller edi
+ *       - backup of caller ebx
+ *       - Offset of location before start of input (effectively character
+ *         position -1). Used to initialize capture registers to a non-position.
+ *       - register 0  ebp[-4]  (Only positions must be stored in the first
+ *       - register 1  ebp[-8]   num_saved_registers_ registers)
+ *       - ...
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code, by calling the code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ *              Address start,
+ *              Address end,
+ *              int* capture_output_array,
+ *              bool at_start,
+ *              byte* stack_area_base)
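+ *
+ * Illustrative sketch (not part of the original file): a hypothetical caller
+ * would enter the generated code by casting its entry address to this
+ * signature, e.g.:
+ *
+ *   typedef int (*RegExpCodeEntry)(String* input_string, Address start,
+ *                                  Address end, int* capture_output_array,
+ *                                  bool at_start, byte* stack_area_base);
+ *   int result = reinterpret_cast<RegExpCodeEntry>(entry)(
+ *       subject, start, end, captures, at_start, stack_base);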
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
+    Mode mode,
+    int registers_to_save)
+    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_() {
+  ASSERT_EQ(0, registers_to_save % 2);
+  __ jmp(&entry_label_);   // We'll write the entry code later.
+  __ bind(&start_label_);  // And then continue from here.
+}
+
+
+RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerIA32::stack_limit_slack() {
+  return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
+  if (by != 0) {
+    __ add(Operand(edi), Immediate(by * char_size()));
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
+  ASSERT(reg >= 0);
+  ASSERT(reg < num_registers_);
+  if (by != 0) {
+    __ add(register_location(reg), Immediate(by));
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::Backtrack() {
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
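+  // (The stored offsets are relative to the code object -- see PushBacktrack
+  // -- so the backtrack stack stays valid even if a GC moves the code.)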
+  Pop(ebx);
+  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
+  __ jmp(Operand(ebx));
+}
+
+
+void RegExpMacroAssemblerIA32::Bind(Label* label) {
+  __ bind(label);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacter(uint32_t c, Label* on_equal) {
+  __ cmp(current_character(), c);
+  BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  __ cmp(current_character(), limit);
+  BranchOrBacktrack(greater, on_greater);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ cmp(Operand(ebp, kAtStart), Immediate(0));
+  BranchOrBacktrack(equal, &not_at_start);
+  // If we did, are we still at the start of the input?
+  __ lea(eax, Operand(esi, edi, times_1, 0));
+  __ cmp(eax, Operand(ebp, kInputStart));
+  BranchOrBacktrack(equal, on_at_start);
+  __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
+  // Did we start the match at the start of the string at all?
+  __ cmp(Operand(ebp, kAtStart), Immediate(0));
+  BranchOrBacktrack(equal, on_not_at_start);
+  // If we did, are we still at the start of the input?
+  __ lea(eax, Operand(esi, edi, times_1, 0));
+  __ cmp(eax, Operand(ebp, kInputStart));
+  BranchOrBacktrack(not_equal, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacterLT(uc16 limit, Label* on_less) {
+  __ cmp(current_character(), limit);
+  BranchOrBacktrack(less, on_less);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
+                                               int cp_offset,
+                                               Label* on_failure,
+                                               bool check_end_of_string) {
+  int byte_length = str.length() * char_size();
+  int byte_offset = cp_offset * char_size();
+  if (check_end_of_string) {
+    // Check that there are at least str.length() characters left in the input.
+    __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
+    BranchOrBacktrack(greater, on_failure);
+  }
+
+  if (on_failure == NULL) {
+    // Instead of inlining a backtrack, (re)use the global backtrack target.
+    on_failure = &backtrack_label_;
+  }
+
+  for (int i = 0; i < str.length(); i++) {
+    if (mode_ == ASCII) {
+      __ cmpb(Operand(esi, edi, times_1, byte_offset + i),
+              static_cast<int8_t>(str[i]));
+    } else {
+      ASSERT(mode_ == UC16);
+      __ cmpw(Operand(esi, edi, times_1, byte_offset + i * sizeof(uc16)),
+              Immediate(str[i]));
+    }
+    BranchOrBacktrack(not_equal, on_failure);
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
+  Label fallthrough;
+  __ cmp(edi, Operand(backtrack_stackpointer(), 0));
+  __ j(not_equal, &fallthrough);
+  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));  // Pop.
+  BranchOrBacktrack(no_condition, on_equal);
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  __ mov(edx, register_location(start_reg));  // Index of start of capture
+  __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
+  __ sub(ebx, Operand(edx));  // Length of capture.
+
+  // The length of a capture should not be negative. This can only happen
+  // if the end of the capture is unrecorded, or at a point earlier than
+  // the start of the capture.
+  BranchOrBacktrack(less, on_no_match, not_taken);
+
+  // If length is zero, either the capture is empty or it is completely
+  // uncaptured. In either case succeed immediately.
+  __ j(equal, &fallthrough);
+
+  if (mode_ == ASCII) {
+    Label success;
+    Label fail;
+    Label loop_increment;
+    // Save register contents to make the registers available below.
+    __ push(edi);
+    __ push(backtrack_stackpointer());
+    // After this, the eax, ecx, and edi registers are available.
+
+    __ add(edx, Operand(esi));  // Start of capture
+    __ add(edi, Operand(esi));  // Start of text to match against capture.
+    __ add(ebx, Operand(edi));  // End of text to match against capture.
+
+    Label loop;
+    __ bind(&loop);
+    __ movzx_b(eax, Operand(edi, 0));
+    __ cmpb_al(Operand(edx, 0));
+    __ j(equal, &loop_increment);
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ or_(eax, 0x20);  // Convert match character to lower-case.
+    __ lea(ecx, Operand(eax, -'a'));
+    __ cmp(ecx, static_cast<int32_t>('z' - 'a'));  // Is eax a lowercase letter?
+    __ j(above, &fail);
+    // Also convert capture character.
+    __ movzx_b(ecx, Operand(edx, 0));
+    __ or_(ecx, 0x20);
+
+    __ cmp(eax, Operand(ecx));
+    __ j(not_equal, &fail);
+
+    __ bind(&loop_increment);
+    // Increment pointers into match and capture strings.
+    __ add(Operand(edx), Immediate(1));
+    __ add(Operand(edi), Immediate(1));
+    // Compare to end of match, and loop if not done.
+    __ cmp(edi, Operand(ebx));
+    __ j(below, &loop, taken);
+    __ jmp(&success);
+
+    __ bind(&fail);
+    // Restore original values before failing.
+    __ pop(backtrack_stackpointer());
+    __ pop(edi);
+    BranchOrBacktrack(no_condition, on_no_match);
+
+    __ bind(&success);
+    // Restore original value before continuing.
+    __ pop(backtrack_stackpointer());
+    // Drop original value of character position.
+    __ add(Operand(esp), Immediate(kPointerSize));
+    // Compute new value of character position after the matched part.
+    __ sub(edi, Operand(esi));
+  } else {
+    ASSERT(mode_ == UC16);
+    // Save registers before calling C function.
+    __ push(esi);
+    __ push(edi);
+    __ push(backtrack_stackpointer());
+    __ push(ebx);
+
+    const int argument_count = 3;
+    FrameAlign(argument_count, ecx);
+    // Put arguments into allocated stack area, last argument highest on stack.
+    // Parameters are
+    //   Address byte_offset1 - Address captured substring's start.
+    //   Address byte_offset2 - Address of current character position.
+    //   size_t byte_length - length of capture in bytes(!)
+
+    // Set byte_length.
+    __ mov(Operand(esp, 2 * kPointerSize), ebx);
+    // Set byte_offset2.
+    // Found by adding negative string-end offset of current position (edi)
+    // to end of string.
+    __ add(edi, Operand(esi));
+    __ mov(Operand(esp, 1 * kPointerSize), edi);
+    // Set byte_offset1.
+    // Start of capture, where edx already holds string-end negative offset.
+    __ add(edx, Operand(esi));
+    __ mov(Operand(esp, 0 * kPointerSize), edx);
+
+    ExternalReference compare =
+        ExternalReference::re_case_insensitive_compare_uc16();
+    CallCFunction(compare, argument_count);
+    // Pop original values before reacting on result value.
+    __ pop(ebx);
+    __ pop(backtrack_stackpointer());
+    __ pop(edi);
+    __ pop(esi);
+
+    // Check if function returned non-zero for success or zero for failure.
+    __ or_(eax, Operand(eax));
+    BranchOrBacktrack(zero, on_no_match);
+    // On success, increment position by length of capture.
+    __ add(edi, Operand(ebx));
+  }
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotBackReference(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  Label success;
+  Label fail;
+
+  // Find length of back-referenced capture.
+  __ mov(edx, register_location(start_reg));
+  __ mov(eax, register_location(start_reg + 1));
+  __ sub(eax, Operand(edx));  // Length to check.
+  // Fail on partial or illegal capture (start of capture after end of capture).
+  BranchOrBacktrack(less, on_no_match);
+  // Succeed on empty capture (including no capture)
+  __ j(equal, &fallthrough);
+
+  // Check that there are sufficient characters left in the input.
+  __ mov(ebx, edi);
+  __ add(ebx, Operand(eax));
+  BranchOrBacktrack(greater, on_no_match);
+
+  // Save register to make it available below.
+  __ push(backtrack_stackpointer());
+
+  // Compute pointers to match string and capture string
+  __ lea(ebx, Operand(esi, edi, times_1, 0));  // Start of match.
+  __ add(edx, Operand(esi));  // Start of capture.
+  __ lea(ecx, Operand(eax, ebx, times_1, 0));  // End of match
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == ASCII) {
+    __ movzx_b(eax, Operand(edx, 0));
+    __ cmpb_al(Operand(ebx, 0));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ movzx_w(eax, Operand(edx, 0));
+    __ cmpw_ax(Operand(ebx, 0));
+  }
+  __ j(not_equal, &fail);
+  // Increment pointers into capture and match string.
+  __ add(Operand(edx), Immediate(char_size()));
+  __ add(Operand(ebx), Immediate(char_size()));
+  // Check if we have reached end of match area.
+  __ cmp(ebx, Operand(ecx));
+  __ j(below, &loop);
+  __ jmp(&success);
+
+  __ bind(&fail);
+  // Restore backtrack stackpointer.
+  __ pop(backtrack_stackpointer());
+  BranchOrBacktrack(no_condition, on_no_match);
+
+  __ bind(&success);
+  // Move current character position to position after match.
+  __ mov(edi, ecx);
+  __ sub(Operand(edi), esi);
+  // Restore backtrack stackpointer.
+  __ pop(backtrack_stackpointer());
+
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotRegistersEqual(int reg1,
+                                                      int reg2,
+                                                      Label* on_not_equal) {
+  __ mov(eax, register_location(reg1));
+  __ cmp(eax, register_location(reg2));
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
+                                                 Label* on_not_equal) {
+  __ cmp(current_character(), c);
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
+                                                      uint32_t mask,
+                                                      Label* on_equal) {
+  __ mov(eax, current_character());
+  __ and_(eax, mask);
+  __ cmp(eax, c);
+  BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
+                                                         uint32_t mask,
+                                                         Label* on_not_equal) {
+  __ mov(eax, current_character());
+  __ and_(eax, mask);
+  __ cmp(eax, c);
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  ASSERT(minus < String::kMaxUC16CharCode);
+  __ lea(eax, Operand(current_character(), -minus));
+  __ and_(eax, mask);
+  __ cmp(eax, c);
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
+                                                          int cp_offset,
+                                                          bool check_offset,
+                                                          Label* on_no_match) {
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check
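+  // E.g., for the '\t'..'\r' range used below: an unsigned
+  // (c - '\t') <= ('\r' - '\t'), implemented by the sub/cmp/branch sequences.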
+  switch (type) {
+  case 's':
+    // Match space-characters
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      if (check_offset) {
+        LoadCurrentCharacter(cp_offset, on_no_match);
+      } else {
+        LoadCurrentCharacterUnchecked(cp_offset, 1);
+      }
+      Label success;
+      __ cmp(current_character(), ' ');
+      __ j(equal, &success);
+      // Check range 0x09..0x0d
+      __ sub(Operand(current_character()), Immediate('\t'));
+      __ cmp(current_character(), '\r' - '\t');
+      BranchOrBacktrack(above, on_no_match);
+      __ bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // Match non-space characters.
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      __ cmp(current_character(), ' ');
+      BranchOrBacktrack(equal, on_no_match);
+      __ sub(Operand(current_character()), Immediate('\t'));
+      __ cmp(current_character(), '\r' - '\t');
+      BranchOrBacktrack(below_equal, on_no_match);
+      return true;
+    }
+    return false;
+  case 'd':
+    // Match ASCII digits ('0'..'9')
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ sub(Operand(current_character()), Immediate('0'));
+    __ cmp(current_character(), '9' - '0');
+    BranchOrBacktrack(above, on_no_match);
+    return true;
+  case 'D':
+    // Match non ASCII-digits
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ sub(Operand(current_character()), Immediate('0'));
+    __ cmp(current_character(), '9' - '0');
+    BranchOrBacktrack(below_equal, on_no_match);
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ xor_(Operand(current_character()), Immediate(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+    __ sub(Operand(current_character()), Immediate(0x0b));
+    __ cmp(current_character(), 0x0c - 0x0b);
+    BranchOrBacktrack(below_equal, on_no_match);
+    if (mode_ == UC16) {
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed ((current_char ^ 0x01) - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ sub(Operand(current_character()), Immediate(0x2028 - 0x0b));
+      __ cmp(current_character(), 1);
+      BranchOrBacktrack(below_equal, on_no_match);
+    }
+    return true;
+  }
+  case '*':
+    // Match any character.
+    if (check_offset) {
+      CheckPosition(cp_offset, on_no_match);
+    }
+    return true;
+  // No custom implementation (yet): w, W, s(UC16), S(UC16).
+  default:
+    return false;
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::Fail() {
+  ASSERT(FAILURE == 0);  // Return value for failure is zero.
+  __ xor_(eax, Operand(eax));  // zero eax.
+  __ jmp(&exit_label_);
+}
+
+
+Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
+  // Finalize code - write the entry point code now that we know how many
+  // registers we need.
+
+  // Entry code:
+  __ bind(&entry_label_);
+  // Start new stack frame.
+  __ push(ebp);
+  __ mov(ebp, esp);
+  // Save callee-save registers. Order here should correspond to order of
+  // kBackup_ebx etc.
+  __ push(esi);
+  __ push(edi);
+  __ push(ebx);  // Callee-save on MacOS.
+  __ push(Immediate(0));  // Make room for "input start - 1" constant.
+
+  // Check if we have space on the stack for registers.
+  Label stack_limit_hit;
+  Label stack_ok;
+
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ mov(ecx, esp);
+  __ sub(ecx, Operand::StaticVariable(stack_guard_limit));
+  // Handle it if the stack pointer is already below the stack limit.
+  __ j(below_equal, &stack_limit_hit, not_taken);
+  // Check if there is room for the variable number of registers above
+  // the stack limit.
+  __ cmp(ecx, num_registers_ * kPointerSize);
+  __ j(above_equal, &stack_ok, taken);
+  // Exit with OutOfMemory exception. There is not enough space on the stack
+  // for our working registers.
+  __ mov(eax, EXCEPTION);
+  __ jmp(&exit_label_);
+
+  __ bind(&stack_limit_hit);
+  CallCheckStackGuardState(ebx);
+  __ or_(eax, Operand(eax));
+  // If returned value is non-zero, we exit with the returned value as result.
+  __ j(not_zero, &exit_label_);
+
+  __ bind(&stack_ok);
+
+  // Allocate space on stack for registers.
+  __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
+  // Load string length.
+  __ mov(esi, Operand(ebp, kInputEnd));
+  // Load input position.
+  __ mov(edi, Operand(ebp, kInputStart));
+  // Set up edi to be negative offset from string end.
+  __ sub(edi, Operand(esi));
+  // Set eax to address of char before start of input
+  // (effectively string position -1).
+  __ lea(eax, Operand(edi, -char_size()));
+  // Store this value in a local variable, for use when clearing
+  // position registers.
+  __ mov(Operand(ebp, kInputStartMinusOne), eax);
+  if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+    // Fill saved registers with initial value = start offset - 1
+    // Fill in stack push order, to avoid accessing across an unwritten
+    // page (a problem on Windows).
+    __ mov(ecx, kRegisterZero);
+    Label init_loop;
+    __ bind(&init_loop);
+    __ mov(Operand(ebp, ecx, times_1, +0), eax);
+    __ sub(Operand(ecx), Immediate(kPointerSize));
+    __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
+    __ j(greater, &init_loop);
+  }
+  // Ensure that we have written to each stack page, in order. Skipping a page
+  // on Windows can cause segmentation faults. Assuming page size is 4k.
+  const int kPageSize = 4096;
+  const int kRegistersPerPage = kPageSize / kPointerSize;
+  for (int i = num_saved_registers_ + kRegistersPerPage - 1;
+      i < num_registers_;
+      i += kRegistersPerPage) {
+    __ mov(register_location(i), eax);  // One write every page.
+  }
+
+
+  // Initialize backtrack stack pointer.
+  __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+  // Load previous char as initial value of current-character.
+  Label at_start;
+  __ cmp(Operand(ebp, kAtStart), Immediate(0));
+  __ j(not_equal, &at_start);
+  LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+  __ jmp(&start_label_);
+  __ bind(&at_start);
+  __ mov(current_character(), '\n');
+  __ jmp(&start_label_);
+
+
+  // Exit code:
+  if (success_label_.is_linked()) {
+    // Save captures when successful.
+    __ bind(&success_label_);
+    if (num_saved_registers_ > 0) {
+      // copy captures to output
+      __ mov(ebx, Operand(ebp, kRegisterOutput));
+      __ mov(ecx, Operand(ebp, kInputEnd));
+      __ sub(ecx, Operand(ebp, kInputStart));
+      for (int i = 0; i < num_saved_registers_; i++) {
+        __ mov(eax, register_location(i));
+        __ add(eax, Operand(ecx));  // Convert to index from start, not end.
+        if (mode_ == UC16) {
+          __ sar(eax, 1);  // Convert byte index to character index.
+        }
+        __ mov(Operand(ebx, i * kPointerSize), eax);
+      }
+    }
+    __ mov(eax, Immediate(SUCCESS));
+  }
+  // Exit and return eax
+  __ bind(&exit_label_);
+  // Skip esp past regexp registers.
+  __ lea(esp, Operand(ebp, kBackup_ebx));
+  // Restore callee-save registers.
+  __ pop(ebx);
+  __ pop(edi);
+  __ pop(esi);
+  // Exit function frame, restore previous one.
+  __ pop(ebp);
+  __ ret(0);
+
+  // Backtrack code (branch target for conditional backtracks).
+  if (backtrack_label_.is_linked()) {
+    __ bind(&backtrack_label_);
+    Backtrack();
+  }
+
+  Label exit_with_exception;
+
+  // Preempt-code
+  if (check_preempt_label_.is_linked()) {
+    SafeCallTarget(&check_preempt_label_);
+
+    __ push(backtrack_stackpointer());
+    __ push(edi);
+
+    CallCheckStackGuardState(ebx);
+    __ or_(eax, Operand(eax));
+    // If returning non-zero, we should end execution with the given
+    // result as return value.
+    __ j(not_zero, &exit_label_);
+
+    __ pop(edi);
+    __ pop(backtrack_stackpointer());
+    // String might have moved: Reload esi from frame.
+    __ mov(esi, Operand(ebp, kInputEnd));
+    SafeReturn();
+  }
+
+  // Backtrack stack overflow code.
+  if (stack_overflow_label_.is_linked()) {
+    SafeCallTarget(&stack_overflow_label_);
+    // Reached if the backtrack-stack limit has been hit.
+
+    Label grow_failed;
+    // Save registers before calling C function
+    __ push(esi);
+    __ push(edi);
+
+    // Call GrowStack(backtrack_stackpointer())
+    int num_arguments = 2;
+    FrameAlign(num_arguments, ebx);
+    __ lea(eax, Operand(ebp, kStackHighEnd));
+    __ mov(Operand(esp, 1 * kPointerSize), eax);
+    __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
+    ExternalReference grow_stack = ExternalReference::re_grow_stack();
+    CallCFunction(grow_stack, num_arguments);
+    // If it returns NULL, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+    __ or_(eax, Operand(eax));
+    __ j(equal, &exit_with_exception);
+    // Otherwise use return value as new stack pointer.
+    __ mov(backtrack_stackpointer(), eax);
+    // Restore saved registers and continue.
+    __ pop(edi);
+    __ pop(esi);
+    SafeReturn();
+  }
+
+  if (exit_with_exception.is_linked()) {
+    // If any of the code above needed to exit with an exception.
+    __ bind(&exit_with_exception);
+    // Exit with Result EXCEPTION(-1) to signal thrown exception.
+    __ mov(eax, EXCEPTION);
+    __ jmp(&exit_label_);
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = Factory::NewCode(code_desc,
+                                       NULL,
+                                       Code::ComputeFlags(Code::REGEXP),
+                                       masm_->CodeObject());
+  LOG(RegExpCodeCreateEvent(*code, *source));
+  return Handle<Object>::cast(code);
+}
+
+
+void RegExpMacroAssemblerIA32::GoTo(Label* to) {
+  BranchOrBacktrack(no_condition, to);
+}
+
+
+void RegExpMacroAssemblerIA32::IfRegisterGE(int reg,
+                                            int comparand,
+                                            Label* if_ge) {
+  __ cmp(register_location(reg), Immediate(comparand));
+  BranchOrBacktrack(greater_equal, if_ge);
+}
+
+
+void RegExpMacroAssemblerIA32::IfRegisterLT(int reg,
+                                            int comparand,
+                                            Label* if_lt) {
+  __ cmp(register_location(reg), Immediate(comparand));
+  BranchOrBacktrack(less, if_lt);
+}
+
+
+void RegExpMacroAssemblerIA32::IfRegisterEqPos(int reg,
+                                               Label* if_eq) {
+  __ cmp(edi, register_location(reg));
+  BranchOrBacktrack(equal, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+    RegExpMacroAssemblerIA32::Implementation() {
+  return kIA32Implementation;
+}
+
+
+void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
+                                                    Label* on_end_of_input,
+                                                    bool check_bounds,
+                                                    int characters) {
+  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
+  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerIA32::PopCurrentPosition() {
+  Pop(edi);
+}
+
+
+void RegExpMacroAssemblerIA32::PopRegister(int register_index) {
+  Pop(eax);
+  __ mov(register_location(register_index), eax);
+}
+
+
+void RegExpMacroAssemblerIA32::PushBacktrack(Label* label) {
+  Push(Immediate::CodeRelativeOffset(label));
+  CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerIA32::PushCurrentPosition() {
+  Push(edi);
+}
+
+
+void RegExpMacroAssemblerIA32::PushRegister(int register_index,
+                                            StackCheckFlag check_stack_limit) {
+  __ mov(eax, register_location(register_index));
+  Push(eax);
+  if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(int reg) {
+  __ mov(edi, register_location(reg));
+}
+
+
+void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
+  __ mov(backtrack_stackpointer(), register_location(reg));
+  __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+}
+
+
+void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
+  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ mov(register_location(register_index), Immediate(to));
+}
+
+
+void RegExpMacroAssemblerIA32::Succeed() {
+  __ jmp(&success_label_);
+}
+
+
+void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
+                                                              int cp_offset) {
+  if (cp_offset == 0) {
+    __ mov(register_location(reg), edi);
+  } else {
+    __ lea(eax, Operand(edi, cp_offset * char_size()));
+    __ mov(register_location(reg), eax);
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
+  ASSERT(reg_from <= reg_to);
+  __ mov(eax, Operand(ebp, kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ mov(register_location(reg), eax);
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
+  __ mov(eax, backtrack_stackpointer());
+  __ sub(eax, Operand(ebp, kStackHighEnd));
+  __ mov(register_location(reg), eax);
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
+  int num_arguments = 3;
+  FrameAlign(num_arguments, scratch);
+  // RegExp code frame pointer.
+  __ mov(Operand(esp, 2 * kPointerSize), ebp);
+  // Code* of self.
+  __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
+  // Next address on the stack (will be address of return address).
+  __ lea(eax, Operand(esp, -kPointerSize));
+  __ mov(Operand(esp, 0 * kPointerSize), eax);
+  ExternalReference check_stack_guard =
+      ExternalReference::re_check_stack_guard_state();
+  CallCFunction(check_stack_guard, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+  return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
+                                                   Code* re_code,
+                                                   Address re_frame) {
+  if (StackGuard::IsStackOverflow()) {
+    Top::StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If it is not a real stack overflow, the stack guard was used to interrupt
+  // execution for another purpose.
+
+  // Prepare for possible GC.
+  HandleScope handles;
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+  // Current string.
+  bool is_ascii = subject->IsAsciiRepresentation();
+
+  ASSERT(re_code->instruction_start() <= *return_address);
+  ASSERT(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  Object* result = Execution::HandleStackGuardInterrupt();
+
+  if (*code_handle != re_code) {  // Return address no longer valid
+    int delta = *code_handle - re_code;
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  // String might have changed.
+  if (subject->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and an UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  ASSERT(StringShape(*subject).IsSequential() ||
+      StringShape(*subject).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<int>(re_frame, kStartIndex);
+  const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte*>(re_frame, kInputEnd);
+    int byte_length = end_address - start_address;
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  }
+
+  return 0;
+}
+
+
+Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
+  ASSERT(register_index < (1<<30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return Operand(ebp, kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
+                                             Label* on_outside_input) {
+  __ cmp(edi, -cp_offset * char_size());
+  BranchOrBacktrack(greater_equal, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
+                                                 Label* to,
+                                                 Hint hint) {
+  if (condition < 0) {  // No condition
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ jmp(to);
+    return;
+  }
+  if (to == NULL) {
+    __ j(condition, &backtrack_label_, hint);
+    return;
+  }
+  __ j(condition, to, hint);
+}
+
+
+void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
+  __ call(to);
+}
+
+
+void RegExpMacroAssemblerIA32::SafeReturn() {
+  __ add(Operand(esp, 0), Immediate(masm_->CodeObject()));
+  __ ret(0);
+}
+
+
+void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
+  __ bind(name);
+  __ sub(Operand(esp, 0), Immediate(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerIA32::Push(Register source) {
+  ASSERT(!source.is(backtrack_stackpointer()));
+  // Notice: This updates flags, unlike normal Push.
+  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ mov(Operand(backtrack_stackpointer(), 0), source);
+}
+
+
+void RegExpMacroAssemblerIA32::Push(Immediate value) {
+  // Notice: This updates flags, unlike normal Push.
+  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ mov(Operand(backtrack_stackpointer(), 0), value);
+}
+
+
+void RegExpMacroAssemblerIA32::Pop(Register target) {
+  ASSERT(!target.is(backtrack_stackpointer()));
+  __ mov(target, Operand(backtrack_stackpointer(), 0));
+  // Notice: This updates flags, unlike normal Pop.
+  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+}
+
+
+void RegExpMacroAssemblerIA32::CheckPreemption() {
+  // Check for preemption.
+  Label no_preempt;
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+  __ j(above, &no_preempt, taken);
+
+  SafeCall(&check_preempt_label_);
+
+  __ bind(&no_preempt);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckStackLimit() {
+  if (FLAG_check_stack) {
+    Label no_stack_overflow;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_regexp_stack_limit();
+    __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+    __ j(above, &no_stack_overflow);
+
+    SafeCall(&stack_overflow_label_);
+
+    __ bind(&no_stack_overflow);
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::FrameAlign(int num_arguments, Register scratch) {
+  // TODO(lrn): Since we no longer use the system stack arbitrarily (but we do
+  // use it, e.g., for SafeCall), we know the number of elements on the stack
+  // since the last frame alignment. We might be able to do this more simply.
+  int frameAlignment = OS::ActivationFrameAlignment();
+  if (frameAlignment != 0) {
+    // Make stack end at alignment and make room for num_arguments words
+    // and the original value of esp.
+    __ mov(scratch, esp);
+    __ sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+    ASSERT(IsPowerOf2(frameAlignment));
+    __ and_(esp, -frameAlignment);
+    __ mov(Operand(esp, num_arguments * kPointerSize), scratch);
+  } else {
+    __ sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+  }
+}
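+
+// Illustrative stack layout after FrameAlign(2, scratch), assuming a 16-byte
+// activation-frame alignment (not part of the original file):
+//   esp[0] : argument 0 (stored by the caller afterwards)
+//   esp[4] : argument 1
+//   esp[8] : original esp, restored by CallCFunction
+//   ...    : alignment padding up to the next 16-byte boundary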
+
+
+void RegExpMacroAssemblerIA32::CallCFunction(ExternalReference function,
+                                             int num_arguments) {
+  __ mov(Operand(eax), Immediate(function));
+  __ call(Operand(eax));
+  if (OS::ActivationFrameAlignment() != 0) {
+    __ mov(esp, Operand(esp, num_arguments * kPointerSize));
+  } else {
+    __ add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                             int characters) {
+  if (mode_ == ASCII) {
+    if (characters == 4) {
+      __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
+    } else if (characters == 2) {
+      __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
+    } else {
+      ASSERT(characters == 1);
+      __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
+    }
+  } else {
+    ASSERT(mode_ == UC16);
+    if (characters == 2) {
+      __ mov(current_character(),
+             Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
+    } else {
+      ASSERT(characters == 1);
+      __ movzx_w(current_character(),
+                 Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
+    }
+  }
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+  __ int3();  // Unused on ia32.
+}
+
+#undef __
+
+#endif  // V8_NATIVE_REGEXP
+
+}}  // namespace v8::internal
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
new file mode 100644
index 0000000..5ffd462
--- /dev/null
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -0,0 +1,232 @@
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_NATIVE_REGEXP
+class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerIA32() { }
+  virtual ~RegExpMacroAssemblerIA32() { }
+};
+
+#else
+class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerIA32(Mode mode, int registers_to_save);
+  virtual ~RegExpMacroAssemblerIA32();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  virtual void CheckCharacters(Vector<const uc16> str,
+                               int cp_offset,
+                               Label* on_failure,
+                               bool check_end_of_string);
+  // A "greedy loop" is a loop that is both greedy and with a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 mask,
+                                              Label* on_not_equal);
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          int cp_offset,
+                                          bool check_offset,
+                                          Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<Object> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetRegister(int register_index, int to);
+  virtual void Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address,
+                                  Code* re_code,
+                                  Address re_frame);
+
+ private:
+  // Offsets from ebp of function parameters and stored registers.
+  static const int kFramePointer = 0;
+  // Above the frame pointer - function parameters and return address.
+  static const int kReturn_eip = kFramePointer + kPointerSize;
+  static const int kFrameAlign = kReturn_eip + kPointerSize;
+  // Parameters.
+  static const int kInputString = kFrameAlign;
+  static const int kStartIndex = kInputString + kPointerSize;
+  static const int kInputStart = kStartIndex + kPointerSize;
+  static const int kInputEnd = kInputStart + kPointerSize;
+  static const int kRegisterOutput = kInputEnd + kPointerSize;
+  static const int kAtStart = kRegisterOutput + kPointerSize;
+  static const int kStackHighEnd = kAtStart + kPointerSize;
+  // Below the frame pointer - local stack variables.
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kBackup_esi = kFramePointer - kPointerSize;
+  static const int kBackup_edi = kBackup_esi - kPointerSize;
+  static const int kBackup_ebx = kBackup_edi - kPointerSize;
+  static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
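+  //
+  // Illustrative frame layout implied by the offsets above (not part of the
+  // original source):
+  //   ebp[+8..+32] : parameters (kInputString .. kStackHighEnd)
+  //   ebp[+4]      : return address (kReturn_eip)
+  //   ebp[0]       : saved ebp (kFramePointer)
+  //   ebp[-4]      : saved esi (kBackup_esi)
+  //   ebp[-8]      : saved edi (kBackup_edi)
+  //   ebp[-12]     : saved ebx (kBackup_ebx)
+  //   ebp[-16]     : input start minus one (kInputStartMinusOne)
+  //   ebp[-20]     : register 0 (kRegisterZero); further registers below it.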
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The ebp-relative location of a regexp register.
+  Operand register_location(int register_index);
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return edx; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return ecx; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument)
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Condition condition, Label* to, Hint hint = no_hint);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+  inline void SafeCall(Label* to);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
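+  // (SafeCallTarget rewrites the absolute return address pushed by the call
+  // into a code-object-relative offset, and SafeReturn adds the code object
+  // back just before returning, so a moving GC cannot invalidate it.)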
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer (ecx) by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
+  // by a word size and stores the value there.
+  inline void Push(Immediate value);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // (ecx) and increments it by a word size.
+  inline void Pop(Register target);
+
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, arguments must be stored in esp[0], esp[4],
+  // etc., not pushed. The argument count assumes all arguments are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  inline void FrameAlign(int num_arguments, Register scratch);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by FrameAlign. The called function is not allowed to trigger a garbage
+  // collection, since that might move the code and invalidate the return
+  // address (unless this is somehow accounted for).
+  inline void CallCFunction(ExternalReference function, int num_arguments);
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (ASCII or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1)
+  int num_saved_registers_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+};
+#endif  // V8_NATIVE_REGEXP
+
+}}  // namespace v8::internal
+
+#endif  // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/ia32/register-allocator-ia32-inl.h b/src/ia32/register-allocator-ia32-inl.h
new file mode 100644
index 0000000..99ae6eb
--- /dev/null
+++ b/src/ia32/register-allocator-ia32-inl.h
@@ -0,0 +1,82 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
+#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+  // The code for this test relies on the order of register codes.
+  return reg.code() >= esp.code() && reg.code() <= esi.code();
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers.  The mapping is:
+// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
+
+int RegisterAllocator::ToNumber(Register reg) {
+  ASSERT(reg.is_valid() && !IsReserved(reg));
+  const int kNumbers[] = {
+    0,   // eax
+    2,   // ecx
+    3,   // edx
+    1,   // ebx
+    -1,  // esp
+    -1,  // ebp
+    -1,  // esi
+    4    // edi
+  };
+  return kNumbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
+  return kRegisters[num];
+}
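+
+
+// Example (illustrative): ToNumber(ecx) yields 2 and ToRegister(2) yields
+// ecx; the two tables above are inverses on the non-reserved registers.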
+
+
+void RegisterAllocator::Initialize() {
+  Reset();
+  // The non-reserved edi register is live on JS function entry.
+  Use(edi);  // JS function.
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
new file mode 100644
index 0000000..2914960
--- /dev/null
+++ b/src/ia32/register-allocator-ia32.cc
@@ -0,0 +1,99 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  ASSERT(is_valid());
+  if (is_constant()) {
+    Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
+    ASSERT(fresh.is_valid());
+    if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+      CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+    } else {
+      CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+                                                 Immediate(handle()));
+    }
+    // This result becomes a copy of the fresh one.
+    *this = fresh;
+  }
+  ASSERT(is_register());
+}
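+// Usage sketch (illustrative only; the setup below is hypothetical): a
+// Result holding a constant stays unmaterialized until a register is
+// actually needed:
+//
+//   Result value(Handle<Object>(Smi::FromInt(42)));  // hypothetical setup
+//   value.ToRegister();  // allocates a register and loads the constant
+//   // value.reg() can now be used as an operand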
+
+
+void Result::ToRegister(Register target) {
+  ASSERT(is_valid());
+  if (!is_register() || !reg().is(target)) {
+    Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
+    ASSERT(fresh.is_valid());
+    if (is_register()) {
+      CodeGeneratorScope::Current()->masm()->mov(fresh.reg(), reg());
+    } else {
+      ASSERT(is_constant());
+      if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+        CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+      } else {
+        CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+                                                   Immediate(handle()));
+      }
+    }
+    *this = fresh;
+  } else if (is_register() && reg().is(target)) {
+    ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
+    CodeGeneratorScope::Current()->frame()->Spill(target);
+    ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
+  }
+  ASSERT(is_register());
+  ASSERT(reg().is(target));
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+  Result result = AllocateWithoutSpilling();
+  // Check that the register is a byte register.  If not, unuse the
+  // register if valid and return an invalid result.
+  if (result.is_valid() && !result.reg().is_byte_register()) {
+    result.Unuse();
+    return Result();
+  }
+  return result;
+}
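+// (Of the allocatable registers only eax, ebx, ecx and edx have byte
+// variants (al, bl, cl, dl) on ia32; an allocation that yields edi is
+// therefore undone and an invalid result returned.)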
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/register-allocator-ia32.h b/src/ia32/register-allocator-ia32.h
new file mode 100644
index 0000000..e7ce91f
--- /dev/null
+++ b/src/ia32/register-allocator-ia32.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+
+namespace v8 {
+namespace internal {
+
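+// Constants for the ia32 register allocator: five allocatable registers
+// (eax, ebx, ecx, edx and edi); esp, ebp and esi are reserved and never
+// allocated.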
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+  static const int kNumRegisters = 5;
+  static const int kInvalidRegister = -1;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
diff --git a/src/ia32/simulator-ia32.cc b/src/ia32/simulator-ia32.cc
new file mode 100644
index 0000000..ab81693
--- /dev/null
+++ b/src/ia32/simulator-ia32.cc
@@ -0,0 +1,30 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Since there is no simulator for the ia32 architecture, this file is empty.
+
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
new file mode 100644
index 0000000..8fa4287
--- /dev/null
+++ b/src/ia32/simulator-ia32.h
@@ -0,0 +1,53 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_SIMULATOR_IA32_H_
+#define V8_IA32_SIMULATOR_IA32_H_
+
+#include "allocation.h"
+
+// Since there is no simulator for the ia32 architecture, the only thing we
+// can do is call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  entry(p0, p1, p2, p3, p4);
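+// (On simulated target architectures the same macro routes the call through
+// the simulator; here the generated code runs natively, so the entry point
+// is invoked as an ordinary C function.)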
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ia32 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+};
+
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer-sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
+
+#endif  // V8_IA32_SIMULATOR_IA32_H_
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
new file mode 100644
index 0000000..ca4e142
--- /dev/null
+++ b/src/ia32/stub-cache-ia32.cc
@@ -0,0 +1,1876 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset,
+                       Register extra) {
+  ExternalReference key_offset(SCTableReference::keyReference(table));
+  ExternalReference value_offset(SCTableReference::valueReference(table));
+
+  Label miss;
+
+  if (extra.is_valid()) {
+    // Get the code entry from the cache.
+    __ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
+
+    // Check that the key in the entry matches the name.
+    __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+    __ j(not_equal, &miss, not_taken);
+
+    // Check that the flags match what we're looking for.
+    __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+    __ cmp(offset, flags);
+    __ j(not_equal, &miss);
+
+    // Jump to the first instruction in the code stub.
+    __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(Operand(extra));
+
+    __ bind(&miss);
+  } else {
+    // Save the offset on the stack.
+    __ push(offset);
+
+    // Check that the key in the entry matches the name.
+    __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+    __ j(not_equal, &miss, not_taken);
+
+    // Get the code entry from the cache.
+    __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+
+    // Check that the flags match what we're looking for.
+    __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+    __ cmp(offset, flags);
+    __ j(not_equal, &miss);
+
+    // Restore offset and re-load code entry from cache.
+    __ pop(offset);
+    __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+
+    // Jump to the first instruction in the code stub.
+    __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(Operand(offset));
+
+    // Pop at miss.
+    __ bind(&miss);
+    __ pop(offset);
+  }
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra) {
+  Label miss;
+
+  // Make sure that the code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+  ASSERT(!extra.is(receiver));
+  ASSERT(!extra.is(name));
+  ASSERT(!extra.is(scratch));
+
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Get the map of the receiver and compute the hash.
+  __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+  __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, flags);
+  __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+
+  // Probe the primary table.
+  ProbeTable(masm, flags, kPrimary, name, scratch, extra);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+  __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, flags);
+  __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ sub(scratch, Operand(name));
+  __ add(Operand(scratch), Immediate(flags));
+  __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+
+  // Probe the secondary table.
+  ProbeTable(masm, flags, kSecondary, name, scratch, extra);
+
+  // Cache miss: fall through and let the caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+}
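+// Note on the probe indexing above: the hash is masked with
+// (table size - 1) << kHeapObjectTagSize, so the scratch register already
+// holds the entry index scaled by four; the times_2 scale applied in
+// ProbeTable's StaticArray operands then addresses the 8-byte cache
+// entries, which is what ASSERT(sizeof(Entry) == 8) guards.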
+
+
+template <typename Pushable>
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Pushable name,
+                                     JSObject* holder_obj) {
+  __ push(receiver);
+  __ push(holder);
+  __ push(name);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  __ mov(receiver, Immediate(Handle<Object>(interceptor)));
+  __ push(receiver);
+  __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
+}
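+// (Five values end up on the stack: receiver, holder, name, the
+// interceptor info object and its data field; the callers below pass the
+// same count of 5 to the runtime entry.)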
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  // Load the global or builtins object from the current context.
+  __ mov(prototype, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ mov(prototype,
+         FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ mov(prototype, Operand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ mov(prototype,
+         FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register scratch,
+                                           Label* miss_label) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss_label, not_taken);
+
+  // Check that the object is a JS array.
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+  __ j(not_equal, miss_label, not_taken);
+
+  // Load length directly from the JS array.
+  __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
+  __ ret(0);
+}
+
+
+// Generate code to check if an object is a string.  If the object is
+// a string, the map's instance type is left in the scratch register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the object isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, smi, not_taken);
+
+  // Check that the object is a string.
+  __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  ASSERT(kNotStringTag != 0);
+  __ test(scratch, Immediate(kNotStringTag));
+  __ j(not_zero, non_string_object, not_taken);
+}
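+// (The test works because string instance types have the kNotStringTag
+// bit clear; a single test instruction therefore separates strings from
+// all other heap objects.)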
+
+
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch,
+                                            Label* miss) {
+  Label load_length, check_wrapper;
+
+  // Check if the object is a string leaving the instance type in the
+  // scratch register.
+  GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+
+  // Load length directly from the string.
+  __ bind(&load_length);
+  __ and_(scratch, kStringSizeMask);
+  __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
+  // ecx is also the receiver.
+  __ lea(ecx, Operand(scratch, String::kLongLengthShift));
+  __ shr(eax);  // ecx is implicit shift register.
+  __ shl(eax, kSmiTagSize);
+  __ ret(0);
+
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ cmp(scratch, JS_VALUE_TYPE);
+  __ j(not_equal, miss, not_taken);
+
+  // Check if the wrapped value is a string and load the length
+  // directly if it is.
+  __ mov(receiver, FieldOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, receiver, scratch, miss, miss);
+  __ jmp(&load_length);
+}
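+// Note on the length extraction above (derived from the code itself): the
+// size class masked into the scratch register selects how far the length
+// is shifted within the length field; the lea turns that into a shift
+// count in ecx, shr extracts the raw length, and the final shl retags it
+// as a smi.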
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(eax, Operand(scratch1));
+  __ ret(0);
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ mov(dst, FieldOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+    __ mov(dst, FieldOperand(dst, offset));
+  }
+}
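+// Worked example (values invented): for a map with 2 in-object properties
+// and index 1 the adjusted index is -1, so the load reads the object body
+// at instance_size - kPointerSize; for index 3 the adjusted index is 1
+// and the load goes through the properties FixedArray at offset
+// kPointerSize + FixedArray::kHeaderSize.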
+
+
+template <typename Pushable>
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Pushable name,
+                                                   JSObject* holder_obj) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+  __ mov(eax, Immediate(5));
+  __ mov(ebx, Immediate(ref));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+}
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+                                   StubCompiler* stub_compiler,
+                                   MacroAssembler* masm,
+                                   JSObject* object,
+                                   JSObject* holder,
+                                   String* name,
+                                   LookupResult* lookup,
+                                   Register receiver,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* miss) {
+  ASSERT(holder->HasNamedInterceptor());
+  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      stub_compiler->CheckPrototypes(object, receiver, holder,
+                                     scratch1, scratch2, name, miss);
+
+  if (lookup->IsValid() && lookup->IsCacheable()) {
+    compiler->CompileCacheable(masm,
+                               stub_compiler,
+                               receiver,
+                               reg,
+                               scratch1,
+                               scratch2,
+                               holder,
+                               lookup,
+                               name,
+                               miss);
+  } else {
+    compiler->CompileRegular(masm,
+                             receiver,
+                             reg,
+                             scratch2,
+                             holder,
+                             miss);
+  }
+}
+
+
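+// Find the property an interceptor shadows: look on the holder itself
+// first and, if nothing is found there, continue up the prototype chain.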
+static void LookupPostInterceptor(JSObject* holder,
+                                  String* name,
+                                  LookupResult* lookup) {
+  holder->LocalLookupRealNamedProperty(name, lookup);
+  if (lookup->IsNotFound()) {
+    Object* proto = holder->GetPrototype();
+    if (proto != Heap::null_value()) {
+      proto->Lookup(name, lookup);
+    }
+  }
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+  explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+  void CompileCacheable(MacroAssembler* masm,
+                        StubCompiler* stub_compiler,
+                        Register receiver,
+                        Register holder,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        Label* miss_label) {
+    AccessorInfo* callback = NULL;
+    bool optimize = false;
+    // So far the most common follow-ups for interceptor loads are FIELD
+    // and CALLBACKS, so only those are inlined here; other cases may be
+    // added later.
+    if (lookup->type() == FIELD) {
+      optimize = true;
+    } else if (lookup->type() == CALLBACKS) {
+      Object* callback_object = lookup->GetCallbackObject();
+      if (callback_object->IsAccessorInfo()) {
+        callback = AccessorInfo::cast(callback_object);
+        optimize = callback->getter() != NULL;
+      }
+    }
+
+    if (!optimize) {
+      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      return;
+    }
+
+    // Note: starting a frame here makes GC aware of pointers pushed below.
+    __ EnterInternalFrame();
+
+    if (lookup->type() == CALLBACKS) {
+      __ push(receiver);
+    }
+    __ push(holder);
+    __ push(name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    Label interceptor_failed;
+    __ cmp(eax, Factory::no_interceptor_result_sentinel());
+    __ j(equal, &interceptor_failed);
+    __ LeaveInternalFrame();
+    __ ret(0);
+
+    __ bind(&interceptor_failed);
+    __ pop(name_);
+    __ pop(holder);
+    if (lookup->type() == CALLBACKS) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
+    if (lookup->type() == FIELD) {
+      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+                                              lookup->holder(), scratch1,
+                                              scratch2,
+                                              name,
+                                              miss_label);
+      stub_compiler->GenerateFastPropertyLoad(masm, eax,
+                                              holder, lookup->holder(),
+                                              lookup->GetFieldIndex());
+      __ ret(0);
+    } else {
+      ASSERT(lookup->type() == CALLBACKS);
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      ASSERT(callback != NULL);
+      ASSERT(callback->getter() != NULL);
+
+      Label cleanup;
+      __ pop(scratch2);
+      __ push(receiver);
+      __ push(scratch2);
+
+      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+                                              lookup->holder(), scratch1,
+                                              scratch2,
+                                              name,
+                                              &cleanup);
+
+      __ pop(scratch2);  // save old return address
+      __ push(holder);
+      __ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
+      __ push(holder);
+      __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
+      __ push(name_);
+      __ push(scratch2);  // restore old return address
+
+      ExternalReference ref =
+          ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+      __ TailCallRuntime(ref, 5, 1);
+
+      __ bind(&cleanup);
+      __ pop(scratch1);
+      __ pop(scratch2);
+      __ push(scratch1);
+    }
+  }
+
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ pop(scratch);  // save old return address
+    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    __ push(scratch);  // restore old return address
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+    __ TailCallRuntime(ref, 5, 1);
+  }
+
+ private:
+  Register name_;
+};
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+  explicit CallInterceptorCompiler(const ParameterCount& arguments)
+      : arguments_(arguments), argc_(arguments.immediate()) {}
+
+  void CompileCacheable(MacroAssembler* masm,
+                        StubCompiler* stub_compiler,
+                        Register receiver,
+                        Register holder,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        Label* miss_label) {
+    JSFunction* function = NULL;
+    bool optimize = false;
+    // So far the most common case for a failed interceptor lookup is a
+    // CONSTANT_FUNCTION sitting below the interceptor holder.
+    if (lookup->type() == CONSTANT_FUNCTION) {
+      function = lookup->GetConstantFunction();
+      // JSArray holder is a special case for call constant function
+      // (see the corresponding code).
+      if (function->is_compiled() && !holder_obj->IsJSArray()) {
+        optimize = true;
+      }
+    }
+
+    if (!optimize) {
+      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      return;
+    }
+
+    __ EnterInternalFrame();
+    __ push(holder);  // save the holder
+
+    CompileCallLoadPropertyWithInterceptor(
+        masm,
+        receiver,
+        holder,
+        // Under the frame built by EnterInternalFrame this slot holds the name.
+        Operand(ebp, (argc_ + 3) * kPointerSize),
+        holder_obj);
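+    // The (argc_ + 3) offset above: before EnterInternalFrame the name
+    // slot sat at esp + (argc_ + 2) * kPointerSize; pushing the caller's
+    // ebp moves every incoming slot one word further from ebp.
+    // (Explanatory note derived from the frame layout.)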
+
+    __ pop(receiver);  // Pop the saved holder into the receiver register.
+    __ LeaveInternalFrame();
+
+    __ cmp(eax, Factory::no_interceptor_result_sentinel());
+    Label invoke;
+    __ j(not_equal, &invoke);
+
+    stub_compiler->CheckPrototypes(holder_obj, receiver,
+                                   lookup->holder(), scratch1,
+                                   scratch2,
+                                   name,
+                                   miss_label);
+    if (lookup->holder()->IsGlobalObject()) {
+      __ mov(edx, Operand(esp, (argc_ + 1) * kPointerSize));
+      __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+      __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edx);
+    }
+
+    // Get the function and set up the context.
+    __ mov(edi, Immediate(Handle<JSFunction>(function)));
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+    // Jump to the cached code (tail call).
+    ASSERT(function->is_compiled());
+    Handle<Code> code(function->code());
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    __ InvokeCode(code, expected, arguments_,
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+    __ bind(&invoke);
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ EnterInternalFrame();
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             Operand(ebp, (argc_ + 3) * kPointerSize),
+                             holder_obj);
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+    __ mov(eax, Immediate(5));
+    __ mov(ebx, Immediate(ref));
+
+    CEntryStub stub(1);
+    __ CallStub(&stub);
+
+    __ LeaveInternalFrame();
+  }
+
+ private:
+  const ParameterCount& arguments_;
+  int argc_;
+};
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = Builtins::builtin(Builtins::LoadIC_Miss);
+  } else {
+    code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                      Builtins::Name storage_extend,
+                                      JSObject* object,
+                                      int index,
+                                      Map* transition,
+                                      Register receiver_reg,
+                                      Register name_reg,
+                                      Register scratch,
+                                      Label* miss_label) {
+  // Check that the object isn't a smi.
+  __ test(receiver_reg, Immediate(kSmiTagMask));
+  __ j(zero, miss_label, not_taken);
+
+  // Check that the map of the object hasn't changed.
+  __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, miss_label, not_taken);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ mov(ecx, Immediate(Handle<Map>(transition)));
+    Handle<Code> ic(Builtins::builtin(storage_extend));
+    __ jmp(ic, RelocInfo::CODE_TARGET);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+           Immediate(Handle<Map>(transition)));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of
+  // the object and the number of in-object properties are not going to
+  // change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ mov(FieldOperand(receiver_reg, offset), eax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ mov(name_reg, Operand(eax));
+    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array (optimistically).
+    __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ mov(FieldOperand(scratch, offset), eax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ mov(name_reg, Operand(eax));
+    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+  }
+
+  // Return the value (register eax).
+  __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  // Check that the maps haven't changed.
+  Register result =
+      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We must also check that no property with
+  // the given name has appeared on any skipped global object: its property
+  // cell must still contain the hole.
+  while (object != holder) {
+    if (object->IsGlobalObject()) {
+      GlobalObject* global = GlobalObject::cast(object);
+      Object* probe = global->EnsurePropertyCell(name);
+      if (probe->IsFailure()) {
+        set_failure(Failure::cast(probe));
+        return result;
+      }
+      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+      ASSERT(cell->value()->IsTheHole());
+      __ mov(scratch, Immediate(Handle<Object>(cell)));
+      __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+             Immediate(Factory::the_hole_value()));
+      __ j(not_equal, miss, not_taken);
+    }
+    object = JSObject::cast(object->GetPrototype());
+  }
+
+  // Return the register containing the holder.
+  return result;
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check the prototype chain.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Get the value from the properties.
+  GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ pop(scratch2);  // remove return address
+  __ push(receiver);  // receiver
+  __ push(reg);  // holder
+  __ mov(reg, Immediate(Handle<AccessorInfo>(callback)));  // callback info
+  __ push(reg);
+  __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+  __ push(name_reg);  // name
+  __ push(scratch2);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_callback_property =
+      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+  __ TailCallRuntime(load_callback_property, 5, 1);
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Return the constant value.
+  __ mov(eax, Handle<Object>(value));
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           LookupResult* lookup,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  LoadInterceptorCompiler compiler(name_reg);
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         object,
+                         holder,
+                         name,
+                         lookup,
+                         receiver,
+                         scratch1,
+                         scratch2,
+                         miss);
+}
+
+
+// TODO(1241006): Avoid having lazy compile stubs specialized by the
+// number of arguments. It is not needed anymore.
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(edi);
+
+  __ push(edi);  // function is also the parameter to the runtime call
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  __ pop(edi);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(ecx));
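+  // (eax holds the Code object returned by the runtime call; the lea above
+  // skips past the Code header to its first instruction.)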
+
+  return GetCodeWithFlags(flags, "LazyCompileStub");
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Do the right check and compute the holder register.
+  Register reg =
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
+
+  GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
+
+  // Check that the function really is a function.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
+  __ j(not_equal, &miss, not_taken);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
+
+  // Invoke the function.
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  // Make sure that it's okay not to patch the on-stack receiver unless
+  // we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+        __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+      }
+      break;
+
+    case STRING_CHECK:
+      // Check that the object is a string or a symbol.
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+      __ cmp(ecx, FIRST_NONSTRING_TYPE);
+      __ j(above_equal, &miss, not_taken);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::STRING_FUNCTION_INDEX,
+                                          ecx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
+      break;
+
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, &fast, taken);
+      __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+      __ j(not_equal, &miss, not_taken);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::NUMBER_FUNCTION_INDEX,
+                                          ecx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ cmp(edx, Factory::true_value());
+      __ j(equal, &fast, taken);
+      __ cmp(edx, Factory::false_value());
+      __ j(not_equal, &miss, not_taken);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::BOOLEAN_FUNCTION_INDEX,
+                                          ecx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
+      break;
+    }
+
+    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
+      // Make sure object->HasFastElements().
+      // Get the elements array of the object.
+      __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+      // Check that the object is in fast mode (not dictionary).
+      __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+             Immediate(Factory::fixed_array_map()));
+      __ j(not_equal, &miss, not_taken);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // Get the function and set up the context.
+  __ mov(edi, Immediate(Handle<JSFunction>(function)));
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  CallInterceptorCompiler compiler(arguments());
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         JSObject::cast(object),
+                         holder,
+                         name,
+                         &lookup,
+                         edx,
+                         ebx,
+                         ecx,
+                         &miss);
+
+  // Restore receiver.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the function really is a function.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+  __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+  __ j(not_equal, &miss, not_taken);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
+
+  // Invoke the function.
+  __ mov(edi, eax);
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+
+  // Handle load cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(argc);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            JSFunction* function,
+                                            String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // If the object is the holder then we know that it's a global object,
+  // which can only happen for contextual calls. In this case, the
+  // receiver cannot be a smi.
+  if (object != holder) {
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss);
+
+  // Get the value from the cell.
+  __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+  __ j(not_equal, &miss, not_taken);
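+  // (If the global property has since been reassigned, the cell no longer
+  // holds this function and we jump to the miss handler below.)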
+
+  // Patch the receiver on the stack with the global proxy.
+  if (object->IsGlobalObject()) {
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
+
+  // Set up the context (function already in edi).
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  __ IncrementCounter(&Counters::call_global_inline, 1);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::call_global_inline_miss, 1);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+                                             int index,
+                                             Map* transition,
+                                             String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(),
+                     Builtins::StoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     ebx, ecx, edx,
+                     &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                AccessorInfo* callback,
+                                                String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map of the object hasn't changed.
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(ebx, edx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ pop(ebx);  // remove the return address
+  __ push(Operand(esp, 0));  // receiver
+  __ push(Immediate(Handle<AccessorInfo>(callback)));  // callback info
+  __ push(ecx);  // name
+  __ push(eax);  // value
+  __ push(ebx);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+  __ TailCallRuntime(store_callback_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                   String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map of the object hasn't changed.
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(ebx, edx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+  __ pop(ebx);  // remove the return address
+  __ push(Operand(esp, 0));  // receiver
+  __ push(ecx);  // name
+  __ push(eax);  // value
+  __ push(ebx);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+  __ TailCallRuntime(store_ic_property, 3, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                              JSGlobalPropertyCell* cell,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map of the global has not changed.
+  __ mov(ebx, Operand(esp, kPointerSize));
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Store the value in the cell.
+  __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+
+  // Return the value (register eax).
+  __ IncrementCounter(&Counters::named_store_global_inline, 1);
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                  int index,
+                                                  Map* transition,
+                                                  String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ IncrementCounter(&Counters::keyed_store_field, 1);
+
+  // Get the name from the stack.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  // Check that the name has not changed.
+  __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(),
+                     Builtins::KeyedStoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     ebx, ecx, edx,
+                     &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_store_field, 1);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+                                              JSObject* holder,
+                                              AccessorInfo* callback,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+                       callback, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                              JSObject* holder,
+                                              Object* value,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  // TODO(368): Compile in the whole chain: all the interceptors in the
+  // prototype chain and the ultimate answer.
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          eax,
+                          ecx,
+                          edx,
+                          ebx,
+                          name,
+                          &miss);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            String* name,
+                                            bool is_dont_delete) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object, which can only happen for contextual loads. In this
+  // case, the receiver cannot be a smi.
+  if (object != holder) {
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
+
+  // Get the value from the cell.
+  __ mov(eax, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ cmp(eax, Factory::the_hole_value());
+    __ j(equal, &miss, not_taken);
+  } else if (FLAG_debug_code) {
+    __ cmp(eax, Factory::the_hole_value());
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
+  }
+
+  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ ret(0);
+
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int index) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_field, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadField(receiver, holder, ecx, ebx, edx, index, name, &miss);
+
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_field, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_callback, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+                       callback, name, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_callback, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadConstant(receiver, holder, ecx, ebx, edx,
+                       value, name, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          ecx,
+                          eax,
+                          edx,
+                          ebx,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadArrayLength(masm(), ecx, edx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadStringLength(masm(), ecx, edx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadFunctionPrototype(masm(), ecx, edx, ebx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+// Specialized stub for constructing objects from functions which only
+// have simple assignments of the form this.x = ...; in their body.
+Object* ConstructStubCompiler::CompileConstructStub(
+    SharedFunctionInfo* shared) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- edi : constructor
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_stub_call;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code. If
+  // there are, jump to the generic constructor stub which calls the actual
+  // code for the function, thereby hitting the break points.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
+  __ cmp(ebx, Factory::undefined_value());
+  __ j(not_equal, &generic_stub_call, not_taken);
+#endif
+
+  // Load the initial map and verify that it is in fact a map.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+  // The smi test below catches both a NULL pointer and a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &generic_stub_call);
+  __ CmpObjectType(ebx, MAP_TYPE, ecx);
+  __ j(not_equal, &generic_stub_call);
+
+#ifdef DEBUG
+  // Cannot construct functions this way.
+  // edi: constructor
+  // ebx: initial map
+  __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
+  __ Assert(not_equal, "Function constructed by construct stub.");
+#endif
+
+  // Now allocate the JSObject on the heap by moving the new space allocation
+  // top forward.
+  // edi: constructor
+  // ebx: initial map
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+  __ shl(ecx, kPointerSizeLog2);
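+  // ecx now holds the instance size in bytes (the map stores the size
+  // in words, hence the shift).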
+  __ AllocateInNewSpace(ecx,
+                        edx,
+                        ecx,
+                        no_reg,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
+
+  // The JSObject has been allocated; now initialize the fields and add
+  // the heap tag.
+  // ebx: initial map
+  // edx: JSObject (untagged)
+  __ mov(Operand(edx, JSObject::kMapOffset), ebx);
+  __ mov(ebx, Factory::empty_fixed_array());
+  __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
+  __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
+
+  // Push the allocated object to the stack. This is the object that will be
+  // returned (after it is tagged).
+  __ push(edx);
+
+  // eax: argc
+  // edx: JSObject (untagged)
+  // Load the address of the first in-object property into edx.
+  __ lea(edx, Operand(edx, JSObject::kHeaderSize));
+  // Calculate the location of the first argument. The stack contains the
+  // allocated object and the return address on top of the argc arguments.
+  __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
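+  // For example, with argc == 2 this yields esp + 3 * kPointerSize: the
+  // first argument sits above the allocated object, the return address
+  // and the last argument.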
+
+  // Use edi for holding undefined which is used in several places below.
+  __ mov(edi, Factory::undefined_value());
+
+  // eax: argc
+  // ecx: first argument
+  // edx: first in-object property of the JSObject
+  // edi: undefined
+  // Fill the initialized properties with a constant value or a passed argument
+  // depending on the this.x = ...; assignment in the function.
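+  // For example, for a constructor with body { this.x = a; this.y = 42; }
+  // slot 0 is filled from argument a when it is actually passed (and with
+  // undefined otherwise), while slot 1 is filled with the constant 42.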
+  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+    if (shared->IsThisPropertyAssignmentArgument(i)) {
+      Label not_passed;
+      // Set the property to undefined.
+      __ mov(Operand(edx, i * kPointerSize), edi);
+      // Check if the argument assigned to the property is actually passed.
+      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+      __ cmp(eax, arg_number);
+      __ j(below_equal, &not_passed);
+      // Argument passed - find it on the stack.
+      __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
+      __ mov(Operand(edx, i * kPointerSize), ebx);
+      __ bind(&not_passed);
+    } else {
+      // Set the property to the constant value.
+      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+      __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
+    }
+  }
+
+  // Fill the unused in-object property fields with undefined.
+  for (int i = shared->this_property_assignments_count();
+       i < shared->CalculateInObjectProperties();
+       i++) {
+    __ mov(Operand(edx, i * kPointerSize), edi);
+  }
+
+  // Move argc to ebx and retrieve and tag the JSObject to return.
+  __ mov(ebx, eax);
+  __ pop(eax);
+  __ or_(Operand(eax), Immediate(kHeapObjectTag));
+
+  // Remove caller arguments and receiver from the stack and return.
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
+  __ push(ecx);
+  __ IncrementCounter(&Counters::constructed_objects, 1);
+  __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+  __ ret(0);
+
+  // Jump to the generic stub in case the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_stub_call);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
new file mode 100644
index 0000000..1b8232f
--- /dev/null
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -0,0 +1,1085 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address.  All frame elements are in memory.
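+// For example, with two parameters the initial frame is
+// [receiver, p0, p1, return address], all memory elements, and
+// stack_pointer_ is 3 (the 0-based index of the return address).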
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
+  for (int i = 0; i <= stack_pointer_; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    register_locations_[i] = kIllegalIndex;
+  }
+}
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+  // Emit code to write elements below the stack pointer to their
+  // (already allocated) stack address.
+  ASSERT(index <= stack_pointer_);
+  FrameElement element = elements_[index];
+  ASSERT(!element.is_synced());
+  switch (element.type()) {
+    case FrameElement::INVALID:
+      break;
+
+    case FrameElement::MEMORY:
+      // This function should not be called with synced elements.
+      // (memory elements are always synced).
+      UNREACHABLE();
+      break;
+
+    case FrameElement::REGISTER:
+      __ mov(Operand(ebp, fp_relative(index)), element.reg());
+      break;
+
+    case FrameElement::CONSTANT:
+      if (cgen()->IsUnsafeSmi(element.handle())) {
+        Result temp = cgen()->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
+        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+      } else {
+        __ Set(Operand(ebp, fp_relative(index)),
+               Immediate(element.handle()));
+      }
+      break;
+
+    case FrameElement::COPY: {
+      int backing_index = element.index();
+      FrameElement backing_element = elements_[backing_index];
+      if (backing_element.is_memory()) {
+        Result temp = cgen()->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
+        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+      } else {
+        ASSERT(backing_element.is_register());
+        __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
+      }
+      break;
+    }
+  }
+  elements_[index].set_sync();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+  // Sync an element of the frame that is just above the stack pointer
+  // by pushing it.
+  ASSERT(index == stack_pointer_ + 1);
+  stack_pointer_++;
+  FrameElement element = elements_[index];
+
+  switch (element.type()) {
+    case FrameElement::INVALID:
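+      // Reserve the stack slot with an arbitrary value; an invalid
+      // element's value is never read.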
+      __ push(Immediate(Smi::FromInt(0)));
+      break;
+
+    case FrameElement::MEMORY:
+      // No memory elements exist above the stack pointer.
+      UNREACHABLE();
+      break;
+
+    case FrameElement::REGISTER:
+      __ push(element.reg());
+      break;
+
+    case FrameElement::CONSTANT:
+      if (cgen()->IsUnsafeSmi(element.handle())) {
+        Result temp = cgen()->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
+        __ push(temp.reg());
+      } else {
+        __ push(Immediate(element.handle()));
+      }
+      break;
+
+    case FrameElement::COPY: {
+      int backing_index = element.index();
+      FrameElement backing = elements_[backing_index];
+      ASSERT(backing.is_memory() || backing.is_register());
+      if (backing.is_memory()) {
+        __ push(Operand(ebp, fp_relative(backing_index)));
+      } else {
+        __ push(backing.reg());
+      }
+      break;
+    }
+  }
+  elements_[index].set_sync();
+}
+
+
+// Clear the dirty bits for the range of elements in
+// [min(stack_pointer_ + 1, begin), end].
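+// Elements already on the stack keep their slots; if the range extends
+// above the stack pointer, esp is lowered first to allocate the slots.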
+void VirtualFrame::SyncRange(int begin, int end) {
+  ASSERT(begin >= 0);
+  ASSERT(end < element_count());
+  // Sync elements below the range if they have not been materialized
+  // on the stack.
+  int start = Min(begin, stack_pointer_ + 1);
+
+  // If delta is positive, we have to adjust the stack pointer.
+  int delta = end - stack_pointer_;
+  if (delta > 0) {
+    stack_pointer_ = end;
+    __ sub(Operand(esp), Immediate(delta * kPointerSize));
+  }
+
+  for (int i = start; i <= end; i++) {
+    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+  }
+}
+
+
+void VirtualFrame::MakeMergable() {
+  for (int i = 0; i < element_count(); i++) {
+    FrameElement element = elements_[i];
+
+    if (element.is_constant() || element.is_copy()) {
+      if (element.is_synced()) {
+        // Just spill.
+        elements_[i] = FrameElement::MemoryElement();
+      } else {
+        // Allocate to a register.
+        FrameElement backing_element;  // Invalid if not a copy.
+        if (element.is_copy()) {
+          backing_element = elements_[element.index()];
+        }
+        Result fresh = cgen()->allocator()->Allocate();
+        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
+        elements_[i] =
+            FrameElement::RegisterElement(fresh.reg(),
+                                          FrameElement::NOT_SYNCED);
+        Use(fresh.reg(), i);
+
+        // Emit a move.
+        if (element.is_constant()) {
+          if (cgen()->IsUnsafeSmi(element.handle())) {
+            cgen()->LoadUnsafeSmi(fresh.reg(), element.handle());
+          } else {
+            __ Set(fresh.reg(), Immediate(element.handle()));
+          }
+        } else {
+          ASSERT(element.is_copy());
+          // Copies are only backed by register or memory locations.
+          if (backing_element.is_register()) {
+            // The backing store may have been spilled by allocating,
+            // but that's OK.  If it was, the value is right where we
+            // want it.
+            if (!fresh.reg().is(backing_element.reg())) {
+              __ mov(fresh.reg(), backing_element.reg());
+            }
+          } else {
+            ASSERT(backing_element.is_memory());
+            __ mov(fresh.reg(), Operand(ebp, fp_relative(element.index())));
+          }
+        }
+      }
+      // No need to set the copied flag --- there are no copies.
+    } else {
+      // Clear the copy flag of non-constant, non-copy elements.
+      // They cannot be copied because copies are not allowed.
+      // The copy flag is not relied on before the end of this loop,
+      // including when registers are spilled.
+      elements_[i].clear_copied();
+    }
+  }
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+  Comment cmnt(masm(), "[ Merge frame");
+  // We should always be merging the code generator's current frame to an
+  // expected frame.
+  ASSERT(cgen()->frame() == this);
+
+  // Adjust the stack pointer upward (toward the top of the virtual
+  // frame) if necessary.
+  if (stack_pointer_ < expected->stack_pointer_) {
+    int difference = expected->stack_pointer_ - stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ sub(Operand(esp), Immediate(difference * kPointerSize));
+  }
+
+  MergeMoveRegistersToMemory(expected);
+  MergeMoveRegistersToRegisters(expected);
+  MergeMoveMemoryToRegisters(expected);
+
+  // Adjust the stack pointer downward if necessary.
+  if (stack_pointer_ > expected->stack_pointer_) {
+    int difference = stack_pointer_ - expected->stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ add(Operand(esp), Immediate(difference * kPointerSize));
+  }
+
+  // At this point, the frames should be identical.
+  ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  // Move registers, constants, and copies to memory.  Perform moves
+  // from the top downward in the frame in order to leave the backing
+  // stores of copies in registers.
+  //
+  // Moving memory-backed copies to memory requires a spare register
+  // for the memory-to-memory moves.  Since we are performing a merge,
+  // we use esi (which is already saved in the frame).  We keep track
+  // of the index of the frame element esi is caching or kIllegalIndex
+  // if esi has not been disturbed.
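+  // For example, syncing two copies of the same memory-backed element
+  // loads esi once and stores it to both stack slots.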
+  int esi_caches = kIllegalIndex;
+  for (int i = element_count() - 1; i >= 0; i--) {
+    FrameElement target = expected->elements_[i];
+    if (target.is_register()) continue;  // Handle registers later.
+    if (target.is_memory()) {
+      FrameElement source = elements_[i];
+      switch (source.type()) {
+        case FrameElement::INVALID:
+          // Not a legal merge move.
+          UNREACHABLE();
+          break;
+
+        case FrameElement::MEMORY:
+          // Already in place.
+          break;
+
+        case FrameElement::REGISTER:
+          Unuse(source.reg());
+          if (!source.is_synced()) {
+            __ mov(Operand(ebp, fp_relative(i)), source.reg());
+          }
+          break;
+
+        case FrameElement::CONSTANT:
+          if (!source.is_synced()) {
+            if (cgen()->IsUnsafeSmi(source.handle())) {
+              esi_caches = i;
+              cgen()->LoadUnsafeSmi(esi, source.handle());
+              __ mov(Operand(ebp, fp_relative(i)), esi);
+            } else {
+              __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
+            }
+          }
+          break;
+
+        case FrameElement::COPY:
+          if (!source.is_synced()) {
+            int backing_index = source.index();
+            FrameElement backing_element = elements_[backing_index];
+            if (backing_element.is_memory()) {
+              // If we have to spill a register, we spill esi.
+              if (esi_caches != backing_index) {
+                esi_caches = backing_index;
+                __ mov(esi, Operand(ebp, fp_relative(backing_index)));
+              }
+              __ mov(Operand(ebp, fp_relative(i)), esi);
+            } else {
+              ASSERT(backing_element.is_register());
+              __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
+            }
+          }
+          break;
+      }
+    }
+    elements_[i] = target;
+  }
+
+  if (esi_caches != kIllegalIndex) {
+    __ mov(esi, Operand(ebp, fp_relative(context_index())));
+  }
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+  // We have already done X-to-memory moves.
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    // Move the right value into register i if it is currently in a register.
+    int index = expected->register_location(i);
+    int use_index = register_location(i);
+    // Skip if register i is unused in the target or else if source is
+    // not a register (this is not a register-to-register move).
+    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
+
+    Register target = RegisterAllocator::ToRegister(i);
+    Register source = elements_[index].reg();
+    if (index != use_index) {
+      if (use_index == kIllegalIndex) {  // Target is currently unused.
+        // Copy the contents of source to target.
+        // Set frame element register to target.
+        Use(target, index);
+        Unuse(source);
+        __ mov(target, source);
+      } else {
+        // Exchange contents of registers source and target.
+        // Nothing except the register backing use_index has changed.
+        elements_[use_index].set_reg(source);
+        set_register_location(target, index);
+        set_register_location(source, use_index);
+        __ xchg(source, target);
+      }
+    }
+
+    if (!elements_[index].is_synced() &&
+        expected->elements_[index].is_synced()) {
+      __ mov(Operand(ebp, fp_relative(index)), target);
+    }
+    elements_[index] = expected->elements_[index];
+  }
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+  // Move memory, constants, and copies to registers.  This is the
+  // final step and since it is not done from the bottom up, but in
+  // register code order, we have special code to ensure that the backing
+  // elements of copies are in their correct locations when we
+  // encounter the copies.
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int index = expected->register_location(i);
+    if (index != kIllegalIndex) {
+      FrameElement source = elements_[index];
+      FrameElement target = expected->elements_[index];
+      Register target_reg = RegisterAllocator::ToRegister(i);
+      ASSERT(target.reg().is(target_reg));
+      switch (source.type()) {
+        case FrameElement::INVALID:
+          UNREACHABLE();
+          break;
+        case FrameElement::REGISTER:
+          ASSERT(source.Equals(target));
+          // Go to next iteration.  Skips Use(target_reg) and syncing
+          // below.  It is safe to skip syncing because a target
+          // register frame element would only be synced if all source
+          // elements were.
+          continue;
+        case FrameElement::MEMORY:
+          ASSERT(index <= stack_pointer_);
+          __ mov(target_reg, Operand(ebp, fp_relative(index)));
+          break;
+
+        case FrameElement::CONSTANT:
+          if (cgen()->IsUnsafeSmi(source.handle())) {
+            cgen()->LoadUnsafeSmi(target_reg, source.handle());
+          } else {
+            __ Set(target_reg, Immediate(source.handle()));
+          }
+          break;
+
+        case FrameElement::COPY: {
+          int backing_index = source.index();
+          FrameElement backing = elements_[backing_index];
+          ASSERT(backing.is_memory() || backing.is_register());
+          if (backing.is_memory()) {
+            ASSERT(backing_index <= stack_pointer_);
+            // As an optimization, if the backing store should also move
+            // to a register, move it to its new register first.
+            if (expected->elements_[backing_index].is_register()) {
+              FrameElement new_backing = expected->elements_[backing_index];
+              Register new_backing_reg = new_backing.reg();
+              ASSERT(!is_used(new_backing_reg));
+              elements_[backing_index] = new_backing;
+              Use(new_backing_reg, backing_index);
+              __ mov(new_backing_reg,
+                     Operand(ebp, fp_relative(backing_index)));
+              __ mov(target_reg, new_backing_reg);
+            } else {
+              __ mov(target_reg, Operand(ebp, fp_relative(backing_index)));
+            }
+          } else {
+            __ mov(target_reg, backing.reg());
+          }
+        }
+      }
+      // Ensure the proper sync state.
+      if (target.is_synced() && !source.is_synced()) {
+        __ mov(Operand(ebp, fp_relative(index)), target_reg);
+      }
+      Use(target_reg, index);
+      elements_[index] = target;
+    }
+  }
+}
+
+
+void VirtualFrame::Enter() {
+  // Registers live on entry: esp, ebp, esi, edi.
+  Comment cmnt(masm(), "[ Enter JS frame");
+
+#ifdef DEBUG
+  // Verify that edi contains a JS function.  The following code
+  // relies on eax being available for use.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ Check(not_zero,
+           "VirtualFrame::Enter - edi is not a function (smi check).");
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+  __ Check(equal,
+           "VirtualFrame::Enter - edi is not a function (map check).");
+#endif
+
+  EmitPush(ebp);
+
+  __ mov(ebp, Operand(esp));
+
+  // Store the context in the frame.  The context is kept in esi and a
+  // copy is stored in the frame.  The external reference to esi
+  // remains.
+  EmitPush(esi);
+
+  // Store the function in the frame.  The frame owns the register
+  // reference now (i.e., it can keep it in edi or spill it later).
+  Push(edi);
+  SyncElementAt(element_count() - 1);
+  cgen()->allocator()->Unuse(edi);
+}
+
+
+void VirtualFrame::Exit() {
+  Comment cmnt(masm(), "[ Exit JS frame");
+  // Record the location of the JS exit code for patching when setting
+  // break point.
+  __ RecordJSReturn();
+
+  // Avoid using the leave instruction here, because it is too
+  // short. We need the return sequence to be at least the size of a
+  // call instruction to support patching the exit code in the
+  // debugger. See VisitReturnStatement for the full return sequence.
+  __ mov(esp, Operand(ebp));
+  stack_pointer_ = frame_pointer();
+  for (int i = element_count() - 1; i > stack_pointer_; i--) {
+    FrameElement last = elements_.RemoveLast();
+    if (last.is_register()) {
+      Unuse(last.reg());
+    }
+  }
+
+  EmitPop(ebp);
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+  int count = local_count();
+  if (count > 0) {
+    Comment cmnt(masm(), "[ Allocate space for locals");
+    // The locals are initialized to a constant (the undefined value), but
+    // we sync them with the actual frame to allocate space for spilling
+    // them later.  First sync everything above the stack pointer so we can
+    // use pushes to allocate and initialize the locals.
+    SyncRange(stack_pointer_ + 1, element_count() - 1);
+    Handle<Object> undefined = Factory::undefined_value();
+    FrameElement initial_value =
+        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
+    Result temp = cgen()->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ Set(temp.reg(), Immediate(undefined));
+    for (int i = 0; i < count; i++) {
+      elements_.Add(initial_value);
+      stack_pointer_++;
+      __ push(temp.reg());
+    }
+  }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+  ASSERT(elements_[context_index()].is_memory());
+  __ mov(Operand(ebp, fp_relative(context_index())), esi);
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+  ASSERT(elements_[context_index()].is_memory());
+  __ mov(esi, Operand(ebp, fp_relative(context_index())));
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+  Result temp = cgen()->allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  __ lea(temp.reg(), ParameterAt(-1));
+  Push(&temp);
+}
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+  FrameElement original = elements_[index];
+
+  // Is this element the backing store of any copies?
+  int new_backing_index = kIllegalIndex;
+  if (original.is_copied()) {
+    // Verify it is copied, and find the first copy.
+    for (int i = index + 1; i < element_count(); i++) {
+      if (elements_[i].is_copy() && elements_[i].index() == index) {
+        new_backing_index = i;
+        break;
+      }
+    }
+  }
+
+  if (new_backing_index == kIllegalIndex) {
+    // No copies found, return kIllegalIndex.
+    if (original.is_register()) {
+      Unuse(original.reg());
+    }
+    elements_[index] = FrameElement::InvalidElement();
+    return kIllegalIndex;
+  }
+
+  // This is the backing store of copies.
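+  // For example, invalidating slot 2 while slot 5 copies it moves the
+  // value into a register owned by slot 5, and any later copies of
+  // slot 2 are re-pointed at slot 5.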
+  Register backing_reg;
+  if (original.is_memory()) {
+    Result fresh = cgen()->allocator()->Allocate();
+    ASSERT(fresh.is_valid());
+    Use(fresh.reg(), new_backing_index);
+    backing_reg = fresh.reg();
+    __ mov(backing_reg, Operand(ebp, fp_relative(index)));
+  } else {
+    // The original was in a register.
+    backing_reg = original.reg();
+    set_register_location(backing_reg, new_backing_index);
+  }
+  // Invalidate the element at index.
+  elements_[index] = FrameElement::InvalidElement();
+  // Set the new backing element.
+  if (elements_[new_backing_index].is_synced()) {
+    elements_[new_backing_index] =
+        FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+  } else {
+    elements_[new_backing_index] =
+        FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+  }
+  // Update the other copies.
+  for (int i = new_backing_index + 1; i < element_count(); i++) {
+    if (elements_[i].is_copy() && elements_[i].index() == index) {
+      elements_[i].set_index(new_backing_index);
+      elements_[new_backing_index].set_copied();
+    }
+  }
+  return new_backing_index;
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+  ASSERT(index >= 0);
+  ASSERT(index <= element_count());
+  FrameElement original = elements_[index];
+  int new_backing_store_index = InvalidateFrameSlotAt(index);
+  if (new_backing_store_index != kIllegalIndex) {
+    elements_.Add(CopyElementAt(new_backing_store_index));
+    return;
+  }
+
+  switch (original.type()) {
+    case FrameElement::MEMORY: {
+      // Emit code to load the original element's data into a register.
+      // Push that register as a FrameElement on top of the frame.
+      Result fresh = cgen()->allocator()->Allocate();
+      ASSERT(fresh.is_valid());
+      FrameElement new_element =
+          FrameElement::RegisterElement(fresh.reg(),
+                                        FrameElement::NOT_SYNCED);
+      Use(fresh.reg(), element_count());
+      elements_.Add(new_element);
+      __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
+      break;
+    }
+    case FrameElement::REGISTER:
+      Use(original.reg(), element_count());
+      // Fall through.
+    case FrameElement::CONSTANT:
+    case FrameElement::COPY:
+      original.clear_sync();
+      elements_.Add(original);
+      break;
+    case FrameElement::INVALID:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+  // Store the value on top of the frame to the virtual frame slot at
+  // a given index.  The value on top of the frame is left in place.
+  // This is a duplicating operation, so it can create copies.
+  ASSERT(index >= 0);
+  ASSERT(index < element_count());
+
+  int top_index = element_count() - 1;
+  FrameElement top = elements_[top_index];
+  FrameElement original = elements_[index];
+  if (top.is_copy() && top.index() == index) return;
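+  // Storing a copy of the stored-to slot back into that slot is a no-op.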
+  ASSERT(top.is_valid());
+
+  InvalidateFrameSlotAt(index);
+
+  // InvalidateFrameSlotAt can potentially change any frame element, due
+  // to spilling registers to allocate temporaries in order to preserve
+  // the copy-on-write semantics of aliased elements.  Reload top from
+  // the frame.
+  top = elements_[top_index];
+
+  if (top.is_copy()) {
+    // There are two cases based on the relative positions of the
+    // stored-to slot and the backing slot of the top element.
+    int backing_index = top.index();
+    ASSERT(backing_index != index);
+    if (backing_index < index) {
+      // 1. The top element is a copy of a slot below the stored-to
+      // slot.  The stored-to slot becomes an unsynced copy of that
+      // same backing slot.
+      elements_[index] = CopyElementAt(backing_index);
+    } else {
+      // 2. The top element is a copy of a slot above the stored-to
+      // slot.  The stored-to slot becomes the new (unsynced) backing
+      // slot and both the top element and the element at the former
+      // backing slot become copies of it.  The sync state of the top
+      // and former backing elements is preserved.
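+      // For example, with index == 1 and backing_index == 3, slot 1
+      // becomes the new backing element, and slot 3 as well as every
+      // other copy of it (including the top) become copies of slot 1.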
+      FrameElement backing_element = elements_[backing_index];
+      ASSERT(backing_element.is_memory() || backing_element.is_register());
+      if (backing_element.is_memory()) {
+        // Because sets of copies are canonicalized to be backed by
+        // their lowest frame element, and because memory frame
+        // elements are backed by the corresponding stack address, we
+        // have to move the actual value down in the stack.
+        //
+        // TODO(209): consider allocating the stored-to slot to the
+        // temp register.  Alternatively, allow copies to appear in
+        // any order in the frame and lazily move the value down to
+        // the slot.
+        Result temp = cgen()->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
+        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+      } else {
+        set_register_location(backing_element.reg(), index);
+        if (backing_element.is_synced()) {
+          // If the element is a register, we will not actually move
+          // anything on the stack but only update the virtual frame
+          // element.
+          backing_element.clear_sync();
+        }
+      }
+      elements_[index] = backing_element;
+
+      // The old backing element becomes a copy of the new backing
+      // element.
+      FrameElement new_element = CopyElementAt(index);
+      elements_[backing_index] = new_element;
+      if (backing_element.is_synced()) {
+        elements_[backing_index].set_sync();
+      }
+
+      // All the copies of the old backing element (including the top
+      // element) become copies of the new backing element.
+      for (int i = backing_index + 1; i < element_count(); i++) {
+        if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
+          elements_[i].set_index(index);
+        }
+      }
+    }
+    return;
+  }
+
+  // Move the top element to the stored-to slot and replace it (the
+  // top element) with a copy.
+  elements_[index] = top;
+  if (top.is_memory()) {
+    // TODO(209): consider allocating the stored-to slot to the temp
+    // register.  Alternatively, allow copies to appear in any order
+    // in the frame and lazily move the value down to the slot.
+    FrameElement new_top = CopyElementAt(index);
+    new_top.set_sync();
+    elements_[top_index] = new_top;
+
+    // The sync state of the former top element is correct (synced).
+    // Emit code to move the value down in the frame.
+    Result temp = cgen()->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), Operand(esp, 0));
+    __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+  } else if (top.is_register()) {
+    set_register_location(top.reg(), index);
+    // The stored-to slot has the (unsynced) register reference and
+    // the top element becomes a copy.  The sync state of the top is
+    // preserved.
+    FrameElement new_top = CopyElementAt(index);
+    if (top.is_synced()) {
+      new_top.set_sync();
+      elements_[index].clear_sync();
+    }
+    elements_[top_index] = new_top;
+  } else {
+    // The stored-to slot holds the same value as the top but
+    // unsynced.  (We do not have copies of constants yet.)
+    ASSERT(top.is_constant());
+    elements_[index].clear_sync();
+  }
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  // Grow the expression stack by handler size less one (the return
+  // address is already pushed by a call instruction).
+  Adjust(kHandlerSize - 1);
+  __ PushTryHandler(IN_JAVASCRIPT, type);
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallStub(stub);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+  PrepareForCall(0, 0);
+  arg->ToRegister(eax);
+  arg->Unuse();
+  return RawCallStub(stub);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+  PrepareForCall(0, 0);
+
+  if (arg0->is_register() && arg0->reg().is(eax)) {
+    if (arg1->is_register() && arg1->reg().is(edx)) {
+      // Wrong registers.
+      __ xchg(eax, edx);
+    } else {
+      // Register edx is free for arg0, which frees eax for arg1.
+      arg0->ToRegister(edx);
+      arg1->ToRegister(eax);
+    }
+  } else {
+    // Register eax is free for arg1, which guarantees edx is free for
+    // arg0.
+    arg1->ToRegister(eax);
+    arg0->ToRegister(edx);
+  }
+
+  arg0->Unuse();
+  arg1->Unuse();
+  return RawCallStub(stub);
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(f, arg_count);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(id, arg_count);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ InvokeBuiltin(id, flag);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                       RelocInfo::Mode rmode) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ call(code, rmode);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
+  // Name and receiver are on the top of the frame.  The IC expects
+  // name in ecx and receiver on the stack.  It does not drop the
+  // receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  Result name = Pop();
+  PrepareForCall(1, 0);  // One stack arg, not callee-dropped.
+  name.ToRegister(ecx);
+  name.Unuse();
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
+  // Key and receiver are on top of the frame.  The IC expects them on
+  // the stack.  It does not drop them.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallStoreIC() {
+  // Name, value, and receiver are on top of the frame.  The IC
+  // expects name in ecx, value in eax, and receiver on the stack.  It
+  // does not drop the receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  Result name = Pop();
+  Result value = Pop();
+  PrepareForCall(1, 0);  // One stack arg, not callee-dropped.
+
+  if (value.is_register() && value.reg().is(ecx)) {
+    if (name.is_register() && name.reg().is(eax)) {
+      // Wrong registers.
+      __ xchg(eax, ecx);
+    } else {
+      // Register eax is free for value, which frees ecx for name.
+      value.ToRegister(eax);
+      name.ToRegister(ecx);
+    }
+  } else {
+    // Register ecx is free for name, which guarantees eax is free for
+    // value.
+    name.ToRegister(ecx);
+    value.ToRegister(eax);
+  }
+
+  name.Unuse();
+  value.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+Result VirtualFrame::CallKeyedStoreIC() {
+  // Value, key, and receiver are on the top of the frame.  The IC
+  // expects value in eax and key and receiver on the stack.  It does
+  // not drop the key and receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  // TODO(1222589): Make the IC grab the values from the stack.
+  Result value = Pop();
+  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
+  value.ToRegister(eax);
+  value.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
+                                int arg_count,
+                                int loop_nesting) {
+  // Arguments, receiver, and function name are on top of the frame.
+  // The IC expects them on the stack.  It does not drop the function
+  // name slot (but it does drop the rest).
+  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
+  // Spill args, receiver, and function.  The call will drop args and
+  // receiver.
+  PrepareForCall(arg_count + 2, arg_count + 1);
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallConstructor(int arg_count) {
+  // Arguments, receiver, and function are on top of the frame.  The
+  // IC expects arg count in eax, function in edi, and the arguments
+  // and receiver on the stack.
+  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+  // Duplicate the function before preparing the frame.
+  PushElementAt(arg_count + 1);
+  Result function = Pop();
+  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill args and receiver.
+  function.ToRegister(edi);
+
+  // Constructors are called with the number of arguments in register
+  // eax for now. Another option would be to have separate construct
+  // call trampolines for the different argument counts encountered.
+  Result num_args = cgen()->allocator()->Allocate(eax);
+  ASSERT(num_args.is_valid());
+  __ Set(num_args.reg(), Immediate(arg_count));
+
+  function.Unuse();
+  num_args.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
+}
+
+
+void VirtualFrame::Drop(int count) {
+  ASSERT(count >= 0);
+  ASSERT(height() >= count);
+  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
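+  // For example, with ten elements, stack_pointer_ == 7 and count == 5,
+  // two of the dropped elements exist only in the virtual frame and esp
+  // is raised by three slots.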
+
+  // Emit code to lower the stack pointer if necessary.
+  if (num_virtual_elements < count) {
+    int num_dropped = count - num_virtual_elements;
+    stack_pointer_ -= num_dropped;
+    __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
+  }
+
+  // Discard elements from the virtual frame and free any registers.
+  for (int i = 0; i < count; i++) {
+    FrameElement dropped = elements_.RemoveLast();
+    if (dropped.is_register()) {
+      Unuse(dropped.reg());
+    }
+  }
+}
+
+
+Result VirtualFrame::Pop() {
+  FrameElement element = elements_.RemoveLast();
+  int index = element_count();
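+  // index is the position the removed element used to occupy.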
+  ASSERT(element.is_valid());
+
+  bool pop_needed = (stack_pointer_ == index);
+  if (pop_needed) {
+    stack_pointer_--;
+    if (element.is_memory()) {
+      Result temp = cgen()->allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ pop(temp.reg());
+      return temp;
+    }
+
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+  ASSERT(!element.is_memory());
+
+  // The top element is a register, constant, or a copy.  Unuse
+  // registers and follow copies to their backing store.
+  if (element.is_register()) {
+    Unuse(element.reg());
+  } else if (element.is_copy()) {
+    ASSERT(element.index() < index);
+    index = element.index();
+    element = elements_[index];
+  }
+  ASSERT(!element.is_copy());
+
+  // The element is memory, a register, or a constant.
+  if (element.is_memory()) {
+    // Memory elements could only be the backing store of a copy.
+    // Allocate the original to a register.
+    ASSERT(index <= stack_pointer_);
+    Result temp = cgen()->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    Use(temp.reg(), index);
+    FrameElement new_element =
+        FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+    // Preserve the copy flag on the element.
+    if (element.is_copied()) new_element.set_copied();
+    elements_[index] = new_element;
+    __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
+    return Result(temp.reg());
+  } else if (element.is_register()) {
+    return Result(element.reg());
+  } else {
+    ASSERT(element.is_constant());
+    return Result(element.handle());
+  }
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPop(Operand operand) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(operand);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(reg);
+}
+
+
+void VirtualFrame::EmitPush(Operand operand) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(operand);
+}
+
+
+void VirtualFrame::EmitPush(Immediate immediate) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(immediate);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
new file mode 100644
index 0000000..314ea73
--- /dev/null
+++ b/src/ia32/virtual-frame-ia32.h
@@ -0,0 +1,575 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
+#define V8_IA32_VIRTUAL_FRAME_IA32_H_
+
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
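+//
+// A hypothetical usage sketch (illustration only; the operations are
+// declared below):
+//
+//   VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
+//   frame->Push(eax);           // record eax as the new top element
+//   frame->Dup();               // duplicate the top element
+//   Result top = frame->Pop();  // comes back as a register or constant
+//   frame->Drop(1);             // may emit code to adjust esp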
+
+class VirtualFrame: public ZoneObject {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+      ASSERT(cgen()->has_valid_frame());
+      cgen()->frame()->SpillAll();
+      cgen()->set_in_spilled_code(true);
+    }
+
+    ~SpilledScope() {
+      cgen()->set_in_spilled_code(previous_state_);
+    }
+
+   private:
+    bool previous_state_;
+
+    CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  };
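+
+  // A sketch of intended use (illustration only):
+  //
+  //   { VirtualFrame::SpilledScope spilled_scope;
+  //     // ... code emitted here runs with the frame spilled ...
+  //   }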
+
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  // Construct an initial virtual frame on entry to a JS function.
+  VirtualFrame();
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+
+  MacroAssembler* masm() { return cgen()->masm(); }
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The number of elements on the virtual frame.
+  int element_count() { return elements_.length(); }
+
+  // The height of the virtual expression stack.
+  int height() { return element_count() - expression_base_index(); }
+
+  int register_location(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num];
+  }
+
+  int register_location(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)];
+  }
+
+  void set_register_location(Register reg, int index) {
+    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+  }
+
+  bool is_used(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num] != kIllegalIndex;
+  }
+
+  bool is_used(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)]
+        != kIllegalIndex;
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (e.g., the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget count elements from the top of the frame, all of which must
+  // be in memory (including synced), and adjust the stack pointer
+  // downward to match an external frame effect (examples include a call
+  // removing its arguments, and exiting a try/catch removing an
+  // exception handler).  No code will be emitted.
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == element_count() - 1);
+    stack_pointer_ -= count;
+    ForgetElements(count);
+  }
+
+  // Forget count elements from the top of the frame without adjusting
+  // the stack pointer downward.  This is used, for example, before
+  // merging frames at break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_location(reg));
+  }
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (i.e., they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Sync the range of elements in [begin, end] with memory.
+  void SyncRange(int begin, int end);
+
+  // Modify this frame so that an arbitrary frame of the same height can
+  // be merged to it.  Copies and constants are removed from the frame.
+  void MakeMergable();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Unuse(i);
+    }
+  }
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Use(i);
+    }
+  }
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use, and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots();
+
+  // An element of the expression stack as an assembly operand.
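+  // For example, ElementAt(0) is the operand at the stack pointer,
+  // Operand(esp, 0).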
+  Operand ElementAt(int index) const {
+    return Operand(esp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(element_count() - index - 1);
+  }
+
+  void StoreToElementAt(int index) {
+    StoreToFrameSlotAt(element_count() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  Operand LocalAt(int index) {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count());
+    return Operand(ebp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // Push the function on top of the frame.
+  void PushFunction() {
+    PushFrameSlotAt(function_index());
+  }
+
+  // Save the value of the esi register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the esi register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
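+  // For example, with two parameters the receiver (index -1) is at
+  // ebp + 16 and parameter 1 at ebp + 8.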
+  Operand ParameterAt(int index) {
+    ASSERT(-1 <= index);  // -1 is the receiver.
+    ASSERT(index < parameter_count());
+    return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  Operand Receiver() {
+    return ParameterAt(-1);
+  }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call stub given the number of arguments it expects on (and
+  // removes from) the stack.
+  Result CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    return RawCallStub(stub);
+  }
+
+  // Call stub that takes a single argument passed in eax.  The
+  // argument is given as a result which does not have to be eax or
+  // even a register.  The argument is consumed by the call.
+  Result CallStub(CodeStub* stub, Result* arg);
+
+  // Call stub that takes a pair of arguments passed in edx (arg0) and
+  // eax (arg1).  The arguments are given as results which do not have
+  // to be in the proper registers or even in registers.  The
+  // arguments are consumed by the call.
+  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+  // Call runtime given the number of arguments expected on (and
+  // removed from) the stack.
+  Result CallRuntime(Runtime::Function* f, int arg_count);
+  Result CallRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Invoke builtin given the number of arguments it expects on (and
+  // removes from) the stack.
+  Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
+
+  // Call load IC.  Name and receiver are found on top of the frame.
+  // Receiver is not dropped.
+  Result CallLoadIC(RelocInfo::Mode mode);
+
+  // Call keyed load IC.  Key and receiver are found on top of the
+  // frame.  They are not dropped.
+  Result CallKeyedLoadIC(RelocInfo::Mode mode);
+
+  // Call store IC.  Name, value, and receiver are found on top of the
+  // frame.  Receiver is not dropped.
+  Result CallStoreIC();
+
+  // Call keyed store IC.  Value, key, and receiver are found on top
+  // of the frame.  Key and receiver are not dropped.
+  Result CallKeyedStoreIC();
+
+  // Call call IC.  Arguments, receiver, and function name are found
+  // on top of the frame.  Function name slot is not dropped.  The
+  // argument count does not include the receiver.
+  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+
+  // Allocate and call JS function as constructor.  Arguments,
+  // receiver (global object), and function are found on top of the
+  // frame.  Function is not dropped.  The argument count does not
+  // include the receiver.
+  Result CallConstructor(int arg_count);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any registers
+  // except possibly the stack pointer.
+  void Drop(int count);
+
+  // Drop one element.
+  void Drop() {
+    Drop(1);
+  }
+
+  // Duplicate the top element of the frame.
+  void Dup() {
+    PushFrameSlotAt(element_count() - 1);
+  }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+  void EmitPop(Operand operand);
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+  void EmitPush(Operand operand);
+  void EmitPush(Immediate immediate);
+
+  // Push an element on the virtual frame.
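+  // Unlike EmitPush, these need not emit code immediately; the element
+  // can be tracked virtually until the frame is synced.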
+  void Push(Register reg);
+  void Push(Handle<Object> value);
+  void Push(Smi* value) {
+    Push(Handle<Object>(value));
+  }
+
+  // Pushing a result invalidates it (its contents become owned by the
+  // frame).
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+ private:
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
+
+  ZoneList<FrameElement> elements_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the esp register).
+  int stack_pointer_;
+
+  // The index of the register frame element using each register, or
+  // kIllegalIndex if a register is not on the frame.
+  int register_locations_[RegisterAllocator::kNumRegisters];
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() {
+    return cgen()->scope()->num_parameters();
+  }
+  int local_count() {
+    return cgen()->scope()->num_stack_slots();
+  }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the ebp register).  The parameters, receiver, and return address
+  // are below the frame pointer.
+  int frame_pointer() {
+    return parameter_count() + 2;
+  }
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() {
+    return 1;
+  }
+
+  // The index of the context slot in the frame.  It is immediately
+  // above the frame pointer.
+  int context_index() {
+    return frame_pointer() + 1;
+  }
+
+  // The index of the function slot in the frame.  It is above the frame
+  // pointer and the context slot.
+  int function_index() {
+    return frame_pointer() + 2;
+  }
+
+  // The index of the first local.  Between the frame pointer and the
+  // locals lie the context and the function.
+  int local0_index() {
+    return frame_pointer() + 3;
+  }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() {
+    return local0_index() + local_count();
+  }
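+
+  // Taken together, the helpers above imply this layout for a frame
+  // with p parameters and n locals (indices grow toward the stack top):
+  //   0              receiver
+  //   1 .. p         parameters
+  //   p + 1          return address
+  //   p + 2          saved frame pointer (ebp)
+  //   p + 3          context
+  //   p + 4          function
+  //   p + 5 ..       n locals, then the expression stack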
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
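+  // For example, the element at frame_pointer() maps to offset 0;
+  // locals above it map to negative offsets (below ebp in memory).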
+  int fp_relative(int index) {
+    ASSERT(index < element_count());
+    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
+  }
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing the register's external reference count and
+  // of updating the index of the register's location in the frame.
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    set_register_location(reg, index);
+    cgen()->allocator()->Use(reg);
+  }
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements the register's external reference count and invalidates the
+  // index of the register's location in the frame.
+  void Unuse(Register reg) {
+    ASSERT(is_used(reg));
+    set_register_location(reg, kIllegalIndex);
+    cgen()->allocator()->Unuse(reg);
+  }
+
+  // Spill the element at a particular index: write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync a single unsynced element that lies beneath or at the stack pointer.
+  void SyncElementBelowStackPointer(int index);
+
+  // Sync a single unsynced element that lies just above the stack pointer.
+  void SyncElementByPushing(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers, and spill the top spilled_args
+  // elements on the frame.  Sync all other frame elements.  Then drop
+  // dropped_args elements from the virtual frame, to match the effect
+  // of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
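+
+  // Per the comments above, MergeTo performs its moves in this order
+  // (a sketch):
+  //
+  //   MergeMoveRegistersToMemory(expected);     // registers -> memory
+  //   MergeMoveRegistersToRegisters(expected);  // breaks move cycles
+  //   MergeMoveMemoryToRegisters(expected);     // memory/constants -> registers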
+
+  // Invalidates a frame slot (puts an invalid frame element in it).
+  // Copies on the frame are correctly handled, and if this slot was
+  // the backing store of copies, the index of the new backing store
+  // is returned.  Otherwise, returns kIllegalIndex.
+  // Register counts are correctly updated.
+  int InvalidateFrameSlotAt(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  Result RawCallStub(CodeStub* stub);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
+  friend class JumpTarget;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_VIRTUAL_FRAME_IA32_H_